mirror of
https://github.com/trailofbits/algo.git
synced 2025-04-22 09:07:04 +02:00
Compare commits
185 commits
Author | SHA1 | Date | |
---|---|---|---|
|
346437fa6e | ||
|
da32bafd2d | ||
|
5a275cd0cd | ||
|
8c4ae501ad | ||
|
6ce6f5c81e | ||
|
a4a9d6d7c8 | ||
|
0d1be722a1 | ||
|
ebf127dc52 | ||
|
74051d06a2 | ||
|
5817300bb1 | ||
|
baf8a85c0b | ||
|
c9352a1801 | ||
|
67aa5fe881 | ||
|
a6ad0adc94 | ||
|
90c2b1bbeb | ||
|
fd6efb71f2 | ||
|
a5b30cdbfe | ||
|
29f5a2f6f8 | ||
|
fc1d3d4d7b | ||
|
199e404ec4 | ||
|
17881b2d2a | ||
|
abb8164054 | ||
|
1cf3d8d66d | ||
|
7d1af5abab | ||
|
1c80cd23f5 | ||
|
75cfeab24a | ||
|
45fe0f595d | ||
|
1c47de7011 | ||
|
1083b4bb97 | ||
|
8be2d689a7 | ||
|
651f949ca6 | ||
|
9cc6b6c3a9 | ||
|
9ef093976b | ||
|
43ed16ea13 | ||
|
347f864abb | ||
|
a43de09437 | ||
|
972723a139 | ||
|
59672d476d | ||
|
ed01f30949 | ||
|
8b05cda01d | ||
|
c0968a8fdb | ||
|
9f241b1886 | ||
|
63bfa63f6a | ||
|
e416e76ce8 | ||
|
b29b310ff3 | ||
|
a103d8dd16 | ||
|
1a86b5fb4b | ||
|
4b1081b7b8 | ||
|
0633fab3b2 | ||
|
0c6e45a194 | ||
|
a924381e9e | ||
|
7203f33f2e | ||
|
c759d75753 | ||
|
1bf8a40d51 | ||
|
7f87f51a03 | ||
|
571daf4464 | ||
|
0ce7840847 | ||
|
f4b51e441d | ||
|
28717ad3a8 | ||
|
de1e9093ab | ||
|
4bed66f19e | ||
|
d06869e1eb | ||
|
4e739b518f | ||
|
6aa177b286 | ||
|
1c6702d3ef | ||
|
4464be8259 | ||
|
e431f21cbb | ||
|
ec1fa3bb57 | ||
|
99473ef104 | ||
|
665af5a7f4 | ||
|
b823278aa6 | ||
|
96988f1b26 | ||
|
e36d237ebc | ||
|
e5235e1bdc | ||
|
728b8aae06 | ||
|
b6f28c753a | ||
|
1f04ab3ec1 | ||
|
c50e0d0aad | ||
|
e4e8e905d5 | ||
|
44d4c274ef | ||
|
70f9f91112 | ||
|
968a676ef0 | ||
|
11c0c2ca71 | ||
|
654809f126 | ||
|
4adb35db80 | ||
|
5fd4cb3c9c | ||
|
e99aebb268 | ||
|
8c560719a5 | ||
|
ebec20ed36 | ||
|
bf6b969f0c | ||
|
b5bb64d07a | ||
|
a1d39aecf5 | ||
|
ad53c69f71 | ||
|
f1f676b75e | ||
|
2821f28866 | ||
|
52f88ad12e | ||
|
060b401880 | ||
|
66e024a015 | ||
|
4172dea436 | ||
|
04a2f9361b | ||
|
7a13a297f5 | ||
|
b1d1491a66 | ||
|
8b2b57deda | ||
|
4e793ddf65 | ||
|
54bb481d2c | ||
|
fa9e43e7b4 | ||
|
be96315775 | ||
|
8894dd0848 | ||
|
5fc738ba8b | ||
|
f0d0e91be0 | ||
|
3fe09bd904 | ||
|
350800fdf7 | ||
|
830877557f | ||
|
f76d361c55 | ||
|
5efa20b79a | ||
|
e4753d2510 | ||
|
47bb48b0fb | ||
|
3f86ae0713 | ||
|
9ac64cbf21 | ||
|
c14ff0d611 | ||
|
7695372e2b | ||
|
6753dc919f | ||
|
ca898d5bf2 | ||
|
eeda23be97 | ||
|
e29615bc05 | ||
|
02fe2f7dd5 | ||
|
27de76048c | ||
|
4f1b9270be | ||
|
c231cd42d6 | ||
|
3f3138f555 | ||
|
28d95eace2 | ||
|
1e8a9c5cf1 | ||
|
512b5660e1 | ||
|
5c09d6dd02 | ||
|
dcfed41ae8 | ||
|
027b1b8497 | ||
|
3720c5eb1f | ||
|
2abbf22196 | ||
|
78cc708435 | ||
|
0efa4eaf91 | ||
|
0e57da8237 | ||
|
6ac2e2d1a4 | ||
|
df57e21194 | ||
|
2d94bbd278 | ||
|
62d00901e6 | ||
|
d8c48ec505 | ||
|
98f43c5cbd | ||
|
24574a3205 | ||
|
0629aa5ca5 | ||
|
cc72728c6d | ||
|
53dfc570eb | ||
|
eb40ade096 | ||
|
625f634163 | ||
|
d635c76b50 | ||
|
b66c9f59aa | ||
|
45aa0065cd | ||
|
221568cd25 | ||
|
d18de4b679 | ||
|
d72f3b5ba3 | ||
|
9f27c25adc | ||
|
43aafdfce1 | ||
|
b65e6b1351 | ||
|
dfd979eb68 | ||
|
5737317dae | ||
|
88eaf30e65 | ||
|
d0ce162559 | ||
|
792e991442 | ||
|
0b4ec243a7 | ||
|
8bdd99c05d | ||
|
61729ac9b5 | ||
|
0c3aada66f | ||
|
fc27b439b5 | ||
|
71e49eb2c8 | ||
|
1ca8ee5554 | ||
|
c6f45ead69 | ||
|
95eddccfb7 | ||
|
3c30074a7f | ||
|
fe7755e6a0 | ||
|
fe19859b00 | ||
|
655729ef54 | ||
|
3dc08c94cf | ||
|
2909107554 | ||
|
211d1b2cab | ||
|
561afe18f9 | ||
|
f8ce1f84a2 |
174 changed files with 4635 additions and 3215 deletions
|
@ -1,3 +1,10 @@
|
|||
skip_list:
|
||||
- yaml
|
||||
- '204'
|
||||
verbosity: 1
|
||||
|
||||
warn_list:
|
||||
- no-changed-when
|
||||
- no-handler
|
||||
- fqcn-builtins
|
||||
- var-spacing
|
||||
|
|
|
@ -9,5 +9,10 @@ README.md
|
|||
config.cfg
|
||||
configs
|
||||
docs
|
||||
.env
|
||||
logo.png
|
||||
tests
|
||||
CHANGELOG.md
|
||||
PULL_REQUEST_TEMPLATE.md
|
||||
Vagrantfile
|
||||
Makefile
|
||||
|
|
13
.github/dependabot.yml
vendored
Normal file
13
.github/dependabot.yml
vendored
Normal file
|
@ -0,0 +1,13 @@
|
|||
version: 2
|
||||
updates:
|
||||
# Maintain dependencies for GitHub Actions
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
|
||||
# Maintain dependencies for Python
|
||||
- package-ecosystem: "pip"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "daily"
|
44
.github/workflows/docker-image.yaml
vendored
Normal file
44
.github/workflows/docker-image.yaml
vendored
Normal file
|
@ -0,0 +1,44 @@
|
|||
name: Create and publish a Docker image
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: ['master']
|
||||
|
||||
env:
|
||||
REGISTRY: ghcr.io
|
||||
IMAGE_NAME: ${{ github.repository }}
|
||||
|
||||
jobs:
|
||||
build-and-push-image:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Log in to the Container registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata (tags, labels) for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
tags: |
|
||||
# set latest tag for master branch
|
||||
type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'master') }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
153
.github/workflows/main.yml
vendored
Normal file
153
.github/workflows/main.yml
vendored
Normal file
|
@ -0,0 +1,153 @@
|
|||
name: Main
|
||||
|
||||
on: [push, pull_request]
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v2.3.2
|
||||
with:
|
||||
python-version: '3.11'
|
||||
cache: 'pip'
|
||||
|
||||
- name: Install dependencies
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive
|
||||
run: |
|
||||
sudo apt update -y
|
||||
python -m pip install --upgrade pip
|
||||
pip install -r requirements.txt
|
||||
sudo snap install shellcheck
|
||||
pip install ansible-lint
|
||||
|
||||
- name: Checks and linters
|
||||
run: |
|
||||
/snap/bin/shellcheck algo install.sh
|
||||
ansible-playbook main.yml --syntax-check
|
||||
ansible-lint -x experimental,package-latest,unnamed-task -v *.yml roles/{local,cloud-*}/*/*.yml || true
|
||||
|
||||
scripted-deploy:
|
||||
runs-on: ubuntu-20.04
|
||||
strategy:
|
||||
matrix:
|
||||
UBUNTU_VERSION: ["22.04"]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v2.3.2
|
||||
with:
|
||||
python-version: '3.11'
|
||||
cache: 'pip'
|
||||
|
||||
- name: Install dependencies
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive
|
||||
run: |
|
||||
sudo apt update -y
|
||||
sudo apt install -y \
|
||||
wireguard \
|
||||
libxml2-utils \
|
||||
crudini \
|
||||
fping \
|
||||
strongswan \
|
||||
libstrongswan-standard-plugins \
|
||||
openresolv
|
||||
|
||||
python3 -m pip install --upgrade pip
|
||||
python3 -m pip install -r requirements.txt
|
||||
|
||||
sudo snap refresh lxd
|
||||
sudo lxd init --auto
|
||||
|
||||
- name: Provision
|
||||
env:
|
||||
DEPLOY: cloud-init
|
||||
UBUNTU_VERSION: ${{ matrix.UBUNTU_VERSION }}
|
||||
REPOSITORY: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name || github.repository }}
|
||||
BRANCH: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.ref || github.ref }}
|
||||
run: |
|
||||
ssh-keygen -f ~/.ssh/id_rsa -t rsa -N ''
|
||||
# sed -i "s/^reduce_mtu:\s0$/reduce_mtu: 80/" config.cfg
|
||||
sudo -E ./tests/pre-deploy.sh
|
||||
|
||||
- name: Deployment
|
||||
run: |
|
||||
set -x
|
||||
until sudo lxc exec algo -- test -f /var/log/cloud-init-output.log; do echo 'Log file not found, Sleep for 3 seconds'; sleep 3; done
|
||||
( sudo lxc exec algo -- tail -f /var/log/cloud-init-output.log & )
|
||||
until sudo lxc exec algo -- test -f /var/lib/cloud/data/result.json; do
|
||||
echo 'Cloud init is not finished. Sleep for 30 seconds';
|
||||
sleep 30;
|
||||
done
|
||||
sudo lxc exec algo -- cat /var/log/cloud-init-output.log
|
||||
sudo lxc exec algo -- test -f /opt/algo/configs/localhost/.config.yml
|
||||
sudo lxc exec algo -- tar zcf /root/algo-configs.tar -C /opt/algo/configs/ .
|
||||
sudo lxc file pull algo/root/algo-configs.tar ./
|
||||
sudo tar -C ./configs -zxf algo-configs.tar
|
||||
|
||||
- name: Tests
|
||||
run: |
|
||||
set -x
|
||||
sudo -E bash -x ./tests/wireguard-client.sh
|
||||
sudo env "PATH=$PATH" ./tests/ipsec-client.sh
|
||||
|
||||
docker-deploy:
|
||||
runs-on: ubuntu-20.04
|
||||
strategy:
|
||||
matrix:
|
||||
UBUNTU_VERSION: ["22.04"]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v2.3.2
|
||||
with:
|
||||
python-version: '3.11'
|
||||
cache: 'pip'
|
||||
|
||||
- name: Install dependencies
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive
|
||||
run: |
|
||||
set -x
|
||||
sudo apt update -y
|
||||
sudo apt install -y \
|
||||
wireguard \
|
||||
libxml2-utils \
|
||||
crudini \
|
||||
fping \
|
||||
strongswan \
|
||||
libstrongswan-standard-plugins \
|
||||
openresolv
|
||||
|
||||
python3 -m pip install --upgrade pip
|
||||
python3 -m pip install -r requirements.txt
|
||||
|
||||
sudo snap refresh lxd
|
||||
sudo lxd init --auto
|
||||
|
||||
- name: Provision
|
||||
env:
|
||||
DEPLOY: docker
|
||||
UBUNTU_VERSION: ${{ matrix.UBUNTU_VERSION }}
|
||||
REPOSITORY: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name || github.repository }}
|
||||
BRANCH: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.ref || github.ref }}
|
||||
run: |
|
||||
ssh-keygen -f ~/.ssh/id_rsa -t rsa -N ''
|
||||
sed -i "s/^reduce_mtu:\s0$/reduce_mtu: 80/" config.cfg
|
||||
sudo -E ./tests/pre-deploy.sh
|
||||
|
||||
- name: Deployment
|
||||
env:
|
||||
DEPLOY: docker
|
||||
UBUNTU_VERSION: ${{ matrix.UBUNTU_VERSION }}
|
||||
run: |
|
||||
docker build -t local/algo .
|
||||
./tests/local-deploy.sh
|
||||
./tests/update-users.sh
|
||||
|
||||
- name: Tests
|
||||
run: |
|
||||
set -x
|
||||
sudo bash -x ./tests/wireguard-client.sh
|
||||
sudo env "PATH=$PATH" bash -x ./tests/ipsec-client.sh
|
||||
sudo bash -x ./tests/ssh-tunnel.sh
|
3
.gitignore
vendored
3
.gitignore
vendored
|
@ -3,7 +3,8 @@
|
|||
configs/*
|
||||
inventory_users
|
||||
*.kate-swp
|
||||
env
|
||||
*env
|
||||
.DS_Store
|
||||
venvs/*
|
||||
!venvs/.gitinit
|
||||
.vagrant
|
||||
|
|
123
.travis.yml
123
.travis.yml
|
@ -1,123 +0,0 @@
|
|||
---
|
||||
language: python
|
||||
python: "2.7"
|
||||
dist: xenial
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
||||
addons:
|
||||
apt:
|
||||
sources: &default_sources
|
||||
- sourceline: 'ppa:ubuntu-lxc/stable'
|
||||
- sourceline: 'ppa:wireguard/wireguard'
|
||||
packages: &default_packages
|
||||
- python-pip
|
||||
- lxd
|
||||
- expect-dev
|
||||
- debootstrap
|
||||
- tree
|
||||
- bridge-utils
|
||||
- dnsutils
|
||||
- build-essential
|
||||
- libssl-dev
|
||||
- libffi-dev
|
||||
- python-dev
|
||||
- linux-headers-$(uname -r)
|
||||
- wireguard
|
||||
- libxml2-utils
|
||||
- crudini
|
||||
- fping
|
||||
- strongswan
|
||||
- libstrongswan-standard-plugins
|
||||
|
||||
cache:
|
||||
directories:
|
||||
- $HOME/lxc/
|
||||
pip: true
|
||||
|
||||
before_cache:
|
||||
- mkdir $HOME/lxc
|
||||
- sudo tar cf $HOME/lxc/cache.tar /var/lib/lxd/images/
|
||||
- sudo chown $USER. $HOME/lxc/cache.tar
|
||||
|
||||
custom_scripts:
|
||||
provisioning: &provisioning
|
||||
- ssh-keygen -f ~/.ssh/id_rsa -t rsa -N ''
|
||||
- sudo ./tests/pre-deploy.sh
|
||||
- 'sed -i "s/^reduce_mtu:\s0$/reduce_mtu: 20/" config.cfg'
|
||||
tests: &tests
|
||||
- sudo ./tests/wireguard-client.sh
|
||||
- sudo env "PATH=$PATH" ./tests/ipsec-client.sh
|
||||
- sudo ./tests/ssh-tunnel.sh
|
||||
|
||||
stages:
|
||||
- &tests-and-linters
|
||||
stage: Tests
|
||||
name: code checks and linters
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- shellcheck
|
||||
script:
|
||||
- pip install ansible-lint
|
||||
- shellcheck algo install.sh
|
||||
- ansible-playbook main.yml --syntax-check
|
||||
- ansible-lint -v *.yml
|
||||
|
||||
- &deploy-local
|
||||
stage: Deploy
|
||||
name: local deployment from docker
|
||||
addons:
|
||||
apt:
|
||||
sources: *default_sources
|
||||
packages: *default_packages
|
||||
before_install: *provisioning
|
||||
before_script:
|
||||
- docker build -t travis/algo .
|
||||
- ./tests/local-deploy.sh
|
||||
- ./tests/update-users.sh
|
||||
script: *tests
|
||||
|
||||
- &deploy-cloudinit
|
||||
stage: Deploy
|
||||
name: cloud-init deployment
|
||||
addons:
|
||||
apt:
|
||||
sources: *default_sources
|
||||
packages: *default_packages
|
||||
env: DEPLOY=cloud-init
|
||||
before_install: *provisioning
|
||||
before_script:
|
||||
- until sudo lxc exec algo -- test -f /var/log/cloud-init-output.log; do echo 'Log file not found, Sleep for 3 seconds'; sleep 3; done
|
||||
- ( sudo lxc exec algo -- tail -f /var/log/cloud-init-output.log & )
|
||||
- |
|
||||
until sudo lxc exec algo -- test -f /var/lib/cloud/data/result.json; do
|
||||
echo 'Cloud init is not finished. Sleep for 30 seconds';
|
||||
sleep 30;
|
||||
done
|
||||
- sudo lxc exec algo -- test -f /opt/algo/configs/localhost/.config.yml
|
||||
- sudo lxc exec algo -- tar zcf /root/algo-configs.tar -C /opt/algo/configs/ .
|
||||
- sudo lxc file pull algo/root/algo-configs.tar ./
|
||||
- sudo tar -C ./configs -zxf algo-configs.tar
|
||||
script: *tests
|
||||
|
||||
matrix:
|
||||
fast_finish: true
|
||||
include:
|
||||
- <<: *tests-and-linters
|
||||
- <<: *deploy-local
|
||||
name: 'Ubuntu 18.04: local deployment from docker'
|
||||
env: DEPLOY=docker UBUNTU_VERSION=18.04
|
||||
- <<: *deploy-local
|
||||
name: 'Ubuntu 19.04: local deployment from docker'
|
||||
env: DEPLOY=docker UBUNTU_VERSION=19.04
|
||||
- <<: *deploy-cloudinit
|
||||
name: 'Ubuntu 18.04: cloud-init deployment'
|
||||
env: DEPLOY=cloud-init UBUNTU_VERSION=18.04
|
||||
- <<: *deploy-cloudinit
|
||||
name: 'Ubuntu 19.04: cloud-init deployment'
|
||||
env: DEPLOY=cloud-init UBUNTU_VERSION=19.04
|
||||
|
||||
notifications:
|
||||
email: false
|
31
CHANGELOG.md
31
CHANGELOG.md
|
@ -1,7 +1,34 @@
|
|||
## 1.2 [(Unreleased)](https://github.com/trailofbits/algo/tree/HEAD)
|
||||
|
||||
### Added
|
||||
- New provider CloudStack added [\#1420](https://github.com/trailofbits/algo/pull/1420)
|
||||
- Support for Ubuntu 20.04 [\#1782](https://github.com/trailofbits/algo/pull/1782)
|
||||
- Allow WireGuard to listen on port 53 [\#1594](https://github.com/trailofbits/algo/pull/1594)
|
||||
- Introducing Makefile [\#1553](https://github.com/trailofbits/algo/pull/1553)
|
||||
- Option to unblock SMB and Netbios [\#1558](https://github.com/trailofbits/algo/pull/1558)
|
||||
- Allow OnDemand to be toggled later [\#1557](https://github.com/trailofbits/algo/pull/1557)
|
||||
- New provider Hetzner added [\#1549](https://github.com/trailofbits/algo/pull/1549)
|
||||
- Alternative Ingress IP [\#1605](https://github.com/trailofbits/algo/pull/1605)
|
||||
|
||||
## 1.1 [(Jul 31, 2019)](https://github.com/trailofbits/algo/tree/v1.1)
|
||||
### Fixes
|
||||
- WSL private SSH key permissions [\#1584](https://github.com/trailofbits/algo/pull/1584)
|
||||
- Scaleway instance creating issue [\#1549](https://github.com/trailofbits/algo/pull/1549)
|
||||
|
||||
### Changed
|
||||
- Discontinue use of the WireGuard PPA [\#1855](https://github.com/trailofbits/algo/pull/1855)
|
||||
- SSH changes [\#1636](https://github.com/trailofbits/algo/pull/1636)
|
||||
- Default port is set to `4160` and can be changed in the config
|
||||
- SSH user for every cloud provider is `algo`
|
||||
- EC2: enable EBS encryption by default [\#1556](https://github.com/trailofbits/algo/pull/1556)
|
||||
- Upgrades [\#1549](https://github.com/trailofbits/algo/pull/1549)
|
||||
- Python 3
|
||||
- Ansible 2.9 [\#1777](https://github.com/trailofbits/algo/pull/1777)
|
||||
|
||||
### Breaking changes
|
||||
- Python virtual environment moved to .env [\#1549](https://github.com/trailofbits/algo/pull/1549)
|
||||
|
||||
|
||||
## 1.1 [(Jul 31, 2019)](https://github.com/trailofbits/algo/releases/tag/v1.1)
|
||||
|
||||
### Removed
|
||||
- IKEv2 for Windows is now deleted, use Wireguard [\#1493](https://github.com/trailofbits/algo/issues/1493)
|
||||
|
@ -36,7 +63,7 @@
|
|||
- Simplify Apple Profile Configuration Template [\#1033](https://github.com/trailofbits/algo/pull/1033) ([faf0](https://github.com/faf0))
|
||||
- Include roles as separate tasks [\#1365](https://github.com/trailofbits/algo/pull/1365) ([jackivanov](https://github.com/jackivanov))
|
||||
|
||||
## 1.0 [(Mar 19, 2019)](https://github.com/trailofbits/algo/tree/v1.0)
|
||||
## 1.0 [(Mar 19, 2019)](https://github.com/trailofbits/algo/releases/tag/v1.0)
|
||||
|
||||
### Added
|
||||
- Tagged releases and changelog [\#724](https://github.com/trailofbits/algo/issues/724)
|
||||
|
|
1
CODEOWNERS
Normal file
1
CODEOWNERS
Normal file
|
@ -0,0 +1 @@
|
|||
* @jackivanov
|
17
Dockerfile
17
Dockerfile
|
@ -1,8 +1,7 @@
|
|||
FROM python:2-alpine
|
||||
FROM python:3.11-alpine
|
||||
|
||||
ARG VERSION="git"
|
||||
ARG PACKAGES="bash libffi openssh-client openssl rsync tini"
|
||||
ARG BUILD_PACKAGES="gcc libffi-dev linux-headers make musl-dev openssl-dev"
|
||||
ARG PACKAGES="bash libffi openssh-client openssl rsync tini gcc libffi-dev linux-headers make musl-dev openssl-dev rust cargo"
|
||||
|
||||
LABEL name="algo" \
|
||||
version="${VERSION}" \
|
||||
|
@ -15,13 +14,11 @@ RUN mkdir -p /algo && mkdir -p /algo/configs
|
|||
|
||||
WORKDIR /algo
|
||||
COPY requirements.txt .
|
||||
RUN apk --no-cache add ${BUILD_PACKAGES} && \
|
||||
python -m pip --no-cache-dir install -U pip && \
|
||||
python -m pip --no-cache-dir install virtualenv && \
|
||||
python -m virtualenv env && \
|
||||
source env/bin/activate && \
|
||||
python -m pip --no-cache-dir install -r requirements.txt && \
|
||||
apk del ${BUILD_PACKAGES}
|
||||
RUN python3 -m pip --no-cache-dir install -U pip && \
|
||||
python3 -m pip --no-cache-dir install virtualenv && \
|
||||
python3 -m virtualenv .env && \
|
||||
source .env/bin/activate && \
|
||||
python3 -m pip --no-cache-dir install -r requirements.txt
|
||||
COPY . .
|
||||
RUN chmod 0755 /algo/algo-docker.sh
|
||||
|
||||
|
|
39
Makefile
Normal file
39
Makefile
Normal file
|
@ -0,0 +1,39 @@
|
|||
## docker-build: Build and tag a docker image
|
||||
.PHONY: docker-build
|
||||
|
||||
IMAGE := trailofbits/algo
|
||||
TAG := latest
|
||||
DOCKERFILE := Dockerfile
|
||||
CONFIGURATIONS := $(shell pwd)
|
||||
|
||||
docker-build:
|
||||
docker build \
|
||||
-t $(IMAGE):$(TAG) \
|
||||
-f $(DOCKERFILE) \
|
||||
.
|
||||
|
||||
## docker-deploy: Mount config directory and deploy Algo
|
||||
.PHONY: docker-deploy
|
||||
|
||||
# '--rm' flag removes the container when finished.
|
||||
docker-deploy:
|
||||
docker run \
|
||||
--cap-drop=all \
|
||||
--rm \
|
||||
-it \
|
||||
-v $(CONFIGURATIONS):/data \
|
||||
$(IMAGE):$(TAG)
|
||||
|
||||
## docker-clean: Remove images and containers.
|
||||
.PHONY: docker-prune
|
||||
|
||||
docker-prune:
|
||||
docker images \
|
||||
$(IMAGE) |\
|
||||
awk '{if (NR>1) print $$3}' |\
|
||||
xargs docker rmi
|
||||
|
||||
## docker-all: Build, Deploy, Prune
|
||||
.PHONY: docker-all
|
||||
|
||||
docker-all: docker-build docker-deploy docker-prune
|
|
@ -14,9 +14,9 @@
|
|||
|
||||
## Types of changes
|
||||
<!--- What types of changes does your code introduce? Put an `x` in all the boxes that apply: -->
|
||||
- [] Bug fix (non-breaking change which fixes an issue)
|
||||
- [] New feature (non-breaking change which adds functionality)
|
||||
- [] Breaking change (fix or feature that would cause existing functionality to not work as expected)
|
||||
- Bug fix (non-breaking change which fixes an issue)
|
||||
- New feature (non-breaking change which adds functionality)
|
||||
- Breaking change (fix or feature that would cause existing functionality to not work as expected)
|
||||
|
||||
## Checklist:
|
||||
<!--- Go over all the following points, and put an `x` in all the boxes that apply. -->
|
||||
|
|
176
README.md
176
README.md
|
@ -1,75 +1,88 @@
|
|||
# Algo VPN
|
||||
|
||||
[](https://gitter.im/trailofbits/algo?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
[](https://twitter.com/AlgoVPN)
|
||||
[](https://travis-ci.org/trailofbits/algo)
|
||||
[](https://github.com/trailofbits/algo/actions)
|
||||
|
||||
Algo VPN is a set of Ansible scripts that simplify the setup of a personal IPSEC and Wireguard VPN. It uses the most secure defaults available, works with common cloud providers, and does not require client software on most devices. See our [release announcement](https://blog.trailofbits.com/2016/12/12/meet-algo-the-vpn-that-works/) for more information.
|
||||
Algo VPN is a set of Ansible scripts that simplify the setup of a personal WireGuard and IPsec VPN. It uses the most secure defaults available and works with common cloud providers. See our [release announcement](https://blog.trailofbits.com/2016/12/12/meet-algo-the-vpn-that-works/) for more information.
|
||||
|
||||
## Features
|
||||
|
||||
* Supports only IKEv2 with strong crypto (AES-GCM, SHA2, and P-256) and [WireGuard](https://www.wireguard.com/)
|
||||
* Generates Apple profiles to auto-configure iOS and macOS devices
|
||||
* Supports only IKEv2 with strong crypto (AES-GCM, SHA2, and P-256) for iOS, macOS, and Linux
|
||||
* Supports [WireGuard](https://www.wireguard.com/) for all of the above, in addition to Android and Windows 10
|
||||
* Generates .conf files and QR codes for iOS, macOS, Android, and Windows WireGuard clients
|
||||
* Generates Apple profiles to auto-configure iOS and macOS devices for IPsec - no client software required
|
||||
* Includes a helper script to add and remove users
|
||||
* Blocks ads with a local DNS resolver (optional)
|
||||
* Sets up limited SSH users for tunneling traffic (optional)
|
||||
* Based on current versions of Ubuntu and strongSwan
|
||||
* Installs to DigitalOcean, Amazon Lightsail, Amazon EC2, Vultr, Microsoft Azure, Google Compute Engine, Scaleway, OpenStack, or [your own Ubuntu server](docs/deploy-to-ubuntu.md)
|
||||
* Installs to DigitalOcean, Amazon Lightsail, Amazon EC2, Vultr, Microsoft Azure, Google Compute Engine, Scaleway, OpenStack, CloudStack, Hetzner Cloud, Linode, or [your own Ubuntu server (for more advanced users)](docs/deploy-to-ubuntu.md)
|
||||
|
||||
## Anti-features
|
||||
|
||||
* Does not support legacy cipher suites or protocols like L2TP, IKEv1, or RSA
|
||||
* Does not install Tor, OpenVPN, or other risky servers
|
||||
* Does not depend on the security of [TLS](https://tools.ietf.org/html/rfc7457)
|
||||
* Does not require client software on most platforms
|
||||
* Does not claim to provide anonymity or censorship avoidance
|
||||
* Does not claim to protect you from the [FSB](https://en.wikipedia.org/wiki/Federal_Security_Service), [MSS](https://en.wikipedia.org/wiki/Ministry_of_State_Security_(China)), [DGSE](https://en.wikipedia.org/wiki/Directorate-General_for_External_Security), or [FSM](https://en.wikipedia.org/wiki/Flying_Spaghetti_Monster)
|
||||
|
||||
## Deploy the Algo Server
|
||||
|
||||
The easiest way to get an Algo server running is to let it set up a _new_ virtual machine in the cloud for you.
|
||||
The easiest way to get an Algo server running is to run it on your local system or from [Google Cloud Shell](docs/deploy-from-cloudshell.md) and let it set up a _new_ virtual machine in the cloud for you.
|
||||
|
||||
1. **Setup an account on a cloud hosting provider.** Algo supports [DigitalOcean](https://m.do.co/c/4d7f4ff9cfe4) (most user friendly), [Amazon Lightsail](https://aws.amazon.com/lightsail/), [Amazon EC2](https://aws.amazon.com/), [Vultr](https://www.vultr.com/), [Microsoft Azure](https://azure.microsoft.com/), [Google Compute Engine](https://cloud.google.com/compute/), [Scaleway](https://www.scaleway.com/), and [DreamCompute](https://www.dreamhost.com/cloud/computing/) or other OpenStack-based cloud hosting.
|
||||
1. **Setup an account on a cloud hosting provider.** Algo supports [DigitalOcean](https://m.do.co/c/4d7f4ff9cfe4) (most user friendly), [Amazon Lightsail](https://aws.amazon.com/lightsail/), [Amazon EC2](https://aws.amazon.com/), [Vultr](https://www.vultr.com/), [Microsoft Azure](https://azure.microsoft.com/), [Google Compute Engine](https://cloud.google.com/compute/), [Scaleway](https://www.scaleway.com/), [DreamCompute](https://www.dreamhost.com/cloud/computing/), [Linode](https://www.linode.com), or other OpenStack-based cloud hosting, [Exoscale](https://www.exoscale.com) or other CloudStack-based cloud hosting, or [Hetzner Cloud](https://www.hetzner.com/).
|
||||
|
||||
2. **[Download Algo](https://github.com/trailofbits/algo/archive/master.zip).** Unzip it in a convenient location on your local machine.
|
||||
2. **Get a copy of Algo.** The Algo scripts will be installed on your local system. There are two ways to get a copy:
|
||||
|
||||
3. **Install Algo's core dependencies.** Open the Terminal. The `python` interpreter you use to deploy Algo must be python2. If you don't know what this means, you're probably fine. `cd` into the `algo-master` directory where you unzipped Algo, then run:
|
||||
- Download the [ZIP file](https://github.com/trailofbits/algo/archive/master.zip). Unzip the file to create a directory named `algo-master` containing the Algo scripts.
|
||||
|
||||
- macOS:
|
||||
- Use `git clone` to create a directory named `algo` containing the Algo scripts:
|
||||
```bash
|
||||
$ python -m ensurepip --user
|
||||
$ python -m pip install --user --upgrade virtualenv
|
||||
git clone https://github.com/trailofbits/algo.git
|
||||
```
|
||||
- Linux (deb-based):
|
||||
|
||||
3. **Install Algo's core dependencies.** Algo requires that **Python 3.10 or later** and at least one supporting package are installed on your system.
|
||||
|
||||
- **macOS:** Catalina (10.15) and higher includes Python 3 as part of the optional Command Line Developer Tools package. From Terminal run:
|
||||
|
||||
```bash
|
||||
$ sudo apt-get update && sudo apt-get install \
|
||||
build-essential \
|
||||
libssl-dev \
|
||||
libffi-dev \
|
||||
python-dev \
|
||||
python-pip \
|
||||
python-setuptools \
|
||||
python-virtualenv -y
|
||||
python3 -m pip install --user --upgrade virtualenv
|
||||
```
|
||||
- Linux (rpm-based): See the pre-installation documentation for [RedHat/CentOS 6.x](docs/deploy-from-redhat-centos6.md) or [Fedora](docs/deploy-from-fedora-workstation.md)
|
||||
- Windows: See the [Windows documentation](docs/deploy-from-windows.md)
|
||||
|
||||
4. **Install Algo's remaining dependencies.** Use the same Terminal window as the previous step and run:
|
||||
If prompted, install the Command Line Developer Tools and re-run the above command.
|
||||
|
||||
For macOS versions prior to Catalina, see [Deploy from macOS](docs/deploy-from-macos.md) for information on installing Python 3 .
|
||||
|
||||
- **Linux:** Recent releases of Ubuntu, Debian, and Fedora come with Python 3 already installed. If your Python version is not 3.10, then you will need to use pyenv to install Python 3.10. Make sure your system is up-to-date and install the supporting package(s):
|
||||
* Ubuntu and Debian:
|
||||
```bash
|
||||
$ python -m virtualenv --python=`which python2` env &&
|
||||
source env/bin/activate &&
|
||||
python -m pip install -U pip virtualenv &&
|
||||
python -m pip install -r requirements.txt
|
||||
sudo apt install -y --no-install-recommends python3-virtualenv file lookup
|
||||
```
|
||||
On macOS, you may be prompted to install `cc`. You should press accept if so.
|
||||
On a Raspberry Pi running Ubuntu also install `libffi-dev` and `libssl-dev`.
|
||||
|
||||
5. **List the users to create.** Open `config.cfg` in your favorite text editor. Specify the users you wish to create in the `users` list. If you want to be able to add or delete users later, you **must** select `yes` for the `Do you want to retain the CA key?` prompt during the deployment. Make a unique user for each device you plan to setup.
|
||||
* Fedora:
|
||||
```bash
|
||||
sudo dnf install -y python3-virtualenv
|
||||
```
|
||||
|
||||
6. **Start the deployment.** Return to your terminal. In the Algo directory, run `./algo` and follow the instructions. There are several optional features available. None are required for a fully functional VPN server. These optional features are described in greater detail in [deploy-from-ansible.md](docs/deploy-from-ansible.md).
|
||||
- **Windows:** Use the Windows Subsystem for Linux (WSL) to create your own copy of Ubuntu running under Windows from which to install and run Algo. See the [Windows documentation](docs/deploy-from-windows.md) for more information.
|
||||
|
||||
That's it! You will get the message below when the server deployment process completes. You now have an Algo server on the internet. Take note of the p12 (user certificate) password and the CA key in case you need them later, **they will only be displayed this time**.
|
||||
4. **Install Algo's remaining dependencies.** You'll need to run these commands from the Algo directory each time you download a new copy of Algo. In a Terminal window `cd` into the `algo-master` (ZIP file) or `algo` (`git clone`) directory and run:
|
||||
```bash
|
||||
python3 -m virtualenv --python="$(command -v python3)" .env &&
|
||||
source .env/bin/activate &&
|
||||
python3 -m pip install -U pip virtualenv &&
|
||||
python3 -m pip install -r requirements.txt
|
||||
```
|
||||
On Fedora first run `export TMPDIR=/var/tmp`, then add the option `--system-site-packages` to the first command above (after `python3 -m virtualenv`). On macOS install the C compiler if prompted.
|
||||
|
||||
You can now setup clients to connect it, e.g. your iPhone or laptop. Proceed to [Configure the VPN Clients](#configure-the-vpn-clients) below.
|
||||
5. **Set your configuration options.** Open the file `config.cfg` in your favorite text editor. Specify the users you wish to create in the `users` list. Create a unique user for each device you plan to connect to your VPN.
|
||||
> Note: [IKEv2 Only] If you want to add or delete users later, you **must** select `yes` at the `Do you want to retain the keys (PKI)?` prompt during the server deployment. You should also review the other options before deployment, as changing your mind about them later [may require you to deploy a brand new server](https://github.com/trailofbits/algo/blob/master/docs/faq.md#i-deployed-an-algo-server-can-you-update-it-with-new-features).
|
||||
|
||||
6. **Start the deployment.** Return to your terminal. In the Algo directory, run `./algo` and follow the instructions. There are several optional features available, none of which are required for a fully functional VPN server. These optional features are described in greater detail in [here](docs/deploy-from-ansible.md).
|
||||
|
||||
That's it! You will get the message below when the server deployment process completes. Take note of the p12 (user certificate) password and the CA key in case you need them later, **they will only be displayed this time**.
|
||||
|
||||
You can now set up clients to connect to your VPN. Proceed to [Configure the VPN Clients](#configure-the-vpn-clients) below.
|
||||
|
||||
```
|
||||
"# Congratulations! #"
|
||||
|
@ -80,7 +93,7 @@ You can now setup clients to connect it, e.g. your iPhone or laptop. Proceed to
|
|||
"# Local DNS resolver 172.16.0.1 #"
|
||||
"# The p12 and SSH keys password for new users is XXXXXXXX #"
|
||||
"# The CA key password is XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #"
|
||||
"# Shell access: ssh -i configs/algo.pem root@xxx.xxx.xx.xx #"
|
||||
"# Shell access: ssh -F configs/<server_ip>/ssh_config <hostname> #"
|
||||
```
|
||||
|
||||
## Configure the VPN Clients
|
||||
|
@ -111,36 +124,17 @@ WireGuard is used to provide VPN services on Windows. Algo generates a WireGuard
|
|||
|
||||
Install the [WireGuard VPN Client](https://www.wireguard.com/install/#windows-7-8-81-10-2012-2016-2019). Import the generated `wireguard/<username>.conf` file to your device, then setup a new connection with it.
|
||||
|
||||
### Linux Network Manager Clients (e.g., Ubuntu, Debian, or Fedora Desktop)
|
||||
### Linux WireGuard Clients
|
||||
|
||||
Network Manager does not support AES-GCM. In order to support Linux Desktop clients, choose the "compatible" cryptography during the deploy process and use at least Network Manager 1.4.1. See [Issue #263](https://github.com/trailofbits/algo/issues/263) for more information.
|
||||
WireGuard works great with Linux clients. See [this page](docs/client-linux-wireguard.md) for an example of how to configure WireGuard on Ubuntu.
|
||||
|
||||
### Linux strongSwan Clients (e.g., OpenWRT, Ubuntu Server, etc.)
|
||||
### Linux strongSwan IPsec Clients (e.g., OpenWRT, Ubuntu Server, etc.)
|
||||
|
||||
Install strongSwan, then copy the included ipsec_user.conf, ipsec_user.secrets, user.crt (user certificate), and user.key (private key) files to your client device. These will require customization based on your exact use case. These files were originally generated with a point-to-point OpenWRT-based VPN in mind.
|
||||
Please see [this page](docs/client-linux-ipsec.md).
|
||||
|
||||
#### Ubuntu Server example
|
||||
### OpenWrt Wireguard Clients
|
||||
|
||||
1. `sudo apt-get install strongswan libstrongswan-standard-plugins`: install strongSwan
|
||||
2. `/etc/ipsec.d/certs`: copy `<name>.crt` from `algo-master/configs/<server_ip>/ipsec/manual/<name>.crt`
|
||||
3. `/etc/ipsec.d/private`: copy `<name>.key` from `algo-master/configs/<server_ip>/ipsec/manual/<name>.key`
|
||||
4. `/etc/ipsec.d/cacerts`: copy `cacert.pem` from `algo-master/configs/<server_ip>/ipsec/manual/cacert.pem`
|
||||
5. `/etc/ipsec.secrets`: add your `user.key` to the list, e.g. `<server_ip> : ECDSA <name>.key`
|
||||
6. `/etc/ipsec.conf`: add the connection from `ipsec_user.conf` and ensure `leftcert` matches the `<name>.crt` filename
|
||||
7. `sudo ipsec restart`: pick up config changes
|
||||
8. `sudo ipsec up <conn-name>`: start the ipsec tunnel
|
||||
9. `sudo ipsec down <conn-name>`: shutdown the ipsec tunnel
|
||||
|
||||
One common use case is to let your server access your local LAN without going through the VPN. Set up a passthrough connection by adding the following to `/etc/ipsec.conf`:
|
||||
|
||||
conn lan-passthrough
|
||||
leftsubnet=192.168.1.1/24 # Replace with your LAN subnet
|
||||
rightsubnet=192.168.1.1/24 # Replace with your LAN subnet
|
||||
authby=never # No authentication necessary
|
||||
type=pass # passthrough
|
||||
auto=route # no need to ipsec up lan-passthrough
|
||||
|
||||
To configure the connection to come up at boot time replace `auto=add` with `auto=start`.
|
||||
Please see [this page](docs/client-openwrt-router-wireguard.md).
|
||||
|
||||
### Other Devices
|
||||
|
||||
|
@ -158,36 +152,80 @@ Depending on the platform, you may need one or multiple of the following files.
|
|||
|
||||
If you turned on the optional SSH tunneling role, then local user accounts will be created for each user in `config.cfg` and SSH authorized_key files for them will be in the `configs` directory (user.ssh.pem). SSH user accounts do not have shell access, cannot authenticate with a password, and only have limited tunneling options (e.g., `ssh -N` is required). This ensures that SSH users have the least access required to setup a tunnel and can perform no other actions on the Algo server.
|
||||
|
||||
Use the example command below to start an SSH tunnel by replacing `user` and `ip` with your own. Once the tunnel is setup, you can configure a browser or other application to use 127.0.0.1:1080 as a SOCKS proxy to route traffic through the Algo server.
|
||||
Use the example command below to start an SSH tunnel by replacing `<user>` and `<ip>` with your own. Once the tunnel is setup, you can configure a browser or other application to use 127.0.0.1:1080 as a SOCKS proxy to route traffic through the Algo server:
|
||||
|
||||
`ssh -D 127.0.0.1:1080 -f -q -C -N user@ip -i configs/<server_ip>/ssh-tunnel/<user>.pem`
|
||||
```bash
|
||||
ssh -D 127.0.0.1:1080 -f -q -C -N <user>@algo -i configs/<ip>/ssh-tunnel/<user>.pem -F configs/<ip>/ssh_config
|
||||
```
|
||||
|
||||
## SSH into Algo Server
|
||||
|
||||
Your Algo server is configured for key-only SSH access for administrative purposes. Open the Terminal app, `cd` into the `algo-master` directory where you originally downloaded Algo, and then use the command listed on the success message:
|
||||
|
||||
`ssh -i configs/algo.pem user@ip`
|
||||
```
|
||||
ssh -F configs/<ip>/ssh_config <hostname>
|
||||
```
|
||||
|
||||
where `user` is either `root` or `ubuntu` as listed on the success message, and `ip` is the IP address of your Algo server. If you find yourself regularly logging into the server then it will be useful to load your Algo ssh key automatically. Add the following snippet to the bottom of `~/.bash_profile` to add it to your shell environment permanently.
|
||||
where `<ip>` is the IP address of your Algo server. If you find yourself regularly logging into the server then it will be useful to load your Algo ssh key automatically. Add the following snippet to the bottom of `~/.bash_profile` to add it to your shell environment permanently:
|
||||
|
||||
`ssh-add ~/.ssh/algo > /dev/null 2>&1`
|
||||
```
|
||||
ssh-add ~/.ssh/algo > /dev/null 2>&1
|
||||
```
|
||||
|
||||
Alternatively, you can choose to include the generated configuration for any Algo servers created into your SSH config. Edit the file `~/.ssh/config` to include this directive at the top:
|
||||
|
||||
```
|
||||
Include <algodirectory>/configs/*/ssh_config
|
||||
```
|
||||
|
||||
where `<algodirectory>` is the directory where you cloned Algo.
|
||||
|
||||
## Adding or Removing Users
|
||||
|
||||
_If you chose to save the CA key during the deploy process,_ then Algo's own scripts can easily add and remove users from the VPN server.
|
||||
|
||||
1. Update the `users` list in your `config.cfg`
|
||||
2. Open a terminal, `cd` to the algo directory, and activate the virtual environment with `source env/bin/activate`
|
||||
2. Open a terminal, `cd` to the algo directory, and activate the virtual environment with `source .env/bin/activate`
|
||||
3. Run the command: `./algo update-users`
|
||||
|
||||
After this process completes, the Algo VPN server will contain only the users listed in the `config.cfg` file.
|
||||
|
||||
## Additional Documentation
|
||||
* [Deployment instructions, cloud provider setup instructions, and further client setup instructions available here.](docs/index.md)
|
||||
* [FAQ](docs/faq.md)
|
||||
* [Troubleshooting](docs/troubleshooting.md)
|
||||
* How Algo uses [Firewalls](docs/firewalls.md)
|
||||
|
||||
If you read all the documentation and have further questions, [join the chat on Gitter](https://gitter.im/trailofbits/algo).
|
||||
### Setup Instructions for Specific Cloud Providers
|
||||
* Configure [Amazon EC2](docs/cloud-amazon-ec2.md)
|
||||
* Configure [Azure](docs/cloud-azure.md)
|
||||
* Configure [DigitalOcean](docs/cloud-do.md)
|
||||
* Configure [Google Cloud Platform](docs/cloud-gce.md)
|
||||
* Configure [Vultr](docs/cloud-vultr.md)
|
||||
* Configure [CloudStack](docs/cloud-cloudstack.md)
|
||||
* Configure [Hetzner Cloud](docs/cloud-hetzner.md)
|
||||
|
||||
### Install and Deploy from Common Platforms
|
||||
* Deploy from [macOS](docs/deploy-from-macos.md)
|
||||
* Deploy from [Windows](docs/deploy-from-windows.md)
|
||||
* Deploy from [Google Cloud Shell](docs/deploy-from-cloudshell.md)
|
||||
* Deploy from a [Docker container](docs/deploy-from-docker.md)
|
||||
|
||||
### Setup VPN Clients to Connect to the Server
|
||||
* Setup [Android](docs/client-android.md) clients
|
||||
* Setup [Linux](docs/client-linux.md) clients with Ansible
|
||||
* Setup Ubuntu clients to use [WireGuard](docs/client-linux-wireguard.md)
|
||||
* Setup Linux clients to use [IPsec](docs/client-linux-ipsec.md)
|
||||
* Setup Apple devices to use [IPsec](docs/client-apple-ipsec.md)
|
||||
* Setup Macs running macOS 10.13 or older to use [WireGuard](docs/client-macos-wireguard.md)
|
||||
|
||||
### Advanced Deployment
|
||||
* Deploy to your own [Ubuntu](docs/deploy-to-ubuntu.md) server, and road warrior setup
|
||||
* Deploy from [Ansible](docs/deploy-from-ansible.md) non-interactively
|
||||
* Deploy onto a [cloud server at time of creation with shell script or cloud-init](docs/deploy-from-script-or-cloud-init-to-localhost.md)
|
||||
* Deploy to an [unsupported cloud provider](docs/deploy-to-unsupported-cloud.md)
|
||||
* Deploy to your own [FreeBSD](docs/deploy-to-freebsd.md) server
|
||||
|
||||
If you've read all the documentation and have further questions, [create a new discussion](https://github.com/trailofbits/algo/discussions).
|
||||
|
||||
## Endorsements
|
||||
|
||||
|
|
9
SECURITY.md
Normal file
9
SECURITY.md
Normal file
|
@ -0,0 +1,9 @@
|
|||
# Reporting Security Issues
|
||||
|
||||
The Algo team and community take security bugs in Algo seriously. We appreciate your efforts to responsibly disclose your findings, and will make every effort to acknowledge your contributions.
|
||||
|
||||
To report a security issue, please use the GitHub Security Advisory ["Report a Vulnerability"](https://github.com/trailofbits/algo/security/) tab.
|
||||
|
||||
The Algo team will send a response indicating the next steps in handling your report. After the initial reply to your report, the security team will keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance.
|
||||
|
||||
Report security bugs in third-party modules to the person or team maintaining the module.
|
36
Vagrantfile
vendored
Normal file
36
Vagrantfile
vendored
Normal file
|
@ -0,0 +1,36 @@
|
|||
Vagrant.configure("2") do |config|
|
||||
config.vm.box = "bento/ubuntu-20.04"
|
||||
|
||||
config.vm.provider "virtualbox" do |v|
|
||||
v.name = "algo-20.04"
|
||||
v.memory = "512"
|
||||
v.cpus = "1"
|
||||
end
|
||||
|
||||
config.vm.synced_folder "./", "/opt/algo", create: true
|
||||
|
||||
config.vm.provision "ansible_local" do |ansible|
|
||||
ansible.playbook = "/opt/algo/main.yml"
|
||||
|
||||
# https://github.com/hashicorp/vagrant/issues/12204
|
||||
ansible.pip_install_cmd = "sudo apt-get install -y python3-pip python-is-python3 && sudo ln -s -f /usr/bin/pip3 /usr/bin/pip"
|
||||
ansible.install_mode = "pip_args_only"
|
||||
ansible.pip_args = "-r /opt/algo/requirements.txt"
|
||||
ansible.inventory_path = "/opt/algo/inventory"
|
||||
ansible.limit = "local"
|
||||
ansible.verbose = "-vvvv"
|
||||
ansible.extra_vars = {
|
||||
provider: "local",
|
||||
server: "localhost",
|
||||
ssh_user: "",
|
||||
endpoint: "127.0.0.1",
|
||||
ondemand_cellular: true,
|
||||
ondemand_wifi: false,
|
||||
dns_adblocking: true,
|
||||
ssh_tunneling: true,
|
||||
store_pki: true,
|
||||
tests: true,
|
||||
no_log: false
|
||||
}
|
||||
end
|
||||
end
|
2
algo
2
algo
|
@ -4,7 +4,7 @@ set -e
|
|||
|
||||
if [ -z ${VIRTUAL_ENV+x} ]
|
||||
then
|
||||
ACTIVATE_SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/env/bin/activate"
|
||||
ACTIVATE_SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.env/bin/activate"
|
||||
if [ -f "$ACTIVATE_SCRIPT" ]
|
||||
then
|
||||
# shellcheck source=/dev/null
|
||||
|
|
|
@ -11,7 +11,7 @@ usage() {
|
|||
retcode="${1:-0}"
|
||||
echo "To run algo from Docker:"
|
||||
echo ""
|
||||
echo "docker run --cap-drop=all -it -v <path to configurations>:"${DATA_DIR}" trailofbits/algo:latest"
|
||||
echo "docker run --cap-drop=all -it -v <path to configurations>:"${DATA_DIR}" ghcr.io/trailofbits/algo:latest"
|
||||
echo ""
|
||||
exit ${retcode}
|
||||
}
|
||||
|
@ -37,7 +37,7 @@ fi
|
|||
tr -d '\r' < "${DATA_DIR}"/config.cfg > "${ALGO_DIR}"/config.cfg
|
||||
test -d "${DATA_DIR}"/configs && rsync -qLktr --delete "${DATA_DIR}"/configs "${ALGO_DIR}"/
|
||||
|
||||
"${ALGO_DIR}"/algo ${ALGO_ARGS}
|
||||
"${ALGO_DIR}"/algo "${ALGO_ARGS[@]}"
|
||||
retcode=${?}
|
||||
|
||||
rsync -qLktr --delete "${ALGO_DIR}"/configs "${DATA_DIR}"/
|
||||
|
|
|
@ -68,10 +68,10 @@ elif [[ -f LICENSE && ${STAT} ]]; then
|
|||
fi
|
||||
|
||||
# The Python version might be useful to know.
|
||||
if [[ -x ./env/bin/python ]]; then
|
||||
./env/bin/python --version 2>&1
|
||||
if [[ -x ./.env/bin/python3 ]]; then
|
||||
./.env/bin/python3 --version 2>&1
|
||||
elif [[ -f ./algo ]]; then
|
||||
echo "env/bin/python not found: has 'python -m virtualenv ...' been run?"
|
||||
echo ".env/bin/python3 not found: has 'python3 -m virtualenv ...' been run?"
|
||||
fi
|
||||
|
||||
# Just print out all command line arguments, which are expected
|
||||
|
|
|
@ -6,6 +6,7 @@ host_key_checking = False
|
|||
timeout = 60
|
||||
stdout_callback = default
|
||||
display_skipped_hosts = no
|
||||
force_valid_group_names = ignore
|
||||
|
||||
[paramiko_connection]
|
||||
record_host_keys = False
|
||||
|
|
172
config.cfg
172
config.cfg
|
@ -1,50 +1,37 @@
|
|||
---
|
||||
|
||||
# This is the list of users to generate.
|
||||
# Every device must have a unique username.
|
||||
# You can generate up to 250 users at one time.
|
||||
# Every device must have a unique user.
|
||||
# You can add up to 65,534 new users over the lifetime of an AlgoVPN.
|
||||
# User names with leading 0's or containing only numbers should be escaped in double quotes, e.g. "000dan" or "123".
|
||||
# Email addresses are not allowed.
|
||||
users:
|
||||
- phone
|
||||
- laptop
|
||||
- desktop
|
||||
|
||||
### Advanced users only below this line ###
|
||||
### Review these options BEFORE you run Algo, as they are very difficult/impossible to change after the server is deployed.
|
||||
|
||||
# Store the PKI in a ram disk. Enabled only if store_pki (retain the PKI) is set to false
|
||||
# Supports on MacOS and Linux only (including Windows Subsystem for Linux)
|
||||
pki_in_tmpfs: true
|
||||
|
||||
# If True re-init all existing certificates. Boolean
|
||||
keys_clean_all: False
|
||||
|
||||
# Clean up cloud python environments
|
||||
clean_environment: false
|
||||
# Change default SSH port for the cloud roles only
|
||||
# It doesn't apply if you deploy to your existing Ubuntu Server
|
||||
ssh_port: 4160
|
||||
|
||||
# Deploy StrongSwan to enable IPsec support
|
||||
ipsec_enabled: true
|
||||
|
||||
# StrongSwan log level
|
||||
# https://wiki.strongswan.org/projects/strongswan/wiki/LoggerConfiguration
|
||||
strongswan_log_level: 2
|
||||
|
||||
# rightsourceip for ipsec
|
||||
# ipv4
|
||||
strongswan_network: 10.19.48.0/24
|
||||
# ipv6
|
||||
strongswan_network_ipv6: 'fd9d:bc11:4020::/48'
|
||||
|
||||
# Deploy WireGuard
|
||||
# WireGuard will listen on 51820/UDP. You might need to change to another port
|
||||
# if your network blocks this one. Be aware that 53/UDP (DNS) is blocked on some
|
||||
# mobile data networks.
|
||||
wireguard_enabled: true
|
||||
wireguard_port: 51820
|
||||
# If you're behind NAT or a firewall and you want to receive incoming connections long after network traffic has gone silent.
|
||||
# This option will keep the "connection" open in the eyes of NAT.
|
||||
# See: https://www.wireguard.com/quickstart/#nat-and-firewall-traversal-persistence
|
||||
wireguard_PersistentKeepalive: 0
|
||||
|
||||
# WireGuard network configuration
|
||||
wireguard_network_ipv4: 10.19.49.0/24
|
||||
wireguard_network_ipv6: fd9d:bc11:4021::/48
|
||||
# This feature allows you to configure the Algo server to send outbound traffic
|
||||
# through a different external IP address than the one you are establishing the VPN connection with.
|
||||
# More info https://trailofbits.github.io/algo/cloud-alternative-ingress-ip.html
|
||||
# Available for the following cloud providers:
|
||||
# - DigitalOcean
|
||||
alternative_ingress_ip: false
|
||||
|
||||
# Reduce the MTU of the VPN tunnel
|
||||
# Some cloud and internet providers use a smaller MTU (Maximum Transmission
|
||||
|
@ -62,13 +49,35 @@ reduce_mtu: 0
|
|||
# /etc/systemd/system/dnsmasq.service.d/100-CustomLimitations.conf
|
||||
adblock_lists:
|
||||
- "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts"
|
||||
- "https://hosts-file.net/ad_servers.txt"
|
||||
|
||||
# Enable DNS encryption.
|
||||
# If 'false', 'dns_servers' should be specified below.
|
||||
# DNS encryption can not be disabled if DNS adblocking is enabled
|
||||
dns_encryption: true
|
||||
|
||||
# Block traffic between connected clients. Change this to false to enable
|
||||
# connected clients to reach each other, as well as other computers on the
|
||||
# same LAN as your Algo server (i.e. the "road warrior" setup). In this
|
||||
# case, you may also want to enable SMB/CIFS and NETBIOS traffic below.
|
||||
BetweenClients_DROP: true
|
||||
|
||||
# Block SMB/CIFS traffic
|
||||
block_smb: true
|
||||
|
||||
# Block NETBIOS traffic
|
||||
block_netbios: true
|
||||
|
||||
# Your Algo server will automatically install security updates. Some updates
|
||||
# require a reboot to take effect but your Algo server will not reboot itself
|
||||
# automatically unless you change 'enabled' below from 'false' to 'true', in
|
||||
# which case a reboot will take place if necessary at the time specified (as
|
||||
# HH:MM) in the time zone of your Algo server. The default time zone is UTC.
|
||||
unattended_reboot:
|
||||
enabled: false
|
||||
time: 06:00
|
||||
|
||||
### Advanced users only below this line ###
|
||||
|
||||
# DNS servers which will be used if 'dns_encryption' is 'true'. Multiple
|
||||
# providers may be specified, but avoid mixing providers that filter results
|
||||
# (like Cisco) with those that don't (like Cloudflare) or you could get
|
||||
|
@ -79,10 +88,17 @@ dnscrypt_servers:
|
|||
ipv4:
|
||||
- cloudflare
|
||||
# - google
|
||||
# - <YourCustomServer> # E.g., if using NextDNS, this will be something like NextDNS-abc123.
|
||||
# You must also fill in custom_server_stamps below. You may specify
|
||||
# multiple custom servers.
|
||||
ipv6:
|
||||
- cloudflare-ipv6
|
||||
|
||||
custom_server_stamps:
|
||||
# YourCustomServer: 'sdns://...'
|
||||
|
||||
# DNS servers which will be used if 'dns_encryption' is 'false'.
|
||||
# Fallback resolvers for systemd-resolved
|
||||
# The default is to use Cloudflare.
|
||||
dns_servers:
|
||||
ipv4:
|
||||
|
@ -92,21 +108,38 @@ dns_servers:
|
|||
- 2606:4700:4700::1111
|
||||
- 2606:4700:4700::1001
|
||||
|
||||
# Store the PKI in a ram disk. Enabled only if store_pki (retain the PKI) is set to false
|
||||
# Supports on MacOS and Linux only (including Windows Subsystem for Linux)
|
||||
pki_in_tmpfs: true
|
||||
|
||||
# Set this to 'true' when running './algo update-users' if you want ALL users to get new certs, not just new users.
|
||||
keys_clean_all: false
|
||||
|
||||
# StrongSwan log level
|
||||
# https://wiki.strongswan.org/projects/strongswan/wiki/LoggerConfiguration
|
||||
strongswan_log_level: 2
|
||||
|
||||
# rightsourceip for ipsec
|
||||
# ipv4
|
||||
strongswan_network: 10.48.0.0/16
|
||||
# ipv6
|
||||
strongswan_network_ipv6: '2001:db8:4160::/48'
|
||||
|
||||
# If you're behind NAT or a firewall and you want to receive incoming connections long after network traffic has gone silent.
|
||||
# This option will keep the "connection" open in the eyes of NAT.
|
||||
# See: https://www.wireguard.com/quickstart/#nat-and-firewall-traversal-persistence
|
||||
wireguard_PersistentKeepalive: 0
|
||||
|
||||
# WireGuard network configuration
|
||||
wireguard_network_ipv4: 10.49.0.0/16
|
||||
wireguard_network_ipv6: 2001:db8:a160::/48
|
||||
|
||||
# Randomly generated IP address for the local dns resolver
|
||||
local_service_ip: "{{ '172.16.0.1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}"
|
||||
local_service_ipv6: "{{ 'fd00::1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}"
|
||||
|
||||
# Your Algo server will automatically install security updates. Some updates
|
||||
# require a reboot to take effect but your Algo server will not reboot itself
|
||||
# automatically unless you change 'enabled' below from 'false' to 'true', in
|
||||
# which case a reboot will take place if necessary at the time specified (as
|
||||
# HH:MM) in the time zone of your Algo server. The default time zone is UTC.
|
||||
unattended_reboot:
|
||||
enabled: false
|
||||
time: 06:00
|
||||
|
||||
# Block traffic between connected clients
|
||||
BetweenClients_DROP: true
|
||||
# Hide sensitive data
|
||||
no_log: true
|
||||
|
||||
congrats:
|
||||
common: |
|
||||
|
@ -121,48 +154,73 @@ congrats:
|
|||
ca_key_pass: |
|
||||
"# The CA key password is {{ CA_password|default(omit) }} #"
|
||||
ssh_access: |
|
||||
"# Shell access: ssh -i {{ ansible_ssh_private_key_file|default(omit) }} {{ ansible_ssh_user|default(omit) }}@{{ ansible_ssh_host|default(omit) }} #"
|
||||
"# Shell access: ssh -F configs/{{ ansible_ssh_host|default(omit) }}/ssh_config {{ algo_server_name }} #"
|
||||
|
||||
SSH_keys:
|
||||
comment: algo@ssh
|
||||
private: configs/algo.pem
|
||||
private_tmp: /tmp/algo-ssh.pem
|
||||
public: configs/algo.pem.pub
|
||||
|
||||
cloud_providers:
|
||||
azure:
|
||||
size: Standard_B1S
|
||||
image: 19.04
|
||||
osDisk:
|
||||
# The storage account type to use for the OS disk. Possible values:
|
||||
# 'Standard_LRS', 'Premium_LRS', 'StandardSSD_LRS', 'UltraSSD_LRS',
|
||||
# 'Premium_ZRS', 'StandardSSD_ZRS', 'PremiumV2_LRS'.
|
||||
type: Standard_LRS
|
||||
image:
|
||||
publisher: Canonical
|
||||
offer: 0001-com-ubuntu-minimal-jammy-daily
|
||||
sku: minimal-22_04-daily-lts
|
||||
version: latest
|
||||
digitalocean:
|
||||
# See docs for extended droplet options, pricing, and availability.
|
||||
# Possible values: 's-1vcpu-512mb-10gb', 's-1vcpu-1gb', ...
|
||||
size: s-1vcpu-1gb
|
||||
image: "ubuntu-19-04-x64"
|
||||
image: "ubuntu-22-04-x64"
|
||||
ec2:
|
||||
# Change the encrypted flag to "true" to enable AWS volume encryption, for encryption of data at rest.
|
||||
# Warning: the Algo script will take approximately 6 minutes longer to complete.
|
||||
encrypted: false
|
||||
# Change the encrypted flag to "false" to disable AWS volume encryption.
|
||||
encrypted: true
|
||||
# Set use_existing_eip to "true" if you want to use a pre-allocated Elastic IP
|
||||
# Additional prompt will be raised to determine which IP to use
|
||||
use_existing_eip: false
|
||||
size: t2.micro
|
||||
image:
|
||||
name: "ubuntu-disco-19.04"
|
||||
name: "ubuntu-jammy-22.04"
|
||||
arch: x86_64
|
||||
owner: "099720109477"
|
||||
# Change instance_market_type from "on-demand" to "spot" to launch a spot
|
||||
# instance. See deploy-from-ansible.md for spot's additional IAM permission
|
||||
instance_market_type: on-demand
|
||||
gce:
|
||||
size: f1-micro
|
||||
image: ubuntu-1904
|
||||
size: e2-micro
|
||||
image: ubuntu-2204-lts
|
||||
external_static_ip: false
|
||||
lightsail:
|
||||
size: nano_1_0
|
||||
image: ubuntu_18_04
|
||||
size: nano_2_0
|
||||
image: ubuntu_22_04
|
||||
scaleway:
|
||||
size: START1-S
|
||||
image: Ubuntu Bionic Beaver
|
||||
size: DEV1-S
|
||||
image: Ubuntu 22.04 Jammy Jellyfish
|
||||
arch: x86_64
|
||||
hetzner:
|
||||
server_type: cx22
|
||||
image: ubuntu-22.04
|
||||
openstack:
|
||||
flavor_ram: ">=512"
|
||||
image: Ubuntu-18.04
|
||||
image: Ubuntu-22.04
|
||||
cloudstack:
|
||||
size: Micro
|
||||
image: Linux Ubuntu 22.04 LTS 64-bit
|
||||
disk: 10
|
||||
vultr:
|
||||
os: Ubuntu 19.04 x64
|
||||
size: 1024 MB RAM,25 GB SSD,1.00 TB BW
|
||||
os: Ubuntu 22.04 LTS x64
|
||||
size: vc2-1c-1gb
|
||||
linode:
|
||||
type: g6-nanode-1
|
||||
image: linode/ubuntu22.04
|
||||
local:
|
||||
|
||||
fail_hint:
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
ansible_ssh_user: "{{ 'root' if client_ip == 'localhost' else ssh_user }}"
|
||||
vpn_user: "{{ vpn_user }}"
|
||||
IP_subject_alt_name: "{{ server_ip }}"
|
||||
ansible_python_interpreter: "/usr/bin/python3"
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
|
||||
- name: Configure the client and install required software
|
||||
hosts: client-host
|
||||
|
|
37
docs/client-linux-ipsec.md
Normal file
37
docs/client-linux-ipsec.md
Normal file
|
@ -0,0 +1,37 @@
|
|||
# Linux strongSwan IPsec Clients (e.g., OpenWRT, Ubuntu Server, etc.)
|
||||
|
||||
Install strongSwan, then copy the included ipsec_user.conf, ipsec_user.secrets, user.crt (user certificate), and user.key (private key) files to your client device. These will require customization based on your exact use case. These files were originally generated with a point-to-point OpenWRT-based VPN in mind.
|
||||
|
||||
## Ubuntu Server example
|
||||
|
||||
1. `sudo apt-get install strongswan libstrongswan-standard-plugins`: install strongSwan
|
||||
2. `/etc/ipsec.d/certs`: copy `<name>.crt` from `algo-master/configs/<server_ip>/ipsec/.pki/certs/<name>.crt`
|
||||
3. `/etc/ipsec.d/private`: copy `<name>.key` from `algo-master/configs/<server_ip>/ipsec/.pki/private/<name>.key`
|
||||
4. `/etc/ipsec.d/cacerts`: copy `cacert.pem` from `algo-master/configs/<server_ip>/ipsec/manual/cacert.pem`
|
||||
5. `/etc/ipsec.secrets`: add your `user.key` to the list, e.g. `<server_ip> : ECDSA <name>.key`
|
||||
6. `/etc/ipsec.conf`: add the connection from `ipsec_user.conf` and ensure `leftcert` matches the `<name>.crt` filename
|
||||
7. `sudo ipsec restart`: pick up config changes
|
||||
8. `sudo ipsec up <conn-name>`: start the ipsec tunnel
|
||||
9. `sudo ipsec down <conn-name>`: shutdown the ipsec tunnel
|
||||
|
||||
One common use case is to let your server access your local LAN without going through the VPN. Set up a passthrough connection by adding the following to `/etc/ipsec.conf`:
|
||||
|
||||
conn lan-passthrough
|
||||
leftsubnet=192.168.1.1/24 # Replace with your LAN subnet
|
||||
rightsubnet=192.168.1.1/24 # Replace with your LAN subnet
|
||||
authby=never # No authentication necessary
|
||||
type=pass # passthrough
|
||||
auto=route # no need to ipsec up lan-passthrough
|
||||
|
||||
To configure the connection to come up at boot time replace `auto=add` with `auto=start`.
|
||||
|
||||
## Notes on SELinux
|
||||
|
||||
If you use a system with SELinux enabled you might need to set appropriate file contexts:
|
||||
|
||||
````
|
||||
semanage fcontext -a -t ipsec_key_file_t "$(pwd)(/.*)?"
|
||||
restorecon -R -v $(pwd)
|
||||
````
|
||||
|
||||
See [this comment](https://github.com/trailofbits/algo/issues/263#issuecomment-328053950).
|
|
@ -2,16 +2,16 @@
|
|||
|
||||
## Install WireGuard
|
||||
|
||||
To connect to your AlgoVPN using [WireGuard](https://www.wireguard.com) from Ubuntu, first install WireGuard:
|
||||
To connect to your AlgoVPN using [WireGuard](https://www.wireguard.com) from Ubuntu, make sure your system is up-to-date then install WireGuard:
|
||||
|
||||
```shell
|
||||
# Add the WireGuard repository:
|
||||
sudo add-apt-repository ppa:wireguard/wireguard
|
||||
# Update your system:
|
||||
sudo apt update && sudo apt upgrade
|
||||
|
||||
# Update the list of available packages (not necessary on 18.04 or later):
|
||||
sudo apt update
|
||||
# If the file /var/run/reboot-required exists then reboot:
|
||||
[ -e /var/run/reboot-required ] && sudo reboot
|
||||
|
||||
# Install the tools and kernel module:
|
||||
# Install WireGuard:
|
||||
sudo apt install wireguard openresolv
|
||||
```
|
||||
|
||||
|
@ -47,3 +47,16 @@ sudo systemctl enable wg-quick@wg0
|
|||
```
|
||||
|
||||
If your Linux distribution does not use `systemd` you can bring up WireGuard with `sudo wg-quick up wg0`.
|
||||
|
||||
## Using a DNS Search Domain
|
||||
|
||||
As of the `v1.0.20200510` release of `wireguard-tools` WireGuard supports setting a DNS search domain. In your `wg0.conf` file a non-numeric entry on the `DNS` line will be used as a search domain. For example this:
|
||||
```
|
||||
DNS = 172.27.153.31, fd00::b:991f, mydomain.com
|
||||
```
|
||||
will cause your `/etc/resolv.conf` to contain:
|
||||
```
|
||||
search mydomain.com
|
||||
nameserver 172.27.153.31
|
||||
nameserver fd00::b:991f
|
||||
```
|
||||
|
|
88
docs/client-openwrt-router-wireguard.md
Normal file
88
docs/client-openwrt-router-wireguard.md
Normal file
|
@ -0,0 +1,88 @@
|
|||
# Using Router with OpenWRT as a Client with WireGuard
|
||||
This scenario is useful in case you want to use vpn with devices which has no vpn capability like smart tv, or make vpn connection available via router for multiple devices.
|
||||
This is a tested, working scenario with following environment:
|
||||
|
||||
- algo installed ubuntu at digitalocean
|
||||
- client side router "TP-Link TL-WR1043ND" with openwrt ver. 21.02.1. [Openwrt Install instructions](https://openwrt.org/toh/tp-link/tl-wr1043nd)
|
||||
- or client side router "TP-Link Archer C20i AC750" with openwrt ver. 21.02.1. [Openwrt install instructions](https://openwrt.org/toh/tp-link/archer_c20i)
|
||||
see compatible device list at https://openwrt.org/toh/start . Theoretically any of the device on list should work
|
||||
|
||||
|
||||
|
||||
## Router setup
|
||||
Make sure that you have
|
||||
- router with openwrt installed,
|
||||
- router is connected to internet,
|
||||
- router and device in front of router does not have same ip . By default openwrt have 192.168.1.1 if so change it to something like 192.168.2.1
|
||||
### Install required packages(WebUI)
|
||||
- Open router web UI (mostly http://192.168.1.1 )
|
||||
- Login. (by default username: root, password:<empty>
|
||||
- System -> Software, click "Update lists"
|
||||
- Install following packages wireguard-tools, kmod-wireguard, luci-app-wireguard, wireguard, kmod-crypto-sha256, kmod-crypto-sha1, kmod-crypto-md5
|
||||
- restart router
|
||||
|
||||
### Alternative Install required packages(ssh)
|
||||
- Open router web UI (mostly http://192.168.1.1 )
|
||||
- ssh root@192.168.1.1
|
||||
- opkg update
|
||||
- opkg install wireguard-tools, kmod-wireguard, luci-app-wireguard, wireguard, kmod-crypto-sha256, kmod-crypto-sha1, kmod-crypto-md5
|
||||
- reboot
|
||||
|
||||
### Create an Interface(WebUI)
|
||||
- Open router web UI
|
||||
- Navigate Network -> Interface
|
||||
- Click "Add new interface"
|
||||
- Give a Name. e.g. `AlgoVpn`
|
||||
- Select Protocol. `Wireguard VPN`
|
||||
- click `Create Interface`
|
||||
- In *General Settings* tab
|
||||
- `Bring up on boot` *checked*
|
||||
- Private key: `Interface -> Private Key` from algo config file
|
||||
- Ip Address: `Interface -> Address` from algo config file
|
||||
- In *Peers* tab
|
||||
- Click add
|
||||
- Name `algo`
|
||||
- Public key: `[Peer]->PublicKey` from algo config file
|
||||
- Preshared key: `[Peer]->PresharedKey` from algo config file
|
||||
- Allowed IPs: 0.0.0.0/0
|
||||
- Route Allowed IPs: checked
|
||||
- Endpoint Host: `[Peer]->Endpoint` ip from algo config file
|
||||
- Endpoint Port: `[Peer]->Endpoint` port from algo config file
|
||||
- Persistent Keep Alive: `25`
|
||||
- Click Save & Save Apply
|
||||
|
||||
### Configure Firewall(WebUI)
|
||||
- Open router web UI
|
||||
- Navigate to Network -> Firewall
|
||||
- Click `Add configuration`:
|
||||
- Name: e.g. ivpn_fw
|
||||
- Input: Reject
|
||||
- Output: Accept
|
||||
- Forward: Reject
|
||||
- Masquerading: Checked
|
||||
- MSS clamping: Checked
|
||||
- Covered networks: Select created VPN interface
|
||||
- Allow forward to destination zones - Unspecified
|
||||
- Allow forward from source zones - lan
|
||||
- Click Save & Save Apply
|
||||
- Reboot router
|
||||
|
||||
|
||||
There may be additional configuration required depending on environment like dns configuration.
|
||||
|
||||
You can also verify the configuration using ssh. /etc/config/network. It should look like
|
||||
|
||||
```
|
||||
config interface 'algo'
|
||||
option proto 'wireguard'
|
||||
list addresses '10.0.0.2/32'
|
||||
option private_key '......' # The private key generated by itself just now
|
||||
|
||||
config wireguard_wg0
|
||||
option public_key '......' # Server's public key
|
||||
option route_allowed_ips '1'
|
||||
list allowed_ips '0.0.0.0/0'
|
||||
option endpoint_host '......' # Server's public ip address
|
||||
option endpoint_port '51820'
|
||||
option persistent_keepalive '25'
|
||||
```
|
|
@ -1,6 +0,0 @@
|
|||
# Windows client setup
|
||||
|
||||
## Installation via profiles
|
||||
|
||||
1. Install the [WireGuard VPN Client](https://www.wireguard.com/install/#windows-7-8-81-10-2012-2016-2019) and start it.
|
||||
2. Import the corresponding `wireguard/<name>.conf` file to your device, then setup a new connection with it.
|
22
docs/cloud-alternative-ingress-ip.md
Normal file
22
docs/cloud-alternative-ingress-ip.md
Normal file
|
@ -0,0 +1,22 @@
|
|||
# Alternative Ingress IP
|
||||
|
||||
This feature allows you to configure the Algo server to send outbound traffic through a different external IP address than the one you are establishing the VPN connection with.
|
||||
|
||||

|
||||
|
||||
Additional info might be found in [this issue](https://github.com/trailofbits/algo/issues/1047)
|
||||
|
||||
|
||||
|
||||
|
||||
#### Caveats
|
||||
|
||||
##### Extra charges
|
||||
|
||||
- DigitalOcean: Floating IPs are free when assigned to a Droplet, but after manually deleting a Droplet you need to also delete the Floating IP or you'll get charged for it.
|
||||
|
||||
##### IPv6
|
||||
|
||||
Some cloud providers provision a VM with a `/128` address block size. This is the only IPv6 address provided, and it is used for both outbound and incoming traffic.
|
||||
|
||||
If the provided address block size is bigger, e.g., `/64`, Algo uses a separate address from the one assigned to the server to send outbound IPv6 traffic.
|
|
@ -6,18 +6,28 @@ Creating an Amazon AWS account requires giving Amazon a phone number that can re
|
|||
|
||||
### Select an EC2 plan
|
||||
|
||||
The cheapest EC2 plan you can choose is the "Free Plan" a.k.a. the "AWS Free Tier." It is only available to new AWS customers, it has limits on usage, and it converts to standard pricing after 12 months (the "introductory period"). After you exceed the usage limits, after the 12 month period, or if you are an existing AWS customer, then you will pay standard pay-as-you-go service prices.
|
||||
The cheapest EC2 plan you can choose is the "Free Plan" a.k.a. the ["AWS Free Tier"](https://aws.amazon.com/free/). It is only available to new AWS customers, it has limits on usage, and it converts to standard pricing after 12 months (the "introductory period"). After you exceed the usage limits, after the 12 month period, or if you are an existing AWS customer, then you will pay standard pay-as-you-go service prices.
|
||||
|
||||
*Note*: Your Algo instance will not stop working when you hit the bandwidth limit, you will just start accumulating service charges on your AWS account.
|
||||
|
||||
As of the time of this writing (July 2018), the Free Tier limits include "750 hours of Amazon EC2 Linux t2.micro instance usage" per month, 15 GB of bandwidth (outbound) per month, and 30 GB of cloud storage. Algo will not even use 1% of the storage limit, but you may have to monitor your bandwidth usage or keep an eye out for the email from Amazon when you are about to exceed the Free Tier limits.
|
||||
|
||||
If you are not eligible for the free tier plan or have passed the 12 months of the introductory period, you can switch to [AWS Graviton](https://aws.amazon.com/ec2/graviton/) instances that are generally cheaper. To use the graviton instances, make the following changes in the ec2 section of your `config.cfg` file:
|
||||
* Set the `size` to `t4g.nano`
|
||||
* Set the `arch` to `arm64`
|
||||
|
||||
> Currently, among all the instance sizes available on AWS, the t4g.nano instance is the least expensive option that does not require any promotional offers. However, AWS is currently running a promotion that provides a free trial of the `t4g.small` instance until December 31, 2023, which is available to all customers. For more information about this promotion, please refer to the [documentation](https://aws.amazon.com/ec2/faqs/#t4g-instances).
|
||||
|
||||
Additional configurations are documented in the [EC2 section of the deploy from ansible guide](https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md#amazon-ec2)
|
||||
|
||||
### Create an AWS permissions policy
|
||||
|
||||
In the AWS console, find the policies menu: click Services > IAM > Policies. Click Create Policy.
|
||||
|
||||
Here, you have the policy editor. Switch to the JSON tab and copy-paste over the existing empty policy with [the minimum required AWS policy needed for Algo deployment](https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md#minimum-required-iam-permissions-for-deployment).
|
||||
|
||||
When prompted to name the policy, name it `AlgoVPN_Provisioning`.
|
||||
|
||||

|
||||
|
||||
### Set up an AWS user
|
||||
|
@ -48,22 +58,27 @@ On the final screen, click the Download CSV button. This file includes the AWS a
|
|||
|
||||
After you have downloaded Algo and installed its dependencies, the next step is running Algo to provision the VPN server on your AWS account.
|
||||
|
||||
First you will be asked which server type to setup. You would want to enter "2" to use Amazon EC2.
|
||||
First you will be asked which server type to setup. You would want to enter "3" to use Amazon EC2.
|
||||
|
||||
```
|
||||
$ ./algo
|
||||
|
||||
What provider would you like to use?
|
||||
1. DigitalOcean
|
||||
2. Amazon EC2
|
||||
3. Microsoft Azure
|
||||
4. Google Compute Engine
|
||||
5. Scaleway
|
||||
6. OpenStack (DreamCompute optimised)
|
||||
7. Install to existing Ubuntu 16.04 server (Advanced)
|
||||
2. Amazon Lightsail
|
||||
3. Amazon EC2
|
||||
4. Microsoft Azure
|
||||
5. Google Compute Engine
|
||||
6. Hetzner Cloud
|
||||
7. Vultr
|
||||
8. Scaleway
|
||||
9. OpenStack (DreamCompute optimised)
|
||||
10. CloudStack (Exoscale optimised)
|
||||
11. Linode
|
||||
12. Install to existing Ubuntu server (for more advanced users)
|
||||
|
||||
Enter the number of your desired provider
|
||||
: 2
|
||||
: 3
|
||||
```
|
||||
|
||||
Next you will be asked for the AWS Access Key (Access Key ID) and AWS Secret Key (Secret Access Key) that you received in the CSV file when you setup the account (don't worry if you don't see your text entered in the console; the key input is hidden here by Algo).
|
||||
|
@ -116,4 +131,5 @@ Enter the number of your desired region
|
|||
You will then be asked the remainder of the standard Algo setup questions.
|
||||
|
||||
## Cleanup
|
||||
|
||||
If you've installed Algo onto EC2 multiple times, your AWS account may become cluttered with unused or deleted resources e.g. instances, VPCs, subnets, etc. This may cause future installs to fail. The easiest way to clean up after you're done with a server is to go to "CloudFormation" from the console and delete the CloudFormation stack associated with that server. Please note that unless you've enabled termination protection on your instance, deleting the stack this way will delete your instance without warning, so be sure you are deleting the correct stack.
|
||||
|
|
11
docs/cloud-cloudstack.md
Normal file
11
docs/cloud-cloudstack.md
Normal file
|
@ -0,0 +1,11 @@
|
|||
### Configuration file
|
||||
|
||||
Algo scripts will ask you for the API detail. You need to fetch the API credentials and the endpoint from the provider control panel.
|
||||
|
||||
Example for Exoscale (European cloud provider exposing CloudStack API), visit https://portal.exoscale.com/u/<your@account>/account/profile/api to gather the required information: CloudStack api key and secret.
|
||||
|
||||
```bash
|
||||
export CLOUDSTACK_KEY="<your api key>"
|
||||
export CLOUDSTACK_SECRET="<your secret>"
|
||||
export CLOUDSTACK_ENDPOINT="https://api.exoscale.com/compute"
|
||||
```
|
|
@ -18,6 +18,18 @@ You will be returned to the **Tokens/Keys** tab, and your new key will be shown
|
|||
|
||||
Copy or note down the hash that shows below the name you entered, as this will be necessary for the steps below. This value will disappear if you leave this page, and you'll need to regenerate it if you forget it.
|
||||
|
||||
## Select a Droplet (optional)
|
||||
|
||||
The default option is the `s-1vcpu-1gb` because it is available in all regions. However, you may want to switch to a cheaper droplet such as `s-1vcpu-512mb-10gb` even though it is not available in all regions. This can be edited in the [Configuration File](config.cfg) under `cloud_providers > digitalocean > size`. See this brief comparison between the two droplets below:
|
||||
|
||||
| Droplet Type | Monthly Cost | Bandwidth | Availability |
|
||||
|:--|:-:|:-:|:--|
|
||||
| `s-1vcpu-512mb-10gb` | $4/month | 0.5 TB | Limited |
|
||||
| `s-1vcpu-1gb` | $6/month | 1.0 TB | All regions |
|
||||
| ... | ... | ... | ... |
|
||||
|
||||
*Note: Exceeding bandwidth limits costs $0.01/GiB at time of writing ([docs](https://docs.digitalocean.com/products/billing/bandwidth/#droplets)). See the live list of droplets [here](https://slugs.do-api.dev/).*
|
||||
|
||||
## Using DigitalOcean with Algo (interactive)
|
||||
|
||||
These steps are for those who run Algo using Docker or using the `./algo` command.
|
||||
|
|
|
@ -38,4 +38,4 @@ gcloud services enable compute.googleapis.com
|
|||
**Attention:** take care of the `configs/gce.json` file, which contains the credentials to manage your Google Cloud account, including create and delete servers on this project.
|
||||
|
||||
|
||||
There are more advanced arguments available for deploynment [using ansible](deploy-from-ansible.md).
|
||||
There are more advanced arguments available for deployment [using ansible](deploy-from-ansible.md).
|
||||
|
|
3
docs/cloud-hetzner.md
Normal file
3
docs/cloud-hetzner.md
Normal file
|
@ -0,0 +1,3 @@
|
|||
## API Token
|
||||
|
||||
Sign in into the [Hetzner Cloud Console](https://console.hetzner.cloud/) choose a project, go to `Security` → `API Tokens`, and `Generate API Token` with `Read & Write` access. Make sure to copy the token because it won’t be shown to you again. A token is bound to a project. To interact with the API of another project you have to create a new token inside the project.
|
9
docs/cloud-linode.md
Normal file
9
docs/cloud-linode.md
Normal file
|
@ -0,0 +1,9 @@
|
|||
## API Token
|
||||
|
||||
Sign into the Linode Manager and go to the
|
||||
[tokens management page](https://cloud.linode.com/profile/tokens).
|
||||
|
||||
Click `Add a Personal Access Token`. Label your new token and select *at least* the
|
||||
`Linodes` read/write permission and `StackScripts` read/write permission.
|
||||
Press `Submit` and make sure to copy the displayed token
|
||||
as it won't be shown again.
|
|
@ -1,9 +1,10 @@
|
|||
### Configuration file
|
||||
|
||||
Algo requires an API key from your Scaleway account to create a server.
|
||||
The API key is generated by going to your Scaleway credentials at [https://console.scaleway.com/account/credentials](https://console.scaleway.com/account/credentials), and then selecting "Generate new token" on the right side of the box labeled "API Tokens".
|
||||
The API key is generated by going to your Scaleway credentials at [https://console.scaleway.com/project/credentials](https://console.scaleway.com/project/credentials), and then selecting "Generate new API key" on the right side of the box labeled "API Keys".
|
||||
You'll be asked to specify a purpose for your API key before it is created. You will then be presented with an "Access key" and a "Secret key".
|
||||
|
||||
Enter this token when Algo prompts you for the `auth token`.
|
||||
Enter the "Secret key" when Algo prompts you for the `auth token`. You won't need the "Access key".
|
||||
This information will be passed as the `algo_scaleway_token` variable when asked for in the Algo prompt.
|
||||
|
||||
Your organization ID is also on this page: https://console.scaleway.com/account/credentials
|
||||
|
|
|
@ -26,12 +26,12 @@ See below for more information about variables and roles.
|
|||
|
||||
- `provider` - (Required) The provider to use. See possible values below
|
||||
- `server_name` - (Required) Server name. Default: algo
|
||||
- `ondemand_cellular` (Optional) VPN On Demand when connected to cellular networks with IPsec. Default: false
|
||||
- `ondemand_wifi` - (Optional. See `ondemand_wifi_exclude`) VPN On Demand when connected to WiFi networks with IPsec. Default: false
|
||||
- `ondemand_cellular` (Optional) Enables VPN On Demand when connected to cellular networks for iOS/macOS clients using IPsec. Default: false
|
||||
- `ondemand_wifi` - (Optional. See `ondemand_wifi_exclude`) Enables VPN On Demand when connected to WiFi networks for iOS/macOS clients using IPsec. Default: false
|
||||
- `ondemand_wifi_exclude` (Required if `ondemand_wifi` set) - WiFi networks to exclude from using the VPN. Comma-separated values
|
||||
- `dns_adblocking` - (Optional) Enables dnscrypt-proxy adblocking. Default: false
|
||||
- `ssh_tunneling` - (Optional) Enable SSH tunneling for each user. Default: false
|
||||
- `store_cakey` - (Optional) Whether or not keep the CA key (required to add users in the future, but less secure). Default: false
|
||||
- `store_pki` - (Optional) Whether or not keep the CA key (required to add users in the future, but less secure). Default: false
|
||||
|
||||
If any of the above variables are unspecified, ansible will ask the user to input them.
|
||||
|
||||
|
@ -41,30 +41,34 @@ Cloud roles can be activated by specifying an extra variable `provider`.
|
|||
|
||||
Cloud roles:
|
||||
|
||||
- role: cloud-digitalocean, provider: digitalocean
|
||||
- role: cloud-ec2, provider: ec2
|
||||
- role: cloud-vultr, provider: vultr
|
||||
- role: cloud-gce, provider: gce
|
||||
- role: cloud-azure, provider: azure
|
||||
- role: cloud-scaleway, provider: scaleway
|
||||
- role: cloud-openstack, provider: openstack
|
||||
- role: cloud-digitalocean, [provider: digitalocean](#digital-ocean)
|
||||
- role: cloud-ec2, [provider: ec2](#amazon-ec2)
|
||||
- role: cloud-gce, [provider: gce](#google-compute-engine)
|
||||
- role: cloud-vultr, [provider: vultr](#vultr)
|
||||
- role: cloud-azure, [provider: azure](#azure)
|
||||
- role: cloud-lightsail, [provider: lightsail](#lightsail)
|
||||
- role: cloud-scaleway, [provider: scaleway](#scaleway)
|
||||
- role: cloud-openstack, [provider: openstack](#openstack)
|
||||
- role: cloud-cloudstack, [provider: cloudstack](#cloudstack)
|
||||
- role: cloud-hetzner, [provider: hetzner](#hetzner)
|
||||
- role: cloud-linode, [provider: linode](#linode)
|
||||
|
||||
Server roles:
|
||||
|
||||
- role: strongswan
|
||||
* Installs [strongSwan](https://www.strongswan.org/)
|
||||
* Enables AppArmor, limits CPU and memory access, and drops user privileges
|
||||
* Builds a Certificate Authority (CA) with [easy-rsa-ipsec](https://github.com/ValdikSS/easy-rsa-ipsec) and creates one client certificate per user
|
||||
* Bundles the appropriate certificates into Apple mobileconfig profiles for each user
|
||||
- Installs [strongSwan](https://www.strongswan.org/)
|
||||
- Enables AppArmor, limits CPU and memory access, and drops user privileges
|
||||
- Builds a Certificate Authority (CA) with [easy-rsa-ipsec](https://github.com/ValdikSS/easy-rsa-ipsec) and creates one client certificate per user
|
||||
- Bundles the appropriate certificates into Apple mobileconfig profiles for each user
|
||||
- role: dns_adblocking
|
||||
* Installs DNS encryption through [dnscrypt-proxy](https://github.com/jedisct1/dnscrypt-proxy) with blacklists to be updated daily from `adblock_lists` in `config.cfg` - note this will occur even if `dns_encryption` in `config.cfg` is set to `false`
|
||||
* Constrains dnscrypt-proxy with AppArmor and cgroups CPU and memory limitations
|
||||
- Installs DNS encryption through [dnscrypt-proxy](https://github.com/jedisct1/dnscrypt-proxy) with blacklists to be updated daily from `adblock_lists` in `config.cfg` - note this will occur even if `dns_encryption` in `config.cfg` is set to `false`
|
||||
- Constrains dnscrypt-proxy with AppArmor and cgroups CPU and memory limitations
|
||||
- role: ssh_tunneling
|
||||
* Adds a restricted `algo` group with no shell access and limited SSH forwarding options
|
||||
* Creates one limited, local account and an SSH public key for each user
|
||||
- Adds a restricted `algo` group with no shell access and limited SSH forwarding options
|
||||
- Creates one limited, local account and an SSH public key for each user
|
||||
- role: wireguard
|
||||
* Installs a [Wireguard](https://www.wireguard.com/) server, with a startup script, and automatic checks for upgrades
|
||||
* Creates wireguard.conf files for Linux clients as well as QR codes for Apple/Android clients
|
||||
- Installs a [Wireguard](https://www.wireguard.com/) server, with a startup script, and automatic checks for upgrades
|
||||
- Creates wireguard.conf files for Linux clients as well as QR codes for Apple/Android clients
|
||||
|
||||
Note: The `strongswan` role generates Apple profiles with On-Demand Wifi and Cellular if you pass the following variables:
|
||||
|
||||
|
@ -92,7 +96,7 @@ Required variables:
|
|||
- do_token
|
||||
- region
|
||||
|
||||
Possible options can be gathered calling to https://api.digitalocean.com/v2/regions
|
||||
Possible options can be gathered calling to <https://api.digitalocean.com/v2/regions>
|
||||
|
||||
### Amazon EC2
|
||||
|
||||
|
@ -106,9 +110,26 @@ Possible options can be gathered via cli `aws ec2 describe-regions`
|
|||
|
||||
Additional variables:
|
||||
|
||||
- [encrypted](https://aws.amazon.com/blogs/aws/new-encrypted-ebs-boot-volumes/) - Encrypted EBS boot volume. Boolean (Default: false)
|
||||
- [encrypted](https://aws.amazon.com/blogs/aws/new-encrypted-ebs-boot-volumes/) - Encrypted EBS boot volume. Boolean (Default: true)
|
||||
- [size](https://aws.amazon.com/ec2/instance-types/) - EC2 instance type. String (Default: t2.micro)
|
||||
- [image](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-images.html) - AMI `describe-images` search parameters to find the OS for the hosted image. Each OS and architecture has a unique AMI-ID. The OS owner, for example [Ubuntu](https://cloud-images.ubuntu.com/locator/ec2/), updates these images often. If parameters below result in multiple results, the most recent AMI-ID is chosen
|
||||
|
||||
#### Minimum required IAM permissions for deployment:
|
||||
```
|
||||
# Example of equivalent cli command
|
||||
aws ec2 describe-images --owners "099720109477" --filters "Name=architecture,Values=arm64" "Name=name,Values=ubuntu/images/hvm-ssd/ubuntu-jammy-22.04*"
|
||||
```
|
||||
|
||||
- [owners] - The operating system owner id. Default is [Canonical](https://help.ubuntu.com/community/EC2StartersGuide#Official_Ubuntu_Cloud_Guest_Amazon_Machine_Images_.28AMIs.29) (Default: 099720109477)
|
||||
- [arch] - The architecture (Default: x86_64, Optional: arm64)
|
||||
- [name] - The wildcard string to filter available ami names. Algo appends this name with the string "-\*64-server-\*", and prepends with "ubuntu/images/hvm-ssd/" (Default: Ubuntu latest LTS)
|
||||
- [instance_market_type](https://aws.amazon.com/ec2/pricing/) - Two pricing models are supported: on-demand and spot. String (Default: on-demand)
|
||||
- If using spot instance types, one additional IAM permission along with the below minimum is required for deployment:
|
||||
|
||||
```
|
||||
"ec2:CreateLaunchTemplate"
|
||||
```
|
||||
|
||||
#### Minimum required IAM permissions for deployment
|
||||
|
||||
```
|
||||
{
|
||||
|
@ -146,14 +167,18 @@ Additional variables:
|
|||
"Sid": "CloudFormationEC2Access",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"ec2:DescribeRegions",
|
||||
"ec2:CreateInternetGateway",
|
||||
"ec2:DescribeVpcs",
|
||||
"ec2:CreateVpc",
|
||||
"ec2:DescribeInternetGateways",
|
||||
"ec2:ModifyVpcAttribute",
|
||||
"ec2:createTags",
|
||||
"ec2:CreateTags",
|
||||
"ec2:CreateSubnet",
|
||||
"ec2:Associate*",
|
||||
"ec2:AssociateVpcCidrBlock",
|
||||
"ec2:AssociateSubnetCidrBlock",
|
||||
"ec2:AssociateRouteTable",
|
||||
"ec2:AssociateAddress",
|
||||
"ec2:CreateRouteTable",
|
||||
"ec2:AttachInternetGateway",
|
||||
"ec2:DescribeRouteTables",
|
||||
|
@ -180,8 +205,8 @@ Additional variables:
|
|||
|
||||
Required variables:
|
||||
|
||||
- gce_credentials_file
|
||||
- [region](https://cloud.google.com/compute/docs/regions-zones/)
|
||||
- gce_credentials_file: e.g. /configs/gce.json if you use the [GCE docs](https://trailofbits.github.io/algo/cloud-gce.html) - can also be defined in environment as GCE_CREDENTIALS_FILE_PATH
|
||||
- [region](https://cloud.google.com/compute/docs/regions-zones/): e.g. `us-east1`
|
||||
|
||||
### Vultr
|
||||
|
||||
|
@ -210,7 +235,7 @@ Required variables:
|
|||
|
||||
Possible options can be gathered via cli `aws lightsail get-regions`
|
||||
|
||||
#### Minimum required IAM permissions for deployment:
|
||||
#### Minimum required IAM permissions for deployment
|
||||
|
||||
```
|
||||
{
|
||||
|
@ -223,7 +248,27 @@ Possible options can be gathered via cli `aws lightsail get-regions`
|
|||
"lightsail:GetRegions",
|
||||
"lightsail:GetInstance",
|
||||
"lightsail:CreateInstances",
|
||||
"lightsail:OpenInstancePublicPorts"
|
||||
"lightsail:DisableAddOn",
|
||||
"lightsail:PutInstancePublicPorts",
|
||||
"lightsail:StartInstance",
|
||||
"lightsail:TagResource",
|
||||
"lightsail:GetStaticIp",
|
||||
"lightsail:AllocateStaticIp",
|
||||
"lightsail:AttachStaticIp"
|
||||
],
|
||||
"Resource": [
|
||||
"*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Sid": "DeployCloudFormationStack",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"cloudformation:CreateStack",
|
||||
"cloudformation:UpdateStack",
|
||||
"cloudformation:DescribeStacks",
|
||||
"cloudformation:DescribeStackEvents",
|
||||
"cloudformation:ListStackResources"
|
||||
],
|
||||
"Resource": [
|
||||
"*"
|
||||
|
@ -238,12 +283,36 @@ Possible options can be gathered via cli `aws lightsail get-regions`
|
|||
Required variables:
|
||||
|
||||
- [scaleway_token](https://www.scaleway.com/docs/generate-an-api-token/)
|
||||
- region: e.g. ams1, par1
|
||||
- region: e.g. `ams1`, `par1`
|
||||
|
||||
### OpenStack
|
||||
|
||||
You need to source the rc file prior to running Algo. Download it from the OpenStack dashboard under Compute -> API Access and source it in the shell (e.g.: `source /tmp/dhc-openrc.sh`).
|
||||
|
||||
### CloudStack
|
||||
|
||||
Required variables:
|
||||
|
||||
- [cs_config](https://trailofbits.github.io/algo/cloud-cloudstack.html): /path/to/.cloudstack.ini
|
||||
- cs_region: e.g. `exoscale`
|
||||
- cs_zones: e.g. `ch-gva2`
|
||||
|
||||
The first two can also be defined in your environment, using the variables `CLOUDSTACK_CONFIG` and `CLOUDSTACK_REGION`.
|
||||
|
||||
### Hetzner
|
||||
|
||||
Required variables:
|
||||
|
||||
- hcloud_token: Your [API token](https://trailofbits.github.io/algo/cloud-hetzner.html#api-token) - can also be defined in the environment as HCLOUD_TOKEN
|
||||
- region: e.g. `nbg1`
|
||||
|
||||
### Linode
|
||||
|
||||
Required variables:
|
||||
|
||||
- linode_token: Your [API token](https://trailofbits.github.io/algo/cloud-linode.html#api-token) - can also be defined in the environment as LINODE_TOKEN
|
||||
- region: e.g. `us-east`
|
||||
|
||||
### Update users
|
||||
|
||||
Playbook:
|
||||
|
|
15
docs/deploy-from-cloudshell.md
Normal file
15
docs/deploy-from-cloudshell.md
Normal file
|
@ -0,0 +1,15 @@
|
|||
# Deploy from Google Cloud Shell
|
||||
|
||||
If you want to try Algo but don't wish to install the software on your own system you can use the **free** [Google Cloud Shell](https://cloud.google.com/shell/) to deploy a VPN to any supported cloud provider. Note that you cannot choose `Install to existing Ubuntu server` to turn Google Cloud Shell into your VPN server.
|
||||
|
||||
1. See the [Cloud Shell documentation](https://cloud.google.com/shell/docs/) to start an instance of Cloud Shell in your browser.
|
||||
|
||||
2. Follow the [Algo installation instructions](https://github.com/trailofbits/algo#deploy-the-algo-server) as shown but skip step **3. Install Algo's core dependencies** as they are already installed. Run Algo to deploy to a supported cloud provider.
|
||||
|
||||
3. Once Algo has completed, retrieve a copy of the configuration files that were created to your local system. While still in the Algo directory, run:
|
||||
```
|
||||
zip -r configs configs
|
||||
dl configs.zip
|
||||
```
|
||||
|
||||
4. Unzip `configs.zip` on your local system and use the files to configure your VPN clients.
|
|
@ -4,9 +4,8 @@ While it is not possible to run your Algo server from within a Docker container,
|
|||
|
||||
## Limitations
|
||||
|
||||
1. [Advanced](deploy-from-ansible.md) installations are not currently supported; you must use the interactive `algo` script.
|
||||
2. This has not yet been tested with user namespacing enabled.
|
||||
3. If you're running this on Windows, take care when editing files under `configs/` to ensure that line endings are set appropriately for Unix systems.
|
||||
1. This has not yet been tested with user namespacing enabled.
|
||||
2. If you're running this on Windows, take care when editing files under `configs/` to ensure that line endings are set appropriately for Unix systems.
|
||||
|
||||
## Deploying an Algo Server with Docker
|
||||
|
||||
|
@ -14,38 +13,74 @@ While it is not possible to run your Algo server from within a Docker container,
|
|||
2. Create a local directory to hold your VPN configs (e.g. `C:\Users\trailofbits\Documents\VPNs\`)
|
||||
3. Create a local copy of [config.cfg](https://github.com/trailofbits/algo/blob/master/config.cfg), with required modifications (e.g. `C:\Users\trailofbits\Documents\VPNs\config.cfg`)
|
||||
4. Run the Docker container, mounting your configurations appropriately (assuming the container is named `trailofbits/algo` with a tag `latest`):
|
||||
|
||||
- From Windows:
|
||||
|
||||
```powershell
|
||||
C:\Users\trailofbits> docker run --cap-drop=all -it \
|
||||
-v C:\Users\trailofbits\Documents\VPNs:/data \
|
||||
trailofbits/algo:latest
|
||||
ghcr.io/trailofbits/algo:latest
|
||||
```
|
||||
|
||||
- From Linux:
|
||||
|
||||
```bash
|
||||
$ docker run --cap-drop=all -it \
|
||||
-v /home/trailofbits/Documents/VPNs:/data \
|
||||
trailofbits/algo:latest
|
||||
ghcr.io/trailofbits/algo:latest
|
||||
```
|
||||
|
||||
5. When it exits, you'll be left with a fully populated `configs` directory, containing all appropriate configuration data for your clients, and for future server management
|
||||
|
||||
### Providing Additional Files
|
||||
|
||||
|
||||
If you need to provide additional files -- like authorization files for Google Cloud Project -- you can simply specify an additional `-v` parameter, and provide the appropriate path when prompted by `algo`.
|
||||
|
||||
For example, you can specify `-v C:\Users\trailofbits\Documents\VPNs\gce_auth.json:/algo/gce_auth.json`, making the local path to your credentials JSON file `/algo/gce_auth.json`.
|
||||
|
||||
### Scripted deployment
|
||||
|
||||
Ansible variables (see [Deployment from Ansible](deploy-from-ansible.md)) can be passed via `ALGO_ARGS` environment variable.
|
||||
_The leading `-e` (or `--extra-vars`) is required_, e.g.
|
||||
|
||||
```bash
|
||||
$ ALGO_ARGS="-e
|
||||
provider=digitalocean
|
||||
server_name=algo
|
||||
ondemand_cellular=false
|
||||
ondemand_wifi=false
|
||||
dns_adblocking=true
|
||||
ssh_tunneling=true
|
||||
store_pki=true
|
||||
region=ams3
|
||||
do_token=token"
|
||||
|
||||
$ docker run --cap-drop=all -it \
|
||||
-e "ALGO_ARGS=$ALGO_ARGS" \
|
||||
-v /home/trailofbits/Documents/VPNs:/data \
|
||||
ghcr.io/trailofbits/algo:latest
|
||||
```
|
||||
|
||||
## Managing an Algo Server with Docker
|
||||
|
||||
Even though the container itself is transient, because you've persisted the configuration data, you can use the same Docker image to manage your Algo server. This is done by setting the environment variable `ALGO_ARGS`.
|
||||
|
||||
If you want to use Algo to update the users on an existing server, specify `-e "ALGO_ARGS=update-users"` in your `docker run` command:
|
||||
|
||||
```powershell
|
||||
$ docker run --cap-drop=all -it \
|
||||
-e "ALGO_ARGS=update-users" \
|
||||
-v C:\Users\trailofbits\Documents\VPNs:/data \
|
||||
trailofbits/algo:latest
|
||||
ghcr.io/trailofbits/algo:latest
|
||||
```
|
||||
|
||||
## GNU Makefile for Docker
|
||||
|
||||
You can also build and deploy with a Makefile. This simplifies some of the command strings and opens the door for further user configuration.
|
||||
|
||||
The `Makefile` consists of three targets: `docker-build`, `docker-deploy`, and `docker-prune`.
|
||||
`docker-all` will run through all of them.
|
||||
|
||||
## Building Your Own Docker Image
|
||||
|
||||
You can use the Dockerfile provided in this repository as-is, or modify it to suit your needs. Further instructions on building an image can be found in the [Docker engine](https://docs.docker.com/engine/) documents.
|
||||
|
|
|
@ -1,115 +0,0 @@
|
|||
# Deploy from Fedora Workstation
|
||||
|
||||
These docs were written based on experience on Fedora Workstation 30.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### DNF counterparts of apt packages
|
||||
|
||||
The following table lists `apt` packages with their `dnf` counterpart. This is purely informative.
|
||||
Using `python2-*` in favour of `python3-*` as per [declared dependency](https://github.com/trailofbits/algo#deploy-the-algo-server).
|
||||
|
||||
| `apt` | `dnf` |
|
||||
| ----- | ----- |
|
||||
| `build-essential` | `make automake gcc gcc-c++ kernel-devel` |
|
||||
| `libssl-dev` | `openssl-devel` |
|
||||
| `libffi-dev` | `libffi-devel` |
|
||||
| `python-dev` | `python2-devel` |
|
||||
| `python-pip` | `python2-pip` |
|
||||
| `python-setuptools` | `python2-setuptools` |
|
||||
| `python-virtualenv` | `python2-virtualenv` |
|
||||
|
||||
### Install requirements
|
||||
|
||||
First, let's make sure our system is up-to-date:
|
||||
|
||||
````
|
||||
dnf upgrade
|
||||
````
|
||||
|
||||
Next, install the required packages:
|
||||
|
||||
````
|
||||
dnf install -y \
|
||||
ansible \
|
||||
automake \
|
||||
gcc \
|
||||
gcc-c++ \
|
||||
kernel-devel \
|
||||
openssl-devel \
|
||||
libffi-devel \
|
||||
libselinux-python \
|
||||
python2-devel \
|
||||
python2-pip \
|
||||
python2-setuptools \
|
||||
python2-virtualenv \
|
||||
python2-crypto \
|
||||
python2-pyyaml \
|
||||
python2-pyOpenSSL \
|
||||
python2-libselinux \
|
||||
make
|
||||
````
|
||||
|
||||
## Get Algo
|
||||
|
||||
|
||||
[Download](https://github.com/trailofbits/algo/archive/master.zip) or clone:
|
||||
|
||||
````
|
||||
git clone git@github.com:trailofbits/algo.git
|
||||
cd algo
|
||||
````
|
||||
|
||||
If you downloaded Algo, unzip to your prefered location and `cd` into it.
|
||||
We'll assume from this point forward that our working directory is the `algo` root directory.
|
||||
|
||||
|
||||
## Prepare algo
|
||||
|
||||
Some steps are needed before we can deploy our Algo VPN server.
|
||||
|
||||
### Check `pip`
|
||||
|
||||
Run `pip -v` and check the python version it is using:
|
||||
|
||||
````
|
||||
$ pip -V
|
||||
pip 19.0.3 from /usr/lib/python2.7/site-packages (python 2.7)
|
||||
````
|
||||
|
||||
`python 2.7` is what we're looking for.
|
||||
|
||||
### Setup virtualenv and install requirements
|
||||
|
||||
````
|
||||
python2 -m virtualenv --system-site-packages env
|
||||
source env/bin/activate
|
||||
pip -q install --user -r requirements.txt
|
||||
````
|
||||
|
||||
## Configure
|
||||
|
||||
Edit the userlist and any other settings you desire in `config.cfg` using your prefered editor.
|
||||
|
||||
## Deploy
|
||||
|
||||
We can now deploy our server by running:
|
||||
|
||||
````
|
||||
./algo
|
||||
````
|
||||
|
||||
Note the IP and password of the newly created Algo VPN server and store it safely.
|
||||
|
||||
If you want to setup client config on your Fedora Workstation, refer to [the Linux Client docs](client-linux.md).
|
||||
|
||||
## Notes on SELinux
|
||||
|
||||
If you have SELinux enabled, you'll need to set appropriate file contexts:
|
||||
|
||||
````
|
||||
semanage fcontext -a -t ipsec_key_file_t "$(pwd)(/.*)?"
|
||||
restorecon -R -v $(pwd)
|
||||
````
|
||||
|
||||
See [this comment](https://github.com/trailofbits/algo/issues/263#issuecomment-328053950).
|
66
docs/deploy-from-macos.md
Normal file
66
docs/deploy-from-macos.md
Normal file
|
@ -0,0 +1,66 @@
|
|||
# Deploy from macOS
|
||||
|
||||
While you can't turn a macOS system into an AlgoVPN, you can install the Algo scripts on a macOS system and use them to deploy your AlgoVPN to a cloud provider.
|
||||
|
||||
Algo uses [Ansible](https://www.ansible.com) which requires Python 3. macOS includes an obsolete version of Python 2 installed as `/usr/bin/python` which you should ignore.
|
||||
|
||||
## macOS 10.15 Catalina
|
||||
|
||||
Catalina comes with Python 3 installed as `/usr/bin/python3`. This file, and certain others like `/usr/bin/git`, start out as stub files that prompt you to install the Command Line Developer Tools package the first time you run them. This is the easiest way to install Python 3 on Catalina.
|
||||
|
||||
Note that Python 3 from Command Line Developer Tools prior to the release for Xcode 11.5 on 2020-05-20 might not work with Algo. If Software Update does not offer to update an older version of the tools you can download a newer version from [here](https://developer.apple.com/download/more/) (Apple ID login required).
|
||||
|
||||
## macOS prior to 10.15 Catalina
|
||||
|
||||
You'll need to install Python 3 before you can run Algo. Python 3 is available from different packagers, two of which are listed below.
|
||||
|
||||
### Ansible and SSL Validation
|
||||
|
||||
Ansible validates SSL network connections using OpenSSL but macOS includes LibreSSL which behaves differently. Therefore each version of Python below includes or depends on its own copy of OpenSSL.
|
||||
|
||||
OpenSSL needs access to a list of trusted CA certificates in order to validate SSL connections. Each packager handles initializing this certificate store differently. If you see the error `CERTIFICATE_VERIFY_FAILED` when running Algo make sure you've followed the packager-specific instructions correctly.
|
||||
|
||||
### Choose a packager and install Python 3
|
||||
|
||||
Choose one of the packagers below as your source for Python 3. Avoid installing versions from multiple packagers on the same Mac as you may encounter conflicts. In particular they might fight over creating symbolic links in `/usr/local/bin`.
|
||||
|
||||
#### Option 1: Install using the Homebrew package manager
|
||||
|
||||
If you're comfortable using the command line in Terminal the [Homebrew](https://brew.sh) project is a great source of software for macOS.
|
||||
|
||||
First install Homebrew using the instructions on the [Homebrew](https://brew.sh) page.
|
||||
|
||||
The install command below takes care of initializing the CA certificate store.
|
||||
|
||||
##### Installation
|
||||
```
|
||||
brew install python3
|
||||
```
|
||||
After installation open a new tab or window in Terminal and verify that the command `which python3` returns `/usr/local/bin/python3`.
|
||||
|
||||
##### Removal
|
||||
```
|
||||
brew uninstall python3
|
||||
```
|
||||
|
||||
#### Option 2: Install the package from Python.org
|
||||
|
||||
If you don't want to install a package manager you can download the Python package for macOS from [python.org](https://www.python.org/downloads/mac-osx/).
|
||||
|
||||
##### Installation
|
||||
|
||||
Download the most recent version of Python and install it like any other macOS package. Then initialize the CA certificate store from Finder by double-clicking on the file `Install Certificates.command` found in the `/Applications/Python 3.8` folder.
|
||||
|
||||
When you double-click on `Install Certificates.command` a new Terminal window will open. If the window remains blank then the command has not run correctly. This can happen if you've changed the default shell in Terminal Preferences. Try changing it back to the default and run `Install Certificates.command` again.
|
||||
|
||||
After installation open a new tab or window in Terminal and verify that the command `which python3` returns either `/usr/local/bin/python3` or `/Library/Frameworks/Python.framework/Versions/3.8/bin/python3`.
|
||||
|
||||
##### Removal
|
||||
|
||||
Unfortunately the python.org package does not include an uninstaller and removing it requires several steps:
|
||||
|
||||
1. In Finder, delete the package folder found in `/Applications`.
|
||||
2. In Finder, delete the *rest* of the package found under ` /Library/Frameworks/Python.framework/Versions`.
|
||||
3. In Terminal, undo the changes to your `PATH` by running:
|
||||
```mv ~/.bash_profile.pysave ~/.bash_profile```
|
||||
4. In Terminal, remove the dozen or so symbolic links the package created in `/usr/local/bin`. Or just leave them because installing another version of Python will overwrite most of them.
|
|
@ -1,86 +0,0 @@
|
|||
# RedHat/CentOS 6.x pre-installation requirements
|
||||
|
||||
Many people prefer RedHat or CentOS 6 (or similar variants like Amazon Linux) due to their stability and lack of systemd. Unfortunately, there are a number of dated libraries, notably Python 2.6, that prevent Algo from running without errors. This script will prepare a RedHat, CentOS, or similar VM to deploy Algo to cloud instances.
|
||||
|
||||
## Step 1: Prep for RH/CentOS 6.8/Amazon
|
||||
|
||||
```shell
|
||||
yum -y -q update
|
||||
yum -y -q install epel-release
|
||||
```
|
||||
|
||||
Enable any kernel updates:
|
||||
|
||||
```shell
|
||||
reboot
|
||||
```
|
||||
|
||||
## Step 2: Install Ansible and launch Algo
|
||||
|
||||
Fix GPG key warnings during Ansible rpm install:
|
||||
|
||||
```shell
|
||||
rpm --import https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-6
|
||||
```
|
||||
|
||||
Fix GPG key warning during official Software Collections (SCL) package install:
|
||||
|
||||
```shell
|
||||
rpm --import https://raw.githubusercontent.com/sclorg/centos-release-scl/master/centos-release-scl/RPM-GPG-KEY-CentOS-SIG-SCLo
|
||||
```
|
||||
|
||||
RedHat/CentOS 6.x uses Python 2.6 by default, which is explicitly deprecated and produces many warnings and errors, so we must install a safe, non-invasive 2.7 tool set which has to be expressly enabled (and will not survive login sessions and reboots):
|
||||
|
||||
```shell
|
||||
# Install the Software Collections Library (to enable Python 2.7)
|
||||
yum -y -q install centos-release-SCL
|
||||
|
||||
# 2.7 will not be used until explicitly enabled, per login session
|
||||
yum -y -q install python27-python-devel python27-python-setuptools python27-python-pip
|
||||
yum -y -q install openssl-devel libffi-devel automake gcc gcc-c++ kernel-devel wget unzip ansible nano
|
||||
|
||||
# Enable 2.7 default for this session (needs re-run between logins & reboots)
|
||||
# shellcheck disable=SC1091
|
||||
source /opt/rh/python27/enable
|
||||
# We're now defaulted to 2.7
|
||||
|
||||
# Upgrade pip itself
|
||||
pip -q install --upgrade pip
|
||||
# python-devel needed to prevent setup.py crash
|
||||
pip -q install pycrypto
|
||||
# pycrypto 2.7.1 needed for latest security patch
|
||||
pip -q install setuptools --upgrade
|
||||
# virtualenv to make installing dependencies easier
|
||||
pip -q install virtualenv
|
||||
|
||||
wget -q https://github.com/trailofbits/algo/archive/master.zip
|
||||
unzip master.zip
|
||||
cd algo-master || echo "No Algo directory found"
|
||||
|
||||
# Set up a virtualenv and install the local Algo dependencies (must be run from algo-master)
|
||||
virtualenv env && source env/bin/activate
|
||||
pip -q install -r requirements.txt
|
||||
|
||||
# Edit the userlist and any other settings you desire
|
||||
nano config.cfg
|
||||
# Now you can run the Algo installer!
|
||||
./algo
|
||||
```
|
||||
|
||||
## Post-install macOS
|
||||
|
||||
1. Copy `./configs/*mobileconfig` to your local Mac
|
||||
|
||||
2. Install the VPN profile on your Mac (10.10+ required)
|
||||
|
||||
```shell
|
||||
/usr/bin/profiles -I -F ./x.x.x.x_NAME.mobileconfig
|
||||
```
|
||||
|
||||
3. To remove:
|
||||
|
||||
```shell
|
||||
/usr/bin/profiles -D -F ./x.x.x.x_NAME.mobileconfig
|
||||
```
|
||||
|
||||
The VPN connection will now appear under Networks (which can be pinned to the top menu bar if preferred)
|
|
@ -1,10 +1,13 @@
|
|||
# Deploy from script or cloud-init
|
||||
|
||||
You can use `install.sh` to prepare the environment and deploy AlgoVPN on the local Ubuntu server in one shot using cloud-init, or run the script directly on the server after it's been created. The script doesn't configure any parameters in your cloud, so it's on your own to configure related [firewall rules](/docs/firewalls.md), a floating ip address and other resources you may need. The output of the install script (including the p12 and CA passwords) and user config files will be installed into the `/opt/algo` directory.
|
||||
You can use `install.sh` to prepare the environment and deploy AlgoVPN on the local Ubuntu server in one shot using cloud-init, or run the script directly on the server after it's been created.
|
||||
The script doesn't configure any parameters in your cloud, so you're on your own to configure related [firewall rules](/docs/firewalls.md), a floating IP address and other resources you may need. The output of the install script (including the p12 and CA passwords) can be found at `/var/log/algo.log`, and user config files will be installed into the `/opt/algo/configs/localhost` directory. If you need to update users later, `cd /opt/algo`, change the user list in `config.cfg`, install additional dependencies as in step 4 of the [main README](https://github.com/trailofbits/algo/blob/master/README.md), and run `./algo update-users` from that directory.
|
||||
|
||||
## Cloud init deployment
|
||||
|
||||
You can copy-paste the snippet below to the user data (cloud-init or startup script) field when creating a new server. For now it is only possible for [DigitalOcean](https://www.digitalocean.com/docs/droplets/resources/metadata/), Amazon [EC2](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) and [Lightsail](https://lightsail.aws.amazon.com/ls/docs/en/articles/lightsail-how-to-configure-server-additional-data-shell-script), [Google Cloud](https://cloud.google.com/compute/docs/startupscript), [Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/using-cloud-init) and [Vultr](https://my.vultr.com/startup/), although Vultr doesn't [officially support cloud-init](https://www.vultr.com/docs/getting-started-with-cloud-init).
|
||||
You can copy-paste the snippet below to the user data (cloud-init or startup script) field when creating a new server.
|
||||
|
||||
For now this has only been successfully tested on [DigitalOcean](https://www.digitalocean.com/docs/droplets/resources/metadata/), Amazon [EC2](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) and [Lightsail](https://lightsail.aws.amazon.com/ls/docs/en/articles/lightsail-how-to-configure-server-additional-data-shell-script), [Google Cloud](https://cloud.google.com/compute/docs/startupscript), [Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/using-cloud-init) and [Vultr](https://my.vultr.com/startup/), although Vultr doesn't [officially support cloud-init](https://www.vultr.com/docs/getting-started-with-cloud-init).
|
||||
|
||||
```
|
||||
#!/bin/bash
|
||||
|
@ -14,19 +17,31 @@ The command will prepare the environment and install AlgoVPN with the default pa
|
|||
|
||||
## Variables
|
||||
|
||||
`METHOD` - which method of the deployment to use. Possible values are local and cloud. Default: cloud. The cloud method is intended to use in cloud-init deployments only. If you are not using cloud-init to deploy the server you have to use the local method.
|
||||
`ONDEMAND_CELLULAR` - "Connect On Demand" when connected to cellular networks. Boolean. Default: false.
|
||||
`ONDEMAND_WIFI` - "Connect On Demand" when connected to Wi-Fi. Default: false.
|
||||
`ONDEMAND_WIFI_EXCLUDE` - List the names of any trusted Wi-Fi networks where macOS/iOS IPsec clients should not use "Connect On Demand". Comma-separated list.
|
||||
`STORE_PKI` - To retain the PKI. (required to add users in the future, but less secure). Default: false.
|
||||
`DNS_ADBLOCKING` - To install an ad blocking DNS resolver. Default: false.
|
||||
`SSH_TUNNELING` - Enable SSH tunneling for each user. Default: false.
|
||||
`ENDPOINT` - The public IP address or domain name of your server: (IMPORTANT! This is used to verify the certificate). It will be gathered automatically for DigitalOcean, AWS, GCE, Azure or Vultr if the `METHOD` is cloud. Otherwise you need to define this variable according to your public IP address.
|
||||
`USERS` - list of VPN users. Comma-separated list. Default: user1.
|
||||
`REPO_SLUG` - Owner and repository that used to get the installation scripts from. Default: trailofbits/algo.
|
||||
`REPO_BRANCH` - Branch for `REPO_SLUG`. Default: master.
|
||||
`EXTRA_VARS` - Additional extra variables.
|
||||
`ANSIBLE_EXTRA_ARGS` - Any available ansible parameters. ie: `--skip-tags apparmor`.
|
||||
- `METHOD`: which method of the deployment to use. Possible values are local and cloud. Default: cloud. The cloud method is intended for use in cloud-init deployments only. If you are not using cloud-init to deploy the server you have to use the local method.
|
||||
|
||||
- `ONDEMAND_CELLULAR`: "Connect On Demand" when connected to cellular networks. Boolean. Default: false.
|
||||
|
||||
- `ONDEMAND_WIFI`: "Connect On Demand" when connected to Wi-Fi. Default: false.
|
||||
|
||||
- `ONDEMAND_WIFI_EXCLUDE`: List the names of any trusted Wi-Fi networks where macOS/iOS IPsec clients should not use "Connect On Demand". Comma-separated list.
|
||||
|
||||
- `STORE_PKI`: To retain the PKI. (required to add users in the future, but less secure). Default: false.
|
||||
|
||||
- `DNS_ADBLOCKING`: To install an ad blocking DNS resolver. Default: false.
|
||||
|
||||
- `SSH_TUNNELING`: Enable SSH tunneling for each user. Default: false.
|
||||
|
||||
- `ENDPOINT`: The public IP address or domain name of your server: (IMPORTANT! This is used to verify the certificate). It will be gathered automatically for DigitalOcean, AWS, GCE, Azure or Vultr if the `METHOD` is cloud. Otherwise you need to define this variable according to your public IP address.
|
||||
|
||||
- `USERS`: list of VPN users. Comma-separated list. Default: user1.
|
||||
|
||||
- `REPO_SLUG`: Owner and repository from which to get the installation scripts. Default: trailofbits/algo.
|
||||
|
||||
- `REPO_BRANCH`: Branch for `REPO_SLUG`. Default: master.
|
||||
|
||||
- `EXTRA_VARS`: Additional extra variables.
|
||||
|
||||
- `ANSIBLE_EXTRA_ARGS`: Any available ansible parameters. ie: `--skip-tags apparmor`.
|
||||
|
||||
## Examples
|
||||
|
||||
|
|
|
@ -1,8 +1,15 @@
|
|||
# Windows client prerequisite
|
||||
# Deploy from Windows
|
||||
|
||||
The Algo scripts can't be run directly on Windows, but you can use the Windows Subsystem for Linux (WSL) to run a copy of Ubuntu Linux right on your Windows system. You can then run Algo to deploy a VPN server to a supported cloud provider, though you can't turn the instance of Ubuntu running under WSL into a VPN server.
|
||||
|
||||
To run WSL you will need:
|
||||
|
||||
* A 64-bit system
|
||||
* 64-bit Windows 10 (Anniversary update or later version)
|
||||
|
||||
Once you verify your system is 64-bit (32-bit is not supported) and up to date, you have to do a few manual steps to enable the 'Windows Subsystem for Linux':
|
||||
## Install WSL
|
||||
|
||||
Enable the 'Windows Subsystem for Linux':
|
||||
|
||||
1. Open 'Settings'
|
||||
2. Click 'Update & Security', then click the 'For developers' option on the left.
|
||||
|
@ -14,20 +21,54 @@ Wait a minute for Windows to install a few things in the background (it will eve
|
|||
2. Click on 'Turn Windows features on or off'
|
||||
3. Scroll down and check 'Windows Subsystem for Linux', and then click OK.
|
||||
4. The subsystem will be installed, then Windows will require a restart.
|
||||
5. Restart Windows and then [install Ubuntu from the Windows Store](https://www.microsoft.com/p/ubuntu/9nblggh4msv6).
|
||||
5. Restart Windows and then install [Ubuntu 20.04 LTS from the Windows Store](https://www.microsoft.com/p/ubuntu-2004-lts/9n6svws3rx71).
|
||||
6. Run Ubuntu from the Start menu. It will take a few minutes to install. It will have you create a separate user account for the Linux subsystem. Once that's done, you will finally have Ubuntu running somewhat integrated with Windows.
|
||||
|
||||
## Install Algo
|
||||
|
||||
Install additional packages:
|
||||
Run these commands in the Ubuntu Terminal to install a prerequisite package and download the Algo scripts to your home directory. Note that when using WSL you should **not** install Algo in the `/mnt/c` directory due to problems with file permissions.
|
||||
|
||||
You may need to follow [these directions](https://devblogs.microsoft.com/commandline/copy-and-paste-arrives-for-linuxwsl-consoles/) in order to paste commands into the Ubuntu Terminal.
|
||||
|
||||
```shell
|
||||
sudo apt-get update && sudo apt-get install git build-essential libssl-dev libffi-dev python-dev python-pip python-setuptools python-virtualenv -y
|
||||
cd
|
||||
umask 0002
|
||||
sudo apt update
|
||||
sudo apt install -y python3-virtualenv
|
||||
git clone https://github.com/trailofbits/algo
|
||||
cd algo
|
||||
```
|
||||
|
||||
Clone the Algo repository:
|
||||
## Post installation steps
|
||||
|
||||
These steps are only needed if you cloned the Algo repository to a host machine disk (C:, D:, etc.). WSL mounts host system disks under the `/mnt` directory.
|
||||
|
||||
### Allow git to change files metadata
|
||||
|
||||
By default git cannot change file metadata (using chmod, for example) for files stored on host machine disks (https://docs.microsoft.com/en-us/windows/wsl/wsl-config#set-wsl-launch-settings). To allow it:
|
||||
|
||||
1. Start Ubuntu Terminal.
|
||||
2. Edit /etc/wsl.conf (create it if it doesn't exist). Add the following:
|
||||
```
|
||||
[automount]
|
||||
options = "metadata"
|
||||
```
|
||||
3. Close all Ubuntu Terminals.
|
||||
4. Run powershell.
|
||||
5. Run `wsl --shutdown` in powershell.
|
||||
|
||||
### Allow run Ansible in a world writable directory
|
||||
|
||||
Ansible treats host machine directories as world-writable and does not load configuration files from them by default (https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir). To fix this, run the following inside the `algo` directory:
|
||||
|
||||
```shell
|
||||
cd ~ && git clone https://github.com/trailofbits/algo && cd algo
|
||||
chmod 744 .
|
||||
```
|
||||
|
||||
Now, you can go through the [README](https://github.com/trailofbits/algo#deploy-the-algo-server) (start from the 4th step) and deploy your Algo server!
|
||||
Now you can continue by following the [README](https://github.com/trailofbits/algo#deploy-the-algo-server) from the 4th step to deploy your Algo server!
|
||||
|
||||
You'll be instructed to edit the file `config.cfg` in order to specify the Algo user accounts to be created. If you're new to Linux the simplest editor to use is `nano`. To edit the file while in the `algo` directory, run:
|
||||
```shell
|
||||
nano config.cfg
|
||||
```
|
||||
Once `./algo` has finished you can use the `cp` command to copy the configuration files from the `configs` directory into your Windows directory under `/mnt/c/Users` for easier access.
|
||||
|
|
|
@ -1,11 +1,25 @@
|
|||
# Local Installation
|
||||
|
||||
You can use Algo to configure a pre-existing server as an AlgoVPN rather than using it create and configure a new server on a supported cloud provider. This is referred to as a **local** installation rather than a **cloud** deployment.
|
||||
**PLEASE NOTE**: Algo is intended for use to create a _dedicated_ VPN server. No uninstallation option is provided. If you install Algo on an existing server any existing services might break. In particular, the firewall rules will be overwritten. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information.
|
||||
|
||||
------
|
||||
|
||||
## Outbound VPN Server
|
||||
|
||||
You can use Algo to configure a pre-existing server as an AlgoVPN rather than using it to create and configure a new server on a supported cloud provider. This is referred to as a **local** installation rather than a **cloud** deployment. If you're new to Algo or unfamiliar with Linux you'll find a cloud deployment to be easier.
|
||||
|
||||
To perform a local installation, install the Algo scripts following the normal installation instructions, then choose:
|
||||
|
||||
Install the Algo scripts following the normal installation instructions, then choose:
|
||||
```
|
||||
Install to existing Ubuntu 18.04 or 19.04 server (Advanced)
|
||||
Install to existing Ubuntu latest LTS server (for more advanced users)
|
||||
```
|
||||
|
||||
Make sure your target server is running an unmodified copy of the operating system version specified. The target can be the same system where you've installed the Algo scripts, or a remote system that you are able to access as root via SSH without needing to enter the SSH key passphrase (such as when using `ssh-agent`).
|
||||
|
||||
**PLEASE NOTE**: Algo is intended for use to create a _dedicated_ VPN server. No uninstallation option is provided. If you install Algo on an existing server any existing services might break. In particular, the firewall rules will be overwritten. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information.
|
||||
## Inbound VPN Server (also called "Road Warrior" setup)
|
||||
|
||||
Some may find it useful to set up an Algo server on an Ubuntu box on your home LAN, with the intention of being able to securely access your LAN and any resources on it when you're traveling elsewhere (the ["road warrior" setup](https://en.wikipedia.org/wiki/Road_warrior_(computing))). A few tips if you're doing so:
|
||||
|
||||
- Make sure you forward any [relevant incoming ports](/docs/firewalls.md#external-firewall) to the Algo server from your router;
|
||||
- Change `BetweenClients_DROP` in `config.cfg` to `false`, and also consider changing `block_smb` and `block_netbios` to `false`;
|
||||
- If you want to use a DNS server on your LAN to resolve local domain names properly (e.g. a Pi-hole), set the `dns_encryption` flag in `config.cfg` to `false`, and change `dns_servers` to the local DNS server IP (i.e. `192.168.1.2`).
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
Algo officially supports the [cloud providers listed here](https://github.com/trailofbits/algo/blob/master/README.md#deploy-the-algo-server). If you want to deploy Algo on another virtual hosting provider, that provider must support:
|
||||
|
||||
1. the base operating system image that Algo uses (Ubuntu 18.04, 19.04), and
|
||||
1. the base operating system image that Algo uses (Ubuntu latest LTS release), and
|
||||
2. a minimum of certain kernel modules required for the strongSwan IPsec server.
|
||||
|
||||
Please see the [Required Kernel Modules](https://wiki.strongswan.org/projects/strongswan/wiki/KernelModules) documentation from strongSwan for a list of the specific required modules and a script to check for them. As a first step, we recommend running their shell script to determine initial compatibility with your new hosting provider.
|
||||
|
|
25
docs/faq.md
25
docs/faq.md
|
@ -1,6 +1,7 @@
|
|||
# FAQ
|
||||
|
||||
* [Has Algo been audited?](#has-algo-been-audited)
|
||||
* [What's the current status of WireGuard?](#whats-the-current-status-of-wireguard)
|
||||
* [Why aren't you using Tor?](#why-arent-you-using-tor)
|
||||
* [Why aren't you using Racoon, LibreSwan, or OpenSwan?](#why-arent-you-using-racoon-libreswan-or-openswan)
|
||||
* [Why aren't you using a memory-safe or verified IKE daemon?](#why-arent-you-using-a-memory-safe-or-verified-ike-daemon)
|
||||
|
@ -11,14 +12,16 @@
|
|||
* [Can DNS filtering be disabled?](#can-dns-filtering-be-disabled)
|
||||
* [Wasn't IPSEC backdoored by the US government?](#wasnt-ipsec-backdoored-by-the-us-government)
|
||||
* [What inbound ports are used?](#what-inbound-ports-are-used)
|
||||
* [How do I monitor user activity?](#how-do-i-monitor-user-activity)
|
||||
* [How do I reach another connected client?](#how-do-i-reach-another-connected-client)
|
||||
|
||||
## Has Algo been audited?
|
||||
|
||||
No. This project is under active development. We're happy to [accept and fix issues](https://github.com/trailofbits/algo/issues) as they are identified. Use Algo at your own risk. If you find a security issue of any severity, please [contact us on Slack](https://empireslacking.herokuapp.com).
|
||||
No. This project is under active development. We're happy to [accept and fix issues](https://github.com/trailofbits/algo/issues) as they are identified. Use Algo at your own risk. If you find a security issue of any severity, please [contact us on Slack](https://slack.empirehacking.nyc).
|
||||
|
||||
## What's the current status of WireGuard?
|
||||
|
||||
[WireGuard is a work in progress](https://www.wireguard.com/#work-in-progress). It has undergone [substantial](https://www.wireguard.com/formal-verification/) security review, however, its authors are appropriately cautious about its safety and the protocol is subject to change. As a result, WireGuard does not yet have a "stable" 1.0 release. Releases are tagged with their build date -- "0.0.YYYYMMDD" -- and users should be advised to apply new updates when they are available.
|
||||
[WireGuard reached "stable" 1.0.0 release](https://lists.zx2c4.com/pipermail/wireguard/2020-March/005206.html) in Spring 2020. It has undergone [substantial](https://www.wireguard.com/formal-verification/) security review.
|
||||
|
||||
## Why aren't you using Tor?
|
||||
|
||||
|
@ -42,11 +45,11 @@ Alpine Linux is not supported out-of-the-box by any major cloud provider. We are
|
|||
|
||||
## I deployed an Algo server. Can you update it with new features?
|
||||
|
||||
No. By design, the Algo development team has no access to any Algo server that our users haved deployed. We cannot modify the configuration, update the software, or sniff the traffic that goes through your personal Algo VPN server. This prevents scenarios where we are legally compelled or hacked to push down backdoored updates that surveil our users.
|
||||
No. By design, the Algo development team has no access to any Algo server that our users have deployed. We cannot modify the configuration, update the software, or sniff the traffic that goes through your personal Algo VPN server. This prevents scenarios where we are legally compelled or hacked to push down backdoored updates that surveil our users.
|
||||
|
||||
As a result, once your Algo server has been deployed, it is yours to maintain. If you want to take advantage of new features available in the current release of Algo, then you have two options. You can use the [SSH administrative interface](/README.md#ssh-into-algo-server) to make the changes you want on your own or you can shut down the server and deploy a new one (recommended).
|
||||
As a result, once your Algo server has been deployed, it is yours to maintain. It will use unattended-upgrades by default to apply security and feature updates to Ubuntu, as well as to the core VPN software of strongSwan, dnscrypt-proxy and WireGuard. However, if you want to take advantage of new features available in the current release of Algo, then you have two options. You can use the [SSH administrative interface](/README.md#ssh-into-algo-server) to make the changes you want on your own or you can shut down the server and deploy a new one (recommended).
|
||||
|
||||
In the future, we will make it easier for users who want to update their own servers by providing official releases of Algo. Each official release will summarize the changes from the last release to make it easier to follow along with them.
|
||||
As an extension of this rationale, most configuration options (other than users) available in `config.cfg` can only be set at the time of initial deployment.
|
||||
|
||||
## Where did the name "Algo" come from?
|
||||
|
||||
|
@ -54,7 +57,7 @@ Algo is short for "Al Gore", the **V**ice **P**resident of **N**etworks everywhe
|
|||
|
||||
## Can DNS filtering be disabled?
|
||||
|
||||
You can temporarily disable DNS filtering for all IPsec clients at once with the following workaround: SSH to your Algo server (using the 'shell access' command printed upon a successful deployment), edit `/etc/ipsec.conf`, and change `rightdns=<random_ip>` to `rightdns=8.8.8.8`. Then run `sudo systemctl restart strongswan`. DNS filtering for Wireguard clients has to be disabled on each client device separately by modifying the settings in the app, or by directly modifying the `DNS` setting on the `clientname.conf` file. If all else fails, we recommend deploying a new Algo server without the adblocking feature enabled.
|
||||
You can temporarily disable DNS filtering for all IPsec clients at once with the following workaround: SSH to your Algo server (using the 'shell access' command printed upon a successful deployment), edit `/etc/ipsec.conf`, and change `rightdns=<random_ip>` to `rightdns=8.8.8.8`. Then run `sudo systemctl restart strongswan`. DNS filtering for WireGuard clients has to be disabled on each client device separately by modifying the settings in the app, or by directly modifying the `DNS` setting on the `clientname.conf` file. If all else fails, we recommend deploying a new Algo server without the adblocking feature enabled.
|
||||
|
||||
## Wasn't IPSEC backdoored by the US government?
|
||||
|
||||
|
@ -78,4 +81,12 @@ No.
|
|||
|
||||
## What inbound ports are used?
|
||||
|
||||
You should only need 22/TCP, 500/UDP, 4500/UDP, and 51820/UDP opened on any firewall that sits between your clients and your Algo server. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information.
|
||||
You should only need 4160/TCP, 500/UDP, 4500/UDP, and 51820/UDP opened on any firewall that sits between your clients and your Algo server. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information.
|
||||
|
||||
## How do I monitor user activity?
|
||||
|
||||
Your Algo server will track IPsec client logins by default in `/var/log/syslog`. This will give you client names, date/time of connection and reconnection, and what IP addresses they're connecting from. This can be disabled entirely by setting `strongswan_log_level` to `-1` in `config.cfg`. WireGuard doesn't save any logs, but entering `sudo wg` on the server will give you the last endpoint and contact time of each client. Disabling this is [paradoxically difficult](https://git.zx2c4.com/blind-operator-mode/about/). There isn't any out-of-the-box way to monitor actual user _activity_ (e.g. websites browsed, etc.)
|
||||
|
||||
## How do I reach another connected client?
|
||||
|
||||
By default, your Algo server doesn't allow connections between connected clients. This can be changed at the time of deployment by setting the `BetweenClients_DROP` flag to `false` in `config.cfg`. See the ["Road Warrior" instructions](/docs/deploy-to-ubuntu.md#road-warrior-setup) for more details.
|
||||
|
|
|
@ -24,7 +24,7 @@ Any external firewall must be configured to pass the following incoming ports ov
|
|||
|
||||
Port | Protocol | Description | Related variables in `config.cfg`
|
||||
---- | -------- | ----------- | ---------------------------------
|
||||
22 | TCP | Secure Shell (SSH) | None
|
||||
4160 | TCP | Secure Shell (SSH) | `ssh_port` (**cloud** only; for **local** port remains 22)
|
||||
500 | UDP | IPsec IKEv2 | `ipsec_enabled`
|
||||
4500 | UDP | IPsec NAT-T | `ipsec_enabled`
|
||||
51820 | UDP | WireGuard | `wireguard_enabled`, `wireguard_port`
|
||||
|
|
BIN
docs/images/cloud-alternative-ingress-ip.png
Normal file
BIN
docs/images/cloud-alternative-ingress-ip.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 34 KiB |
Binary file not shown.
Before Width: | Height: | Size: 121 KiB After Width: | Height: | Size: 70 KiB |
|
@ -1,28 +1,31 @@
|
|||
# Algo VPN documentation
|
||||
|
||||
* Deployment instructions
|
||||
- Deploy from [Fedora Workstation (26)](deploy-from-fedora-workstation.md)
|
||||
- Deploy from [RedHat/CentOS 6.x](deploy-from-redhat-centos6.md)
|
||||
- Deploy from [Windows](deploy-from-windows.md)
|
||||
- Deploy from a [Docker container](deploy-from-docker.md)
|
||||
- Deploy from [Ansible](deploy-from-ansible.md) non-interactively
|
||||
- Deploy onto a [cloud server at time of creation](deploy-from-script-or-cloud-init-to-localhost.md)
|
||||
- Deploy onto a [cloud server at time of creation with shell script or cloud-init](deploy-from-script-or-cloud-init-to-localhost.md)
|
||||
- Deploy from [macOS](deploy-from-macos.md)
|
||||
- Deploy from [Google Cloud Shell](deploy-from-cloudshell.md)
|
||||
* Client setup
|
||||
- Setup [Android](client-android.md) clients
|
||||
- Setup [Generic/Linux](client-linux.md) clients with Ansible
|
||||
- Setup Ubuntu clients to use [WireGuard](client-linux-wireguard.md)
|
||||
- Setup Apple devices to use [IPSEC](client-apple-ipsec.md)
|
||||
- Setup Macs running macOS 10.13 or older to use [Wireguard](client-macos-wireguard.md)
|
||||
- Manual Windows 10 client setup for [IPSEC](client-windows.md)
|
||||
- Setup Linux clients to use [IPsec](client-linux-ipsec.md)
|
||||
- Setup Apple devices to use [IPsec](client-apple-ipsec.md)
|
||||
- Setup Macs running macOS 10.13 or older to use [WireGuard](client-macos-wireguard.md)
|
||||
* Cloud provider setup
|
||||
- Configure [Amazon EC2](cloud-amazon-ec2.md)
|
||||
- Configure [Azure](cloud-azure.md)
|
||||
- Configure [DigitalOcean](cloud-do.md)
|
||||
- Configure [Google Cloud Platform](cloud-gce.md)
|
||||
- Configure [Vultr](cloud-vultr.md)
|
||||
- Configure [CloudStack](cloud-cloudstack.md)
|
||||
- Configure [Hetzner Cloud](cloud-hetzner.md)
|
||||
* Advanced Deployment
|
||||
- Deploy to your own [FreeBSD](deploy-to-freebsd.md) server
|
||||
- Deploy to your own [Ubuntu](deploy-to-ubuntu.md) server
|
||||
- Deploy to your own [Ubuntu](deploy-to-ubuntu.md) server, and road warrior setup
|
||||
- Deploy to an [unsupported cloud provider](deploy-to-unsupported-cloud.md)
|
||||
* [FAQ](faq.md)
|
||||
* [Firewalls](firewalls.md)
|
||||
|
|
|
@ -9,16 +9,21 @@ First of all, check [this](https://github.com/trailofbits/algo#features) and ens
|
|||
* [Error: "TypeError: must be str, not bytes"](#error-typeerror-must-be-str-not-bytes)
|
||||
* [Error: "ansible-playbook: command not found"](#error-ansible-playbook-command-not-found)
|
||||
* [Error: "Could not fetch URL ... TLSV1_ALERT_PROTOCOL_VERSION](#could-not-fetch-url--tlsv1_alert_protocol_version)
|
||||
* [Fatal: "Failed to validate the SSL certificate for ..."](#fatal-failed-to-validate-the-SSL-certificate)
|
||||
* [Bad owner or permissions on .ssh](#bad-owner-or-permissions-on-ssh)
|
||||
* [The region you want is not available](#the-region-you-want-is-not-available)
|
||||
* [AWS: SSH permission denied with an ECDSA key](#aws-ssh-permission-denied-with-an-ecdsa-key)
|
||||
* [AWS: "Deploy the template" fails with CREATE_FAILED](#aws-deploy-the-template-fails-with-create_failed)
|
||||
* [AWS: not authorized to perform: cloudformation:UpdateStack](#aws-not-authorized-to-perform-cloudformationupdatestack)
|
||||
* [DigitalOcean: error tagging resource 'xxxxxxxx': param is missing or the value is empty: resources](#digitalocean-error-tagging-resource)
|
||||
* [Azure: The client xxx with object id xxx does not have authorization to perform action Microsoft.Resources/subscriptions/resourcegroups/write' over scope](#azure-deployment-permissions-error)
|
||||
* [Windows: The value of parameter linuxConfiguration.ssh.publicKeys.keyData is invalid](#windows-the-value-of-parameter-linuxconfigurationsshpublickeyskeydata-is-invalid)
|
||||
* [Docker: Failed to connect to the host via ssh](#docker-failed-to-connect-to-the-host-via-ssh)
|
||||
* [Error: Failed to create symlinks for deploying to localhost](#error-failed-to-create-symlinks-for-deploying-to-localhost)
|
||||
* [Wireguard: Unable to find 'configs/...' in expected paths](#wireguard-unable-to-find-configs-in-expected-paths)
|
||||
* [Ubuntu Error: "unable to write 'random state'" when generating CA password](#ubuntu-error-unable-to-write-random-state-when-generating-ca-password)
|
||||
* [Timeout when waiting for search string OpenSSH in xxx.xxx.xxx.xxx:4160](#old-networking-firewall-in-place)
|
||||
* [Linode Error: "Unable to query the Linode API. Saw: 400: The requested distribution is not supported by this stackscript.; "](#linode-error-unable-to-query-the-linode-api-saw-400-the-requested-distribution-is-not-supported-by-this-stackscript)
|
||||
* [Connection Problems](#connection-problems)
|
||||
* [I'm blocked or get CAPTCHAs when I access certain websites](#im-blocked-or-get-captchas-when-i-access-certain-websites)
|
||||
* [I want to change the list of trusted Wifi networks on my Apple device](#i-want-to-change-the-list-of-trusted-wifi-networks-on-my-apple-device)
|
||||
|
@ -36,6 +41,10 @@ First of all, check [this](https://github.com/trailofbits/algo#features) and ens
|
|||
|
||||
Look here if you have a problem running the installer to set up a new Algo server.
|
||||
|
||||
### Python version is not supported
|
||||
|
||||
The minimum Python version required to run Algo is 3.8. Most modern operating systems should have it by default, but if the OS you are using doesn't meet the requirements, you have to upgrade. See the official documentation for your OS, or manually download it from https://www.python.org/downloads/. Otherwise, you may [deploy from docker](deploy-from-docker.md)
|
||||
|
||||
### Error: "You have not agreed to the Xcode license agreements"
|
||||
|
||||
On macOS, you tried to install the dependencies with pip and encountered the following error:
|
||||
|
@ -105,25 +114,22 @@ Command /usr/bin/python -c "import setuptools, tokenize;__file__='/private/tmp/p
|
|||
Storing debug log for failure in /Users/algore/Library/Logs/pip.log
|
||||
```
|
||||
|
||||
You are running an old version of `pip` that cannot download the binary `cryptography` dependency. Upgrade to a new version of `pip` by running `sudo pip install -U pip`.
|
||||
|
||||
### Error: "TypeError: must be str, not bytes"
|
||||
|
||||
You tried to install Algo and you see many repeated errors referencing `TypeError`, such as `TypeError: '>=' not supported between instances of 'TypeError' and 'int'` and `TypeError: must be str, not bytes`. For example:
|
||||
|
||||
```
|
||||
TASK [Wait until SSH becomes ready...] *****************************************
|
||||
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: TypeError: must be str, not bytes
|
||||
fatal: [localhost -> localhost]: FAILED! => {"changed": false, "failed": true, "module_stderr": "Traceback (most recent call last):\n File \"/var/folders/x_/nvr61v455qq98vp22k5r5vm40000gn/T/ansible_6sdjysth/ansible_module_wait_for.py\", line 538, in <module>\n main()\n File \"/var/folders/x_/nvr61v455qq98vp22k5r5vm40000gn/T/ansible_6sdjysth/ansible_module_wait_for.py\", line 483, in main\n data += response\nTypeError: must be str, not bytes\n", "module_stdout": "", "msg": "MODULE FAILURE"}
|
||||
```
|
||||
|
||||
You may be trying to run Algo with Python3. Algo uses [Ansible](https://github.com/ansible/ansible) which has issues with Python3, although this situation is improving over time. Try running Algo with Python2 to fix this issue. Open your terminal and `cd` to the directory with Algo, then run: ``virtualenv -p `which python2.7` env && source env/bin/activate && pip install -r requirements.txt``
|
||||
You are running an old version of `pip` that cannot download the binary `cryptography` dependency. Upgrade to a new version of `pip` by running `sudo python3 -m pip install -U pip`.
|
||||
|
||||
### Error: "ansible-playbook: command not found"
|
||||
|
||||
You tried to install Algo and you see an error that reads "ansible-playbook: command not found."
|
||||
|
||||
You did not finish step 4 in the installation instructions, "[Install Algo's remaining dependencies](https://github.com/trailofbits/algo#deploy-the-algo-server)." Algo depends on [Ansible](https://github.com/ansible/ansible), an automation framework, and this error indicates that you do not have Ansible installed. Ansible is installed by `pip` when you run `python -m pip install -r requirements.txt`. You must complete the installation instructions to run the Algo server deployment process.
|
||||
You did not finish step 4 in the installation instructions, "[Install Algo's remaining dependencies](https://github.com/trailofbits/algo#deploy-the-algo-server)." Algo depends on [Ansible](https://github.com/ansible/ansible), an automation framework, and this error indicates that you do not have Ansible installed. Ansible is installed by `pip` when you run `python3 -m pip install -r requirements.txt`. You must complete the installation instructions to run the Algo server deployment process.
|
||||
|
||||
### Fatal: "Failed to validate the SSL certificate"
|
||||
|
||||
You received a message like this:
|
||||
```
|
||||
fatal: [localhost]: FAILED! => {"changed": false, "msg": "Failed to validate the SSL certificate for api.digitalocean.com:443. Make sure your managed systems have a valid CA certificate installed. You can use validate_certs=False if you do not need to confirm the servers identity but this is unsafe and not recommended. Paths checked for this platform: /etc/ssl/certs, /etc/ansible, /usr/local/etc/openssl. The exception msg was: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1076).", "status": -1, "url": "https://api.digitalocean.com/v2/regions"}
|
||||
```
|
||||
|
||||
Your local system does not have a CA certificate that can validate the cloud provider's API. Are you using MacPorts instead of Homebrew? The MacPorts openssl installation does not include a CA certificate, but you can fix this by installing the [curl-ca-bundle](https://andatche.com/articles/2012/02/fixing-ssl-ca-certificates-with-openssl-from-macports/) port with `port install curl-ca-bundle`. That should do the trick.
|
||||
|
||||
### Could not fetch URL ... TLSV1_ALERT_PROTOCOL_VERSION
|
||||
|
||||
|
@ -137,9 +143,9 @@ No matching distribution found for SecretStorage<3 (from -r requirements.txt (li
|
|||
|
||||
It's time to upgrade your python.
|
||||
|
||||
`brew upgrade python2`
|
||||
`brew upgrade python3`
|
||||
|
||||
You can also download python 2.7.x from python.org.
|
||||
You can also download python 3.7.x from python.org.
|
||||
|
||||
### Bad owner or permissions on .ssh
|
||||
|
||||
|
@ -221,6 +227,40 @@ The error is caused because Digital Ocean changed its API to treat the tag argum
|
|||
5. Finally run `doctl compute tag list` to make sure that the tag has been deleted
|
||||
6. Run algo as directed
|
||||
|
||||
### Azure: No such file or directory: '/home/username/.azure/azureProfile.json'
|
||||
|
||||
```
|
||||
TASK [cloud-azure : Create AlgoVPN Server] *****************************************************************************************************************************************************************
|
||||
An exception occurred during task execution. To see the full traceback, use -vvv.
|
||||
The error was: FileNotFoundError: [Errno 2] No such file or directory: '/home/ubuntu/.azure/azureProfile.json'
|
||||
fatal: [localhost]: FAILED! => {"changed": false, "module_stderr": "Traceback (most recent call last):
|
||||
File \"/usr/local/lib/python3.6/dist-packages/azure/cli/core/_session.py\", line 39, in load
|
||||
with codecs_open(self.filename, 'r', encoding=self._encoding) as f:
|
||||
File \"/usr/lib/python3.6/codecs.py\", line 897, in open\n file = builtins.open(filename, mode, buffering)
|
||||
FileNotFoundError: [Errno 2] No such file or directory: '/home/ubuntu/.azure/azureProfile.json'
|
||||
", "module_stdout": "", "msg": "MODULE FAILURE
|
||||
See stdout/stderr for the exact error", "rc": 1}
|
||||
```
|
||||
|
||||
This happens when your machine is not authenticated with the Azure cloud. Follow this [guide](https://trailofbits.github.io/algo/cloud-azure.html) to configure your environment.
|
||||
|
||||
### Azure: Deployment Permissions Error
|
||||
|
||||
The AAD Application Registration (aka, the 'Service Principal', where you got the ClientId) needs permission to create the resources for the subscription. Otherwise, you will get the following error when you run the Ansible deploy script:
|
||||
|
||||
```
|
||||
fatal: [localhost]: FAILED! => {"changed": false, "msg": "Resource group create_or_update failed with status code: 403 and message: The client 'xxxxx' with object id 'THE_OBJECT_ID' does not have authorization to perform action 'Microsoft.Resources/subscriptions/resourcegroups/write' over scope '/subscriptions/THE_SUBSCRIPTION_ID/resourcegroups/algo' or the scope is invalid. If access was recently granted, please refresh your credentials."}
|
||||
```
|
||||
|
||||
The solution for this is to open the Azure CLI and run the following command to grant contributor role to the Service Principal:
|
||||
|
||||
```
|
||||
az role assignment create --assignee-object-id THE_OBJECT_ID --scope subscriptions/THE_SUBSCRIPTION_ID --role contributor
|
||||
```
|
||||
|
||||
After this is applied, the Service Principal has permissions to create the resources and you can re-run `ansible-playbook main.yml` to complete the deployment.
|
||||
|
||||
|
||||
### Windows: The value of parameter linuxConfiguration.ssh.publicKeys.keyData is invalid
|
||||
|
||||
You tried to deploy Algo from Windows and you received an error like this one:
|
||||
|
@ -254,6 +294,41 @@ You need to add the following to the ansible.cfg in repo root:
|
|||
control_path_dir=/dev/shm/ansible_control_path
|
||||
```
|
||||
|
||||
### Error: Failed to create symlinks for deploying to localhost
|
||||
|
||||
You tried to run Algo and you received an error like this one:
|
||||
|
||||
```
|
||||
TASK [Create a symlink if deploying to localhost] ********************************************************************
|
||||
fatal: [localhost]: FAILED! => {"changed": false, "gid": 1000, "group": "ubuntu", "mode": "0775", "msg": "the directory configs/localhost is not empty, refusing to convert it", "owner": "ubuntu", "path": "configs/localhost", "size": 4096, "state": "directory", "uid": 1000}
|
||||
included: /home/ubuntu/algo-master/playbooks/rescue.yml for localhost
|
||||
|
||||
TASK [debug] *********************************************************************************************************
|
||||
ok: [localhost] => {
|
||||
"fail_hint": [
|
||||
"Sorry, but something went wrong!",
|
||||
"Please check the troubleshooting guide.",
|
||||
"https://trailofbits.github.io/algo/troubleshooting.html"
|
||||
]
|
||||
}
|
||||
|
||||
TASK [Fail the installation] *****************************************************************************************
|
||||
```
|
||||
This error is usually encountered when using the local install option and `localhost` is provided in answer to this question, which is expecting an IP address or domain name of your server:
|
||||
```
|
||||
Enter the public IP address or domain name of your server: (IMPORTANT! This is used to verify the certificate)
|
||||
[localhost]
|
||||
:
|
||||
```
|
||||
|
||||
You should remove the files in /etc/wireguard/ and configs/ as follows:
|
||||
```ssh
|
||||
sudo rm -rf /etc/wireguard/*
|
||||
rm -rf configs/*
|
||||
```
|
||||
|
||||
And then immediately re-run `./algo` and provide a domain name or IP address in response to the question referenced above.
|
||||
|
||||
### Wireguard: Unable to find 'configs/...' in expected paths
|
||||
|
||||
You tried to run Algo and you received an error like this one:
|
||||
|
@ -264,10 +339,11 @@ TASK [wireguard : Generate public keys] ****************************************
|
|||
|
||||
fatal: [localhost]: FAILED! => {"msg": "An unhandled exception occurred while running the lookup plugin 'file'. Error was a <class 'ansible.errors.AnsibleError'>, original message: could not locate file in lookup: configs/xxx.xxx.xxx.xxx/wireguard//private/dan"}
|
||||
```
|
||||
This error is usually hit when using the local install option on a server that isn't Ubuntu 18.04. You should upgrade your server to Ubuntu 18.04. If this doesn't work, try removing `*.lock` files at /etc/wireguard/ as follows:
|
||||
This error is usually hit when using the local install option on a server that isn't Ubuntu 18.04 or later. You should upgrade your server to Ubuntu 18.04 or later. If this doesn't work, try removing files in /etc/wireguard/ and the configs directories as follows:
|
||||
|
||||
```ssh
|
||||
sudo rm -rf /etc/wireguard/*.lock
|
||||
sudo rm -rf /etc/wireguard/*
|
||||
rm -rf configs/*
|
||||
```
|
||||
Then immediately re-run `./algo`.
|
||||
|
||||
|
@ -288,6 +364,32 @@ sudo chown $USER:$USER $HOME/.rnd
|
|||
|
||||
Now, run Algo again.
|
||||
|
||||
### Old Networking Firewall In Place
|
||||
|
||||
You may see the following output when attempting to run ./algo from your localhost:
|
||||
|
||||
```
|
||||
TASK [Wait until SSH becomes ready...] **********************************************************************************************************************
|
||||
fatal: [localhost]: FAILED! => {"changed": false, "elapsed": 321, "msg": "Timeout when waiting for search string OpenSSH in xxx.xxx.xxx.xxx:4160"}
|
||||
included: /home/<username>/algo/algo/playbooks/rescue.yml for localhost
|
||||
|
||||
TASK [debug] ************************************************************************************************************************************************
|
||||
ok: [localhost] => {
|
||||
"fail_hint": [
|
||||
"Sorry, but something went wrong!",
|
||||
"Please check the troubleshooting guide.",
|
||||
"https://trailofbits.github.io/algo/troubleshooting.html"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
If you see this error then one possible explanation is that you have a previous firewall configured in your cloud hosting provider which needs to be either updated or ideally removed. Removing this can often fix this issue.
|
||||
|
||||
### Linode Error: "Unable to query the Linode API. Saw: 400: The requested distribution is not supported by this stackscript.; "
|
||||
|
||||
StackScript is a custom deployment script that defines a set of configurations for a Linode instance (e.g. which distribution, specs, etc.). If you used algo with default values in the past deployments, a stackscript that would've been created is 're-used' in the deployment process (in fact, go see 'create Linodes' and under 'StackScripts' tab). Thus, there's a little chance that your deployment process will generate this 'unsupported stackscript' error due to a pre-existing StackScript that doesn't support a particular configuration setting or value due to an 'old' stackscript. The quickest solution is just to change the name of your deployment from the default value of 'algo' (or any other name that you've used before, again see the dashboard) and re-run the deployment.
|
||||
|
||||
|
||||
## Connection Problems
|
||||
|
||||
Look here if you deployed an Algo server but now have a problem connecting to it with a client.
|
||||
|
@ -414,32 +516,6 @@ Certain cloud providers (like AWS Lightsail) don't assign an IPv6 address to you
|
|||
|
||||
Manually disconnecting and then reconnecting should restore your connection. To solve this, you need to either "force IPv4 connection" if available on your phone, or install an IPv4 APN, which might be available from your carrier tech support. T-mobile's is available [for iOS here under "iOS IPv4/IPv6 fix"](https://www.reddit.com/r/tmobile/wiki/index), and [here is a walkthrough for Android phones](https://www.myopenrouter.com/article/vpn-connections-not-working-t-mobile-heres-how-fix).
|
||||
|
||||
### Error: name 'basestring' is not defined
|
||||
|
||||
```
|
||||
TASK [cloud-digitalocean : Creating a droplet...] *******************************************
|
||||
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: NameError: name 'basestring' is not defined
|
||||
fatal: [localhost]: FAILED! => {"changed": false, "msg": "name 'basestring' is not defined"}
|
||||
```
|
||||
|
||||
If you get something like the above it's likely you're not using a python2 virtualenv.
|
||||
|
||||
Ensure running `python2.7` drops you into a python 2 shell (it looks something like this)
|
||||
|
||||
```
|
||||
user@homebook ~ $ python2.7
|
||||
Python 2.7.10 (default, Feb 7 2017, 00:08:15)
|
||||
[GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)] on darwin
|
||||
Type "help", "copyright", "credits" or "license" for more information.
|
||||
>>>
|
||||
```
|
||||
|
||||
Then rerun the dependency installation explicitly using python 2.7
|
||||
|
||||
```
|
||||
python2.7 -m virtualenv --python=`which python2.7` env && source env/bin/activate && python2.7 -m pip install -U pip && python2.7 -m pip install -r requirements.txt
|
||||
```
|
||||
|
||||
### IPsec: Difficulty connecting through router
|
||||
|
||||
Some routers treat IPsec connections specially because older versions of IPsec did not work properly through [NAT](https://en.wikipedia.org/wiki/Network_address_translation). If you're having problems connecting to your AlgoVPN through a specific router using IPsec you might need to change some settings on the router.
|
||||
|
@ -454,4 +530,4 @@ If your router runs [pfSense](https://www.pfsense.org) and a single IPsec client
|
|||
|
||||
## I have a problem not covered here
|
||||
|
||||
If you have an issue that you cannot solve with the guidance here, [join our Gitter](https://gitter.im/trailofbits/algo) and ask for help. If you think you found a new issue in Algo, [file an issue](https://github.com/trailofbits/algo/issues/new).
|
||||
If you have an issue that you cannot solve with the guidance here, [create a new discussion](https://github.com/trailofbits/algo/discussions) and ask for help. If you think you found a new issue in Algo, [file an issue](https://github.com/trailofbits/algo/issues/new).
|
||||
|
|
29
files/cloud-init/base.sh
Normal file
29
files/cloud-init/base.sh
Normal file
|
@ -0,0 +1,29 @@
|
|||
#!/bin/sh
|
||||
set -eux
|
||||
|
||||
# shellcheck disable=SC2230
|
||||
which sudo || until \
|
||||
apt-get update -y && \
|
||||
apt-get install sudo -yf --install-suggests; do
|
||||
sleep 3
|
||||
done
|
||||
|
||||
getent passwd algo || useradd -m -d /home/algo -s /bin/bash -G adm -p '!' algo
|
||||
|
||||
(umask 337 && echo "algo ALL=(ALL) NOPASSWD:ALL" >/etc/sudoers.d/10-algo-user)
|
||||
|
||||
cat <<EOF >/etc/ssh/sshd_config
|
||||
{{ lookup('template', 'files/cloud-init/sshd_config') }}
|
||||
EOF
|
||||
|
||||
test -d /home/algo/.ssh || sudo -u algo mkdir -m 0700 /home/algo/.ssh
|
||||
echo "{{ lookup('file', '{{ SSH_keys.public }}') }}" | (sudo -u algo tee /home/algo/.ssh/authorized_keys && chmod 0600 /home/algo/.ssh/authorized_keys)
|
||||
|
||||
ufw --force reset
|
||||
|
||||
# shellcheck disable=SC2015
|
||||
dpkg -l sshguard && until apt-get remove -y --purge sshguard; do
|
||||
sleep 3
|
||||
done || true
|
||||
|
||||
systemctl restart sshd.service
|
30
files/cloud-init/base.yml
Normal file
30
files/cloud-init/base.yml
Normal file
|
@ -0,0 +1,30 @@
|
|||
#cloud-config
|
||||
output: {all: '| tee -a /var/log/cloud-init-output.log'}
|
||||
|
||||
package_update: true
|
||||
package_upgrade: true
|
||||
|
||||
packages:
|
||||
- sudo
|
||||
|
||||
users:
|
||||
- default
|
||||
- name: algo
|
||||
homedir: /home/algo
|
||||
sudo: ALL=(ALL) NOPASSWD:ALL
|
||||
groups: adm,netdev
|
||||
shell: /bin/bash
|
||||
lock_passwd: true
|
||||
ssh_authorized_keys:
|
||||
- "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
|
||||
write_files:
|
||||
- path: /etc/ssh/sshd_config
|
||||
content: |
|
||||
{{ lookup('template', 'files/cloud-init/sshd_config') | indent(width=6) }}
|
||||
|
||||
runcmd:
|
||||
- set -x
|
||||
- ufw --force reset
|
||||
- sudo apt-get remove -y --purge sshguard || true
|
||||
- systemctl restart sshd.service
|
10
files/cloud-init/sshd_config
Normal file
10
files/cloud-init/sshd_config
Normal file
|
@ -0,0 +1,10 @@
|
|||
Port {{ ssh_port }}
|
||||
AllowGroups algo
|
||||
PermitRootLogin no
|
||||
PasswordAuthentication no
|
||||
ChallengeResponseAuthentication no
|
||||
UsePAM yes
|
||||
X11Forwarding yes
|
||||
PrintMotd no
|
||||
AcceptEnv LANG LC_*
|
||||
Subsystem sftp /usr/lib/openssh/sftp-server
|
23
input.yml
23
input.yml
|
@ -14,12 +14,15 @@
|
|||
- { name: DigitalOcean, alias: digitalocean }
|
||||
- { name: Amazon Lightsail, alias: lightsail }
|
||||
- { name: Amazon EC2, alias: ec2 }
|
||||
- { name: Vultr, alias: vultr }
|
||||
- { name: Microsoft Azure, alias: azure }
|
||||
- { name: Google Compute Engine, alias: gce }
|
||||
- { name: Hetzner Cloud, alias: hetzner }
|
||||
- { name: Vultr, alias: vultr }
|
||||
- { name: Scaleway, alias: scaleway }
|
||||
- { name: OpenStack (DreamCompute optimised), alias: openstack }
|
||||
- { name: Install to existing Ubuntu 18.04 or 19.04 server (Advanced), alias: local }
|
||||
- { name: CloudStack (Exoscale optimised), alias: cloudstack }
|
||||
- { name: Linode, alias: linode }
|
||||
- { name: Install to existing Ubuntu latest LTS server (for more advanced users), alias: local }
|
||||
vars_files:
|
||||
- config.cfg
|
||||
|
||||
|
@ -50,11 +53,11 @@
|
|||
when:
|
||||
- server_name is undefined
|
||||
- algo_provider != "local"
|
||||
- block:
|
||||
|
||||
- name: Cellular On Demand prompt
|
||||
pause:
|
||||
prompt: |
|
||||
Do you want macOS/iOS IPsec clients to enable "Connect On Demand" when connected to cellular networks?
|
||||
Do you want macOS/iOS clients to enable "Connect On Demand" when connected to cellular networks?
|
||||
[y/N]
|
||||
register: _ondemand_cellular
|
||||
when: ondemand_cellular is undefined
|
||||
|
@ -62,7 +65,7 @@
|
|||
- name: Wi-Fi On Demand prompt
|
||||
pause:
|
||||
prompt: |
|
||||
Do you want macOS/iOS IPsec clients to enable "Connect On Demand" when connected to Wi-Fi?
|
||||
Do you want macOS/iOS clients to enable "Connect On Demand" when connected to Wi-Fi?
|
||||
[y/N]
|
||||
register: _ondemand_wifi
|
||||
when: ondemand_wifi is undefined
|
||||
|
@ -70,13 +73,12 @@
|
|||
- name: Trusted Wi-Fi networks prompt
|
||||
pause:
|
||||
prompt: |
|
||||
List the names of any trusted Wi-Fi networks where macOS/iOS IPsec clients should not use "Connect On Demand"
|
||||
List the names of any trusted Wi-Fi networks where macOS/iOS clients should not use "Connect On Demand"
|
||||
(e.g., your home network. Comma-separated value, e.g., HomeNet,OfficeWifi,AlgoWiFi)
|
||||
register: _ondemand_wifi_exclude
|
||||
when:
|
||||
- ondemand_wifi_exclude is undefined
|
||||
- (ondemand_wifi|default(false)|bool) or
|
||||
(booleans_map[_ondemand_wifi.user_input|default(omit)]|default(false))
|
||||
- (ondemand_wifi|default(false)|bool) or (booleans_map[_ondemand_wifi.user_input|default(omit)]|default(false))
|
||||
|
||||
- name: Retain the PKI prompt
|
||||
pause:
|
||||
|
@ -84,8 +86,9 @@
|
|||
Do you want to retain the keys (PKI)? (required to add users in the future, but less secure)
|
||||
[y/N]
|
||||
register: _store_pki
|
||||
when: store_pki is undefined
|
||||
when: ipsec_enabled
|
||||
when:
|
||||
- store_pki is undefined
|
||||
- ipsec_enabled
|
||||
|
||||
- name: DNS adblocking prompt
|
||||
pause:
|
||||
|
|
25
install.sh
25
install.sh
|
@ -22,16 +22,7 @@ installRequirements() {
|
|||
export DEBIAN_FRONTEND=noninteractive
|
||||
apt-get update
|
||||
apt-get install \
|
||||
software-properties-common \
|
||||
git \
|
||||
build-essential \
|
||||
libssl-dev \
|
||||
libffi-dev \
|
||||
python-dev \
|
||||
python-pip \
|
||||
python-setuptools \
|
||||
python-virtualenv \
|
||||
bind9-host \
|
||||
python3-virtualenv \
|
||||
jq -y
|
||||
}
|
||||
|
||||
|
@ -39,18 +30,18 @@ getAlgo() {
|
|||
[ ! -d "algo" ] && git clone "https://github.com/${REPO_SLUG}" -b "${REPO_BRANCH}" algo
|
||||
cd algo
|
||||
|
||||
python -m virtualenv --python="$(command -v python2)" .venv
|
||||
python3 -m virtualenv --python="$(command -v python3)" .env
|
||||
# shellcheck source=/dev/null
|
||||
. .venv/bin/activate
|
||||
python -m pip install -U pip virtualenv
|
||||
python -m pip install -r requirements.txt
|
||||
. .env/bin/activate
|
||||
python3 -m pip install -U pip virtualenv
|
||||
python3 -m pip install -r requirements.txt
|
||||
}
|
||||
|
||||
publicIpFromInterface() {
|
||||
echo "Couldn't find a valid ipv4 address, using the first IP found on the interfaces as the endpoint."
|
||||
DEFAULT_INTERFACE="$(ip -4 route list match default | grep -Eo "dev .*" | awk '{print $2}')"
|
||||
ENDPOINT=$(ip -4 addr sh dev "$DEFAULT_INTERFACE" | grep -w inet | head -n1 | awk '{print $2}' | grep -oE '\b([0-9]{1,3}\.){3}[0-9]{1,3}\b')
|
||||
export ENDPOINT=$ENDPOINT
|
||||
export ENDPOINT="${ENDPOINT}"
|
||||
echo "Using ${ENDPOINT} as the endpoint"
|
||||
}
|
||||
|
||||
|
@ -66,7 +57,7 @@ publicIpFromMetadata() {
|
|||
fi
|
||||
|
||||
if echo "${ENDPOINT}" | grep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b"; then
|
||||
export ENDPOINT=$ENDPOINT
|
||||
export ENDPOINT="${ENDPOINT}"
|
||||
echo "Using ${ENDPOINT} as the endpoint"
|
||||
else
|
||||
publicIpFromInterface
|
||||
|
@ -78,7 +69,7 @@ deployAlgo() {
|
|||
|
||||
cd /opt/algo
|
||||
# shellcheck source=/dev/null
|
||||
. .venv/bin/activate
|
||||
. .env/bin/activate
|
||||
|
||||
export HOME=/root
|
||||
export ANSIBLE_LOCAL_TEMP=/root/.ansible/tmp
|
||||
|
|
|
@ -1,2 +1,2 @@
|
|||
[local]
|
||||
localhost ansible_connection=local ansible_python_interpreter=python
|
||||
localhost ansible_connection=local ansible_python_interpreter=python3
|
||||
|
|
288
library/digital_ocean_floating_ip.py
Normal file
288
library/digital_ocean_floating_ip.py
Normal file
|
@ -0,0 +1,288 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# (c) 2015, Patrick F. Marques <patrickfmarques@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: digital_ocean_floating_ip
|
||||
short_description: Manage DigitalOcean Floating IPs
|
||||
description:
|
||||
- Create/delete/assign a floating IP.
|
||||
version_added: "2.4"
|
||||
author: "Patrick Marques (@pmarques)"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the target.
|
||||
default: present
|
||||
choices: ['present', 'absent']
|
||||
ip:
|
||||
description:
|
||||
- Public IP address of the Floating IP. Used to remove an IP
|
||||
region:
|
||||
description:
|
||||
- The region that the Floating IP is reserved to.
|
||||
droplet_id:
|
||||
description:
|
||||
- The Droplet that the Floating IP has been assigned to.
|
||||
oauth_token:
|
||||
description:
|
||||
- DigitalOcean OAuth token.
|
||||
required: true
|
||||
notes:
|
||||
- Version 2 of DigitalOcean API is used.
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: "Create a Floating IP in region lon1"
|
||||
digital_ocean_floating_ip:
|
||||
state: present
|
||||
region: lon1
|
||||
|
||||
- name: "Create a Floating IP assigned to Droplet ID 123456"
|
||||
digital_ocean_floating_ip:
|
||||
state: present
|
||||
droplet_id: 123456
|
||||
|
||||
- name: "Delete a Floating IP with ip 1.2.3.4"
|
||||
digital_ocean_floating_ip:
|
||||
state: absent
|
||||
ip: "1.2.3.4"
|
||||
|
||||
'''
|
||||
|
||||
|
||||
RETURN = '''
|
||||
# Digital Ocean API info https://developers.digitalocean.com/documentation/v2/#floating-ips
|
||||
data:
|
||||
description: a DigitalOcean Floating IP resource
|
||||
returned: success and no resource constraint
|
||||
type: dict
|
||||
sample: {
|
||||
"action": {
|
||||
"id": 68212728,
|
||||
"status": "in-progress",
|
||||
"type": "assign_ip",
|
||||
"started_at": "2015-10-15T17:45:44Z",
|
||||
"completed_at": null,
|
||||
"resource_id": 758603823,
|
||||
"resource_type": "floating_ip",
|
||||
"region": {
|
||||
"name": "New York 3",
|
||||
"slug": "nyc3",
|
||||
"sizes": [
|
||||
"512mb",
|
||||
"1gb",
|
||||
"2gb",
|
||||
"4gb",
|
||||
"8gb",
|
||||
"16gb",
|
||||
"32gb",
|
||||
"48gb",
|
||||
"64gb"
|
||||
],
|
||||
"features": [
|
||||
"private_networking",
|
||||
"backups",
|
||||
"ipv6",
|
||||
"metadata"
|
||||
],
|
||||
"available": true
|
||||
},
|
||||
"region_slug": "nyc3"
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
import json
|
||||
import time
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.basic import env_fallback
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
from ansible.module_utils.digital_ocean import DigitalOceanHelper
|
||||
|
||||
class Response(object):
|
||||
|
||||
def __init__(self, resp, info):
|
||||
self.body = None
|
||||
if resp:
|
||||
self.body = resp.read()
|
||||
self.info = info
|
||||
|
||||
@property
|
||||
def json(self):
|
||||
if not self.body:
|
||||
if "body" in self.info:
|
||||
return json.loads(self.info["body"])
|
||||
return None
|
||||
try:
|
||||
return json.loads(self.body)
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
@property
|
||||
def status_code(self):
|
||||
return self.info["status"]
|
||||
|
||||
def wait_action(module, rest, ip, action_id, timeout=10):
|
||||
end_time = time.time() + 10
|
||||
while time.time() < end_time:
|
||||
response = rest.get('floating_ips/{0}/actions/{1}'.format(ip, action_id))
|
||||
status_code = response.status_code
|
||||
status = response.json['action']['status']
|
||||
# TODO: check status_code == 200?
|
||||
if status == 'completed':
|
||||
return True
|
||||
elif status == 'errored':
|
||||
module.fail_json(msg='Floating ip action error [ip: {0}: action: {1}]'.format(
|
||||
ip, action_id), data=json)
|
||||
|
||||
module.fail_json(msg='Floating ip action timeout [ip: {0}: action: {1}]'.format(
|
||||
ip, action_id), data=json)
|
||||
|
||||
|
||||
def core(module):
|
||||
api_token = module.params['oauth_token']
|
||||
state = module.params['state']
|
||||
ip = module.params['ip']
|
||||
droplet_id = module.params['droplet_id']
|
||||
|
||||
rest = DigitalOceanHelper(module)
|
||||
|
||||
if state in ('present'):
|
||||
if droplet_id is not None and module.params['ip'] is not None:
|
||||
# Lets try to associate the ip to the specified droplet
|
||||
associate_floating_ips(module, rest)
|
||||
else:
|
||||
create_floating_ips(module, rest)
|
||||
|
||||
elif state in ('absent'):
|
||||
response = rest.delete("floating_ips/{0}".format(ip))
|
||||
status_code = response.status_code
|
||||
json_data = response.json
|
||||
if status_code == 204:
|
||||
module.exit_json(changed=True)
|
||||
elif status_code == 404:
|
||||
module.exit_json(changed=False)
|
||||
else:
|
||||
module.exit_json(changed=False, data=json_data)
|
||||
|
||||
|
||||
def get_floating_ip_details(module, rest):
|
||||
ip = module.params['ip']
|
||||
|
||||
response = rest.get("floating_ips/{0}".format(ip))
|
||||
status_code = response.status_code
|
||||
json_data = response.json
|
||||
if status_code == 200:
|
||||
return json_data['floating_ip']
|
||||
else:
|
||||
module.fail_json(msg="Error assigning floating ip [{0}: {1}]".format(
|
||||
status_code, json_data["message"]), region=module.params['region'])
|
||||
|
||||
|
||||
def assign_floating_id_to_droplet(module, rest):
|
||||
ip = module.params['ip']
|
||||
|
||||
payload = {
|
||||
"type": "assign",
|
||||
"droplet_id": module.params['droplet_id'],
|
||||
}
|
||||
|
||||
response = rest.post("floating_ips/{0}/actions".format(ip), data=payload)
|
||||
status_code = response.status_code
|
||||
json_data = response.json
|
||||
if status_code == 201:
|
||||
wait_action(module, rest, ip, json_data['action']['id'])
|
||||
|
||||
module.exit_json(changed=True, data=json_data)
|
||||
else:
|
||||
module.fail_json(msg="Error creating floating ip [{0}: {1}]".format(
|
||||
status_code, json_data["message"]), region=module.params['region'])
|
||||
|
||||
|
||||
def associate_floating_ips(module, rest):
|
||||
floating_ip = get_floating_ip_details(module, rest)
|
||||
droplet = floating_ip['droplet']
|
||||
|
||||
# TODO: If already assigned to a droplet verify if is one of the specified as valid
|
||||
if droplet is not None and str(droplet['id']) in [module.params['droplet_id']]:
|
||||
module.exit_json(changed=False)
|
||||
else:
|
||||
assign_floating_id_to_droplet(module, rest)
|
||||
|
||||
|
||||
def create_floating_ips(module, rest):
|
||||
payload = {
|
||||
}
|
||||
floating_ip_data = None
|
||||
|
||||
if module.params['region'] is not None:
|
||||
payload["region"] = module.params['region']
|
||||
|
||||
if module.params['droplet_id'] is not None:
|
||||
payload["droplet_id"] = module.params['droplet_id']
|
||||
|
||||
floating_ips = rest.get_paginated_data(base_url='floating_ips?', data_key_name='floating_ips')
|
||||
|
||||
for floating_ip in floating_ips:
|
||||
if floating_ip['droplet'] and floating_ip['droplet']['id'] == module.params['droplet_id']:
|
||||
floating_ip_data = {'floating_ip': floating_ip}
|
||||
|
||||
if floating_ip_data:
|
||||
module.exit_json(changed=False, data=floating_ip_data)
|
||||
else:
|
||||
response = rest.post("floating_ips", data=payload)
|
||||
status_code = response.status_code
|
||||
json_data = response.json
|
||||
|
||||
if status_code == 202:
|
||||
module.exit_json(changed=True, data=json_data)
|
||||
else:
|
||||
module.fail_json(msg="Error creating floating ip [{0}: {1}]".format(
|
||||
status_code, json_data["message"]), region=module.params['region'])
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
state=dict(choices=['present', 'absent'], default='present'),
|
||||
ip=dict(aliases=['id'], required=False),
|
||||
region=dict(required=False),
|
||||
droplet_id=dict(required=False, type='int'),
|
||||
oauth_token=dict(
|
||||
no_log=True,
|
||||
# Support environment variable for DigitalOcean OAuth Token
|
||||
fallback=(env_fallback, ['DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN']),
|
||||
required=True,
|
||||
),
|
||||
validate_certs=dict(type='bool', default=True),
|
||||
timeout=dict(type='int', default=30),
|
||||
),
|
||||
required_if=[
|
||||
('state', 'delete', ['ip'])
|
||||
],
|
||||
mutually_exclusive=[
|
||||
['region', 'droplet_id']
|
||||
],
|
||||
)
|
||||
|
||||
core(module)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@ -1,139 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# Copyright 2013 Google Inc.
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: gce_region_facts
|
||||
version_added: "5.3"
|
||||
short_description: Gather facts about GCE regions.
|
||||
description:
|
||||
- Gather facts about GCE regions.
|
||||
options:
|
||||
service_account_email:
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- service account email
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
pem_file:
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- path to the pem file associated with the service account email
|
||||
This option is deprecated. Use 'credentials_file'.
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
credentials_file:
|
||||
version_added: "2.1.0"
|
||||
description:
|
||||
- path to the JSON file associated with the service account email
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
project_id:
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- your GCE project ID
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
|
||||
author: "Jack Ivanov (@jackivanov)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Gather facts about all regions
|
||||
- gce_region_facts:
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
regions:
|
||||
returned: on success
|
||||
description: >
|
||||
Each element consists of a dict with all the information related
|
||||
to that region.
|
||||
type: list
|
||||
sample: "[{
|
||||
"name": "asia-east1",
|
||||
"status": "UP",
|
||||
"zones": [
|
||||
{
|
||||
"name": "asia-east1-a",
|
||||
"status": "UP"
|
||||
},
|
||||
{
|
||||
"name": "asia-east1-b",
|
||||
"status": "UP"
|
||||
},
|
||||
{
|
||||
"name": "asia-east1-c",
|
||||
"status": "UP"
|
||||
}
|
||||
]
|
||||
}]"
|
||||
'''
|
||||
try:
|
||||
from libcloud.compute.types import Provider
|
||||
from libcloud.compute.providers import get_driver
|
||||
from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError
|
||||
_ = Provider.GCE
|
||||
HAS_LIBCLOUD = True
|
||||
except ImportError:
|
||||
HAS_LIBCLOUD = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.gce import gce_connect, unexpected_error_msg
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
service_account_email=dict(),
|
||||
pem_file=dict(type='path'),
|
||||
credentials_file=dict(type='path'),
|
||||
project_id=dict(),
|
||||
)
|
||||
)
|
||||
|
||||
if not HAS_LIBCLOUD:
|
||||
module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
|
||||
|
||||
gce = gce_connect(module)
|
||||
|
||||
changed = False
|
||||
gce_regions = []
|
||||
|
||||
try:
|
||||
regions = gce.ex_list_regions()
|
||||
for r in regions:
|
||||
gce_region = {}
|
||||
gce_region['name'] = r.name
|
||||
gce_region['status'] = r.status
|
||||
gce_region['zones'] = []
|
||||
for z in r.zones:
|
||||
gce_zone = {}
|
||||
gce_zone['name'] = z.name
|
||||
gce_zone['status'] = z.status
|
||||
gce_region['zones'].append(gce_zone)
|
||||
gce_regions.append(gce_region)
|
||||
json_output = { 'regions': gce_regions }
|
||||
module.exit_json(changed=False, results=json_output)
|
||||
except ResourceNotFoundError:
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
93
library/gcp_compute_location_info.py
Normal file
93
library/gcp_compute_location_info.py
Normal file
|
@ -0,0 +1,93 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
################################################################################
|
||||
# Documentation
|
||||
################################################################################
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
|
||||
|
||||
################################################################################
|
||||
# Imports
|
||||
################################################################################
|
||||
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
|
||||
import json
|
||||
|
||||
################################################################################
|
||||
# Main
|
||||
################################################################################
|
||||
|
||||
|
||||
def main():
|
||||
module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), scope=dict(required=True, type='str')))
|
||||
|
||||
if module._name == 'gcp_compute_image_facts':
|
||||
module.deprecate("The 'gcp_compute_image_facts' module has been renamed to 'gcp_compute_regions_info'", version='2.13')
|
||||
|
||||
if not module.params['scopes']:
|
||||
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
|
||||
|
||||
items = fetch_list(module, collection(module), query_options(module.params['filters']))
|
||||
if items.get('items'):
|
||||
items = items.get('items')
|
||||
else:
|
||||
items = []
|
||||
return_value = {'resources': items}
|
||||
module.exit_json(**return_value)
|
||||
|
||||
|
||||
def collection(module):
|
||||
return "https://www.googleapis.com/compute/v1/projects/{project}/{scope}".format(**module.params)
|
||||
|
||||
|
||||
def fetch_list(module, link, query):
|
||||
auth = GcpSession(module, 'compute')
|
||||
response = auth.get(link, params={'filter': query})
|
||||
return return_if_object(module, response)
|
||||
|
||||
|
||||
def query_options(filters):
|
||||
if not filters:
|
||||
return ''
|
||||
|
||||
if len(filters) == 1:
|
||||
return filters[0]
|
||||
else:
|
||||
queries = []
|
||||
for f in filters:
|
||||
# For multiple queries, all queries should have ()
|
||||
if f[0] != '(' and f[-1] != ')':
|
||||
queries.append("(%s)" % ''.join(f))
|
||||
else:
|
||||
queries.append(f)
|
||||
|
||||
return ' '.join(queries)
|
||||
|
||||
|
||||
def return_if_object(module, response):
|
||||
# If not found, return nothing.
|
||||
if response.status_code == 404:
|
||||
return None
|
||||
|
||||
# If no content, return nothing.
|
||||
if response.status_code == 204:
|
||||
return None
|
||||
|
||||
try:
|
||||
module.raise_for_status(response)
|
||||
result = response.json()
|
||||
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
|
||||
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
|
||||
|
||||
if navigate_hash(result, ['error', 'errors']):
|
||||
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
|
||||
|
||||
return result
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
@ -1,551 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: lightsail
|
||||
short_description: Create or delete a virtual machine instance in AWS Lightsail
|
||||
description:
|
||||
- Creates or instances in AWS Lightsail and optionally wait for it to be 'running'.
|
||||
version_added: "2.4"
|
||||
author: "Nick Ball (@nickball)"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the target.
|
||||
default: present
|
||||
choices: ['present', 'absent', 'running', 'restarted', 'stopped']
|
||||
name:
|
||||
description:
|
||||
- Name of the instance
|
||||
required: true
|
||||
default : null
|
||||
zone:
|
||||
description:
|
||||
- AWS availability zone in which to launch the instance. Required when state='present'
|
||||
required: false
|
||||
default: null
|
||||
blueprint_id:
|
||||
description:
|
||||
- ID of the instance blueprint image. Required when state='present'
|
||||
required: false
|
||||
default: null
|
||||
bundle_id:
|
||||
description:
|
||||
- Bundle of specification info for the instance. Required when state='present'
|
||||
required: false
|
||||
default: null
|
||||
user_data:
|
||||
description:
|
||||
- Launch script that can configure the instance with additional data
|
||||
required: false
|
||||
default: null
|
||||
key_pair_name:
|
||||
description:
|
||||
- Name of the key pair to use with the instance
|
||||
required: false
|
||||
default: null
|
||||
wait:
|
||||
description:
|
||||
- Wait for the instance to be in state 'running' before returning. If wait is "no" an ip_address may not be returned
|
||||
default: "yes"
|
||||
choices: [ "yes", "no" ]
|
||||
wait_timeout:
|
||||
description:
|
||||
- How long before wait gives up, in seconds.
|
||||
default: 300
|
||||
open_ports:
|
||||
description:
|
||||
- Adds public ports to an Amazon Lightsail instance.
|
||||
default: null
|
||||
suboptions:
|
||||
from_port:
|
||||
description: Begin of the range
|
||||
required: true
|
||||
default: null
|
||||
to_port:
|
||||
description: End of the range
|
||||
required: true
|
||||
default: null
|
||||
protocol:
|
||||
description: Accepted traffic protocol.
|
||||
required: true
|
||||
choices:
|
||||
- udp
|
||||
- tcp
|
||||
- all
|
||||
default: null
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- boto3
|
||||
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create a new Lightsail instance, register the instance details
|
||||
- lightsail:
|
||||
state: present
|
||||
name: myinstance
|
||||
region: us-east-1
|
||||
zone: us-east-1a
|
||||
blueprint_id: ubuntu_16_04
|
||||
bundle_id: nano_1_0
|
||||
key_pair_name: id_rsa
|
||||
user_data: " echo 'hello world' > /home/ubuntu/test.txt"
|
||||
wait_timeout: 500
|
||||
open_ports:
|
||||
- from_port: 4500
|
||||
to_port: 4500
|
||||
protocol: udp
|
||||
- from_port: 500
|
||||
to_port: 500
|
||||
protocol: udp
|
||||
register: my_instance
|
||||
|
||||
- debug:
|
||||
msg: "Name is {{ my_instance.instance.name }}"
|
||||
|
||||
- debug:
|
||||
msg: "IP is {{ my_instance.instance.publicIpAddress }}"
|
||||
|
||||
# Delete an instance if present
|
||||
- lightsail:
|
||||
state: absent
|
||||
region: us-east-1
|
||||
name: myinstance
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
changed:
|
||||
description: if a snapshot has been modified/created
|
||||
returned: always
|
||||
type: bool
|
||||
sample:
|
||||
changed: true
|
||||
instance:
|
||||
description: instance data
|
||||
returned: always
|
||||
type: dict
|
||||
sample:
|
||||
arn: "arn:aws:lightsail:us-east-1:448830907657:Instance/1fef0175-d6c8-480e-84fa-214f969cda87"
|
||||
blueprint_id: "ubuntu_16_04"
|
||||
blueprint_name: "Ubuntu"
|
||||
bundle_id: "nano_1_0"
|
||||
created_at: "2017-03-27T08:38:59.714000-04:00"
|
||||
hardware:
|
||||
cpu_count: 1
|
||||
ram_size_in_gb: 0.5
|
||||
is_static_ip: false
|
||||
location:
|
||||
availability_zone: "us-east-1a"
|
||||
region_name: "us-east-1"
|
||||
name: "my_instance"
|
||||
networking:
|
||||
monthly_transfer:
|
||||
gb_per_month_allocated: 1024
|
||||
ports:
|
||||
- access_direction: "inbound"
|
||||
access_from: "Anywhere (0.0.0.0/0)"
|
||||
access_type: "public"
|
||||
common_name: ""
|
||||
from_port: 80
|
||||
protocol: tcp
|
||||
to_port: 80
|
||||
- access_direction: "inbound"
|
||||
access_from: "Anywhere (0.0.0.0/0)"
|
||||
access_type: "public"
|
||||
common_name: ""
|
||||
from_port: 22
|
||||
protocol: tcp
|
||||
to_port: 22
|
||||
private_ip_address: "172.26.8.14"
|
||||
public_ip_address: "34.207.152.202"
|
||||
resource_type: "Instance"
|
||||
ssh_key_name: "keypair"
|
||||
state:
|
||||
code: 16
|
||||
name: running
|
||||
support_code: "588307843083/i-0997c97831ee21e33"
|
||||
username: "ubuntu"
|
||||
'''
|
||||
|
||||
import time
|
||||
import traceback
|
||||
|
||||
try:
|
||||
import botocore
|
||||
HAS_BOTOCORE = True
|
||||
except ImportError:
|
||||
HAS_BOTOCORE = False
|
||||
|
||||
try:
|
||||
import boto3
|
||||
except ImportError:
|
||||
# will be caught by imported HAS_BOTO3
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
|
||||
HAS_BOTO3, camel_dict_to_snake_dict)
|
||||
|
||||
|
||||
def create_instance(module, client, instance_name):
|
||||
"""
|
||||
Create an instance
|
||||
|
||||
module: Ansible module object
|
||||
client: authenticated lightsail connection object
|
||||
instance_name: name of instance to delete
|
||||
|
||||
Returns a dictionary of instance information
|
||||
about the new instance.
|
||||
|
||||
"""
|
||||
|
||||
changed = False
|
||||
|
||||
# Check if instance already exists
|
||||
inst = None
|
||||
try:
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['Error']['Code'] != 'NotFoundException':
|
||||
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
|
||||
|
||||
zone = module.params.get('zone')
|
||||
blueprint_id = module.params.get('blueprint_id')
|
||||
bundle_id = module.params.get('bundle_id')
|
||||
user_data = module.params.get('user_data')
|
||||
user_data = '' if user_data is None else user_data
|
||||
|
||||
wait = module.params.get('wait')
|
||||
wait_timeout = int(module.params.get('wait_timeout'))
|
||||
wait_max = time.time() + wait_timeout
|
||||
|
||||
if module.params.get('key_pair_name'):
|
||||
key_pair_name = module.params.get('key_pair_name')
|
||||
else:
|
||||
key_pair_name = ''
|
||||
|
||||
if module.params.get('open_ports'):
|
||||
open_ports = module.params.get('open_ports')
|
||||
else:
|
||||
open_ports = '[]'
|
||||
|
||||
resp = None
|
||||
if inst is None:
|
||||
try:
|
||||
resp = client.create_instances(
|
||||
instanceNames=[
|
||||
instance_name
|
||||
],
|
||||
availabilityZone=zone,
|
||||
blueprintId=blueprint_id,
|
||||
bundleId=bundle_id,
|
||||
userData=user_data,
|
||||
keyPairName=key_pair_name,
|
||||
)
|
||||
resp = resp['operations'][0]
|
||||
except botocore.exceptions.ClientError as e:
|
||||
module.fail_json(msg='Unable to create instance {0}, error: {1}'.format(instance_name, e))
|
||||
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
|
||||
# Wait for instance to become running
|
||||
if wait:
|
||||
while (wait_max > time.time()) and (inst is not None and inst['state']['name'] != "running"):
|
||||
try:
|
||||
time.sleep(2)
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
|
||||
module.fail_json(msg="Failed to start/stop instance {0}. Check that you have permissions to perform the operation".format(instance_name),
|
||||
exception=traceback.format_exc())
|
||||
elif e.response['Error']['Code'] == "RequestExpired":
|
||||
module.fail_json(msg="RequestExpired: Failed to start instance {0}.".format(instance_name), exception=traceback.format_exc())
|
||||
time.sleep(1)
|
||||
|
||||
# Timed out
|
||||
if wait and not changed and wait_max <= time.time():
|
||||
module.fail_json(msg="Wait for instance start timeout at %s" % time.asctime())
|
||||
|
||||
# Attempt to open ports
|
||||
if open_ports:
|
||||
if inst is not None:
|
||||
try:
|
||||
for o in open_ports:
|
||||
resp = client.open_instance_public_ports(
|
||||
instanceName=instance_name,
|
||||
portInfo={
|
||||
'fromPort': o['from_port'],
|
||||
'toPort': o['to_port'],
|
||||
'protocol': o['protocol']
|
||||
}
|
||||
)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
module.fail_json(msg='Error opening ports for instance {0}, error: {1}'.format(instance_name, e))
|
||||
|
||||
changed = True
|
||||
|
||||
return (changed, inst)
|
||||
|
||||
|
||||
def delete_instance(module, client, instance_name):
|
||||
"""
|
||||
Terminates an instance
|
||||
|
||||
module: Ansible module object
|
||||
client: authenticated lightsail connection object
|
||||
instance_name: name of instance to delete
|
||||
|
||||
Returns a dictionary of instance information
|
||||
about the instance deleted (pre-deletion).
|
||||
|
||||
If the instance to be deleted is running
|
||||
"changed" will be set to False.
|
||||
|
||||
"""
|
||||
|
||||
# It looks like deleting removes the instance immediately, nothing to wait for
|
||||
wait = module.params.get('wait')
|
||||
wait_timeout = int(module.params.get('wait_timeout'))
|
||||
wait_max = time.time() + wait_timeout
|
||||
|
||||
changed = False
|
||||
|
||||
inst = None
|
||||
try:
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['Error']['Code'] != 'NotFoundException':
|
||||
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
|
||||
|
||||
# Wait for instance to exit transition state before deleting
|
||||
if wait:
|
||||
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
|
||||
try:
|
||||
time.sleep(5)
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
|
||||
module.fail_json(msg="Failed to delete instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
|
||||
exception=traceback.format_exc())
|
||||
elif e.response['Error']['Code'] == "RequestExpired":
|
||||
module.fail_json(msg="RequestExpired: Failed to delete instance {0}.".format(instance_name), exception=traceback.format_exc())
|
||||
# sleep and retry
|
||||
time.sleep(10)
|
||||
|
||||
# Attempt to delete
|
||||
if inst is not None:
|
||||
while not changed and ((wait and wait_max > time.time()) or (not wait)):
|
||||
try:
|
||||
client.delete_instance(instanceName=instance_name)
|
||||
changed = True
|
||||
except botocore.exceptions.ClientError as e:
|
||||
module.fail_json(msg='Error deleting instance {0}, error: {1}'.format(instance_name, e))
|
||||
|
||||
# Timed out
|
||||
if wait and not changed and wait_max <= time.time():
|
||||
module.fail_json(msg="wait for instance delete timeout at %s" % time.asctime())
|
||||
|
||||
return (changed, inst)
|
||||
|
||||
|
||||
def restart_instance(module, client, instance_name):
|
||||
"""
|
||||
Reboot an existing instance
|
||||
|
||||
module: Ansible module object
|
||||
client: authenticated lightsail connection object
|
||||
instance_name: name of instance to reboot
|
||||
|
||||
Returns a dictionary of instance information
|
||||
about the restarted instance
|
||||
|
||||
If the instance was not able to reboot,
|
||||
"changed" will be set to False.
|
||||
|
||||
Wait will not apply here as this is an OS-level operation
|
||||
"""
|
||||
wait = module.params.get('wait')
|
||||
wait_timeout = int(module.params.get('wait_timeout'))
|
||||
wait_max = time.time() + wait_timeout
|
||||
|
||||
changed = False
|
||||
|
||||
inst = None
|
||||
try:
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['Error']['Code'] != 'NotFoundException':
|
||||
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
|
||||
|
||||
# Wait for instance to exit transition state before state change
|
||||
if wait:
|
||||
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
|
||||
try:
|
||||
time.sleep(5)
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
|
||||
module.fail_json(msg="Failed to restart instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
|
||||
exception=traceback.format_exc())
|
||||
elif e.response['Error']['Code'] == "RequestExpired":
|
||||
module.fail_json(msg="RequestExpired: Failed to restart instance {0}.".format(instance_name), exception=traceback.format_exc())
|
||||
time.sleep(3)
|
||||
|
||||
# send reboot
|
||||
if inst is not None:
|
||||
try:
|
||||
client.reboot_instance(instanceName=instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['Error']['Code'] != 'NotFoundException':
|
||||
module.fail_json(msg='Unable to reboot instance {0}, error: {1}'.format(instance_name, e))
|
||||
changed = True
|
||||
|
||||
return (changed, inst)
|
||||
|
||||
|
||||
def startstop_instance(module, client, instance_name, state):
|
||||
"""
|
||||
Starts or stops an existing instance
|
||||
|
||||
module: Ansible module object
|
||||
client: authenticated lightsail connection object
|
||||
instance_name: name of instance to start/stop
|
||||
state: Target state ("running" or "stopped")
|
||||
|
||||
Returns a dictionary of instance information
|
||||
about the instance started/stopped
|
||||
|
||||
If the instance was not able to state change,
|
||||
"changed" will be set to False.
|
||||
|
||||
"""
|
||||
wait = module.params.get('wait')
|
||||
wait_timeout = int(module.params.get('wait_timeout'))
|
||||
wait_max = time.time() + wait_timeout
|
||||
|
||||
changed = False
|
||||
|
||||
inst = None
|
||||
try:
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['Error']['Code'] != 'NotFoundException':
|
||||
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
|
||||
|
||||
# Wait for instance to exit transition state before state change
|
||||
if wait:
|
||||
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
|
||||
try:
|
||||
time.sleep(5)
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
|
||||
module.fail_json(msg="Failed to start/stop instance {0}. Check that you have permissions to perform the operation".format(instance_name),
|
||||
exception=traceback.format_exc())
|
||||
elif e.response['Error']['Code'] == "RequestExpired":
|
||||
module.fail_json(msg="RequestExpired: Failed to start/stop instance {0}.".format(instance_name), exception=traceback.format_exc())
|
||||
time.sleep(1)
|
||||
|
||||
# Try state change
|
||||
if inst is not None and inst['state']['name'] != state:
|
||||
try:
|
||||
if state == 'running':
|
||||
client.start_instance(instanceName=instance_name)
|
||||
else:
|
||||
client.stop_instance(instanceName=instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(instance_name, e))
|
||||
changed = True
|
||||
# Grab current instance info
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
|
||||
return (changed, inst)
|
||||
|
||||
|
||||
def core(module):
|
||||
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
|
||||
if not region:
|
||||
module.fail_json(msg='region must be specified')
|
||||
|
||||
client = None
|
||||
try:
|
||||
client = boto3_conn(module, conn_type='client', resource='lightsail',
|
||||
region=region, endpoint=ec2_url, **aws_connect_kwargs)
|
||||
except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
|
||||
module.fail_json(msg='Failed while connecting to the lightsail service: %s' % e, exception=traceback.format_exc())
|
||||
|
||||
changed = False
|
||||
state = module.params['state']
|
||||
name = module.params['name']
|
||||
|
||||
if state == 'absent':
|
||||
changed, instance_dict = delete_instance(module, client, name)
|
||||
elif state in ('running', 'stopped'):
|
||||
changed, instance_dict = startstop_instance(module, client, name, state)
|
||||
elif state == 'restarted':
|
||||
changed, instance_dict = restart_instance(module, client, name)
|
||||
elif state == 'present':
|
||||
changed, instance_dict = create_instance(module, client, name)
|
||||
|
||||
module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(instance_dict))
|
||||
|
||||
|
||||
def _find_instance_info(client, instance_name):
|
||||
''' handle exceptions where this function is called '''
|
||||
inst = None
|
||||
try:
|
||||
inst = client.get_instance(instanceName=instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
raise
|
||||
return inst['instance']
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
name=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted']),
|
||||
zone=dict(type='str'),
|
||||
blueprint_id=dict(type='str'),
|
||||
bundle_id=dict(type='str'),
|
||||
key_pair_name=dict(type='str'),
|
||||
user_data=dict(type='str'),
|
||||
wait=dict(type='bool', default=True),
|
||||
wait_timeout=dict(default=300),
|
||||
open_ports=dict(type='list')
|
||||
))
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec)
|
||||
|
||||
if not HAS_BOTO3:
|
||||
module.fail_json(msg='Python module "boto3" is missing, please install it')
|
||||
|
||||
if not HAS_BOTOCORE:
|
||||
module.fail_json(msg='Python module "botocore" is missing, please install it')
|
||||
|
||||
try:
|
||||
core(module)
|
||||
except (botocore.exceptions.ClientError, Exception) as e:
|
||||
module.fail_json(msg=str(e), exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@ -93,7 +93,7 @@ def main():
|
|||
response = client.get_regions(
|
||||
includeAvailabilityZones=False
|
||||
)
|
||||
module.exit_json(changed=False, results=response)
|
||||
module.exit_json(changed=False, data=response)
|
||||
except (botocore.exceptions.ClientError, Exception) as e:
|
||||
module.fail_json(msg=str(e), exception=traceback.format_exc())
|
||||
|
||||
|
|
113
library/linode_stackscript_v4.py
Normal file
113
library/linode_stackscript_v4.py
Normal file
|
@ -0,0 +1,113 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
|
||||
from ansible.module_utils.linode import get_user_agent
|
||||
|
||||
LINODE_IMP_ERR = None
|
||||
try:
|
||||
from linode_api4 import StackScript, LinodeClient
|
||||
HAS_LINODE_DEPENDENCY = True
|
||||
except ImportError:
|
||||
LINODE_IMP_ERR = traceback.format_exc()
|
||||
HAS_LINODE_DEPENDENCY = False
|
||||
|
||||
|
||||
def create_stackscript(module, client, **kwargs):
|
||||
"""Creates a stackscript and handles return format."""
|
||||
try:
|
||||
response = client.linode.stackscript_create(**kwargs)
|
||||
return response._raw_json
|
||||
except Exception as exception:
|
||||
module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
|
||||
|
||||
|
||||
def stackscript_available(module, client):
|
||||
"""Try to retrieve a stackscript."""
|
||||
try:
|
||||
label = module.params['label']
|
||||
desc = module.params['description']
|
||||
|
||||
result = client.linode.stackscripts(StackScript.label == label,
|
||||
StackScript.description == desc,
|
||||
mine_only=True
|
||||
)
|
||||
return result[0]
|
||||
except IndexError:
|
||||
return None
|
||||
except Exception as exception:
|
||||
module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
|
||||
|
||||
|
||||
def initialise_module():
|
||||
"""Initialise the module parameter specification."""
|
||||
return AnsibleModule(
|
||||
argument_spec=dict(
|
||||
label=dict(type='str', required=True),
|
||||
state=dict(
|
||||
type='str',
|
||||
required=True,
|
||||
choices=['present', 'absent']
|
||||
),
|
||||
access_token=dict(
|
||||
type='str',
|
||||
required=True,
|
||||
no_log=True,
|
||||
fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']),
|
||||
),
|
||||
script=dict(type='str', required=True),
|
||||
images=dict(type='list', required=True),
|
||||
description=dict(type='str', required=False),
|
||||
public=dict(type='bool', required=False, default=False),
|
||||
),
|
||||
supports_check_mode=False
|
||||
)
|
||||
|
||||
|
||||
def build_client(module):
|
||||
"""Build a LinodeClient."""
|
||||
return LinodeClient(
|
||||
module.params['access_token'],
|
||||
user_agent=get_user_agent('linode_v4_module')
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
"""Module entrypoint."""
|
||||
module = initialise_module()
|
||||
|
||||
if not HAS_LINODE_DEPENDENCY:
|
||||
module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR)
|
||||
|
||||
client = build_client(module)
|
||||
stackscript = stackscript_available(module, client)
|
||||
|
||||
if module.params['state'] == 'present' and stackscript is not None:
|
||||
module.exit_json(changed=False, stackscript=stackscript._raw_json)
|
||||
|
||||
elif module.params['state'] == 'present' and stackscript is None:
|
||||
stackscript_json = create_stackscript(
|
||||
module, client,
|
||||
label=module.params['label'],
|
||||
script=module.params['script'],
|
||||
images=module.params['images'],
|
||||
desc=module.params['description'],
|
||||
public=module.params['public'],
|
||||
)
|
||||
module.exit_json(changed=True, stackscript=stackscript_json)
|
||||
|
||||
elif module.params['state'] == 'absent' and stackscript is not None:
|
||||
stackscript.delete()
|
||||
module.exit_json(changed=True, stackscript=stackscript._raw_json)
|
||||
|
||||
elif module.params['state'] == 'absent' and stackscript is None:
|
||||
module.exit_json(changed=False, stackscript={})
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
142
library/linode_v4.py
Normal file
142
library/linode_v4.py
Normal file
|
@ -0,0 +1,142 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2017 Ansible Project
|
||||
# GNU General Public License v3.0+
|
||||
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
|
||||
from ansible.module_utils.linode import get_user_agent
|
||||
|
||||
LINODE_IMP_ERR = None
|
||||
try:
|
||||
from linode_api4 import Instance, LinodeClient
|
||||
HAS_LINODE_DEPENDENCY = True
|
||||
except ImportError:
|
||||
LINODE_IMP_ERR = traceback.format_exc()
|
||||
HAS_LINODE_DEPENDENCY = False
|
||||
|
||||
|
||||
def create_linode(module, client, **kwargs):
|
||||
"""Creates a Linode instance and handles return format."""
|
||||
if kwargs['root_pass'] is None:
|
||||
kwargs.pop('root_pass')
|
||||
|
||||
try:
|
||||
response = client.linode.instance_create(**kwargs)
|
||||
except Exception as exception:
|
||||
module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
|
||||
|
||||
try:
|
||||
if isinstance(response, tuple):
|
||||
instance, root_pass = response
|
||||
instance_json = instance._raw_json
|
||||
instance_json.update({'root_pass': root_pass})
|
||||
return instance_json
|
||||
else:
|
||||
return response._raw_json
|
||||
except TypeError:
|
||||
module.fail_json(msg='Unable to parse Linode instance creation'
|
||||
' response. Please raise a bug against this'
|
||||
' module on https://github.com/ansible/ansible/issues'
|
||||
)
|
||||
|
||||
|
||||
def maybe_instance_from_label(module, client):
|
||||
"""Try to retrieve an instance based on a label."""
|
||||
try:
|
||||
label = module.params['label']
|
||||
result = client.linode.instances(Instance.label == label)
|
||||
return result[0]
|
||||
except IndexError:
|
||||
return None
|
||||
except Exception as exception:
|
||||
module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
|
||||
|
||||
|
||||
def initialise_module():
|
||||
"""Initialise the module parameter specification."""
|
||||
return AnsibleModule(
|
||||
argument_spec=dict(
|
||||
label=dict(type='str', required=True),
|
||||
state=dict(
|
||||
type='str',
|
||||
required=True,
|
||||
choices=['present', 'absent']
|
||||
),
|
||||
access_token=dict(
|
||||
type='str',
|
||||
required=True,
|
||||
no_log=True,
|
||||
fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']),
|
||||
),
|
||||
authorized_keys=dict(type='list', required=False),
|
||||
group=dict(type='str', required=False),
|
||||
image=dict(type='str', required=False),
|
||||
region=dict(type='str', required=False),
|
||||
root_pass=dict(type='str', required=False, no_log=True),
|
||||
tags=dict(type='list', required=False),
|
||||
type=dict(type='str', required=False),
|
||||
stackscript_id=dict(type='int', required=False),
|
||||
),
|
||||
supports_check_mode=False,
|
||||
required_one_of=(
|
||||
['state', 'label'],
|
||||
),
|
||||
required_together=(
|
||||
['region', 'image', 'type'],
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def build_client(module):
|
||||
"""Build a LinodeClient."""
|
||||
return LinodeClient(
|
||||
module.params['access_token'],
|
||||
user_agent=get_user_agent('linode_v4_module')
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
"""Module entrypoint."""
|
||||
module = initialise_module()
|
||||
|
||||
if not HAS_LINODE_DEPENDENCY:
|
||||
module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR)
|
||||
|
||||
client = build_client(module)
|
||||
instance = maybe_instance_from_label(module, client)
|
||||
|
||||
if module.params['state'] == 'present' and instance is not None:
|
||||
module.exit_json(changed=False, instance=instance._raw_json)
|
||||
|
||||
elif module.params['state'] == 'present' and instance is None:
|
||||
instance_json = create_linode(
|
||||
module, client,
|
||||
authorized_keys=module.params['authorized_keys'],
|
||||
group=module.params['group'],
|
||||
image=module.params['image'],
|
||||
label=module.params['label'],
|
||||
region=module.params['region'],
|
||||
root_pass=module.params['root_pass'],
|
||||
tags=module.params['tags'],
|
||||
ltype=module.params['type'],
|
||||
stackscript_id=module.params['stackscript_id'],
|
||||
)
|
||||
module.exit_json(changed=True, instance=instance_json)
|
||||
|
||||
elif module.params['state'] == 'absent' and instance is not None:
|
||||
instance.delete()
|
||||
module.exit_json(changed=True, instance=instance._raw_json)
|
||||
|
||||
elif module.params['state'] == 'absent' and instance is None:
|
||||
module.exit_json(changed=False, instance={})
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
@ -29,6 +29,15 @@ extends_documentation_fragment: scaleway
|
|||
|
||||
options:
|
||||
|
||||
public_ip:
|
||||
description:
|
||||
- Manage public IP on a Scaleway server
|
||||
- Could be Scaleway IP address UUID
|
||||
- C(dynamic) Means that IP is destroyed at the same time the host is destroyed
|
||||
- C(absent) Means no public IP at all
|
||||
version_added: '2.8'
|
||||
default: absent
|
||||
|
||||
enable_ipv6:
|
||||
description:
|
||||
- Enable public IPv6 connectivity on the instance
|
||||
|
@ -88,26 +97,6 @@ options:
|
|||
description:
|
||||
- Commercial name of the compute node
|
||||
required: true
|
||||
choices:
|
||||
- ARM64-2GB
|
||||
- ARM64-4GB
|
||||
- ARM64-8GB
|
||||
- ARM64-16GB
|
||||
- ARM64-32GB
|
||||
- ARM64-64GB
|
||||
- ARM64-128GB
|
||||
- C1
|
||||
- C2S
|
||||
- C2M
|
||||
- C2L
|
||||
- START1-XS
|
||||
- START1-S
|
||||
- START1-M
|
||||
- START1-L
|
||||
- X64-15GB
|
||||
- X64-30GB
|
||||
- X64-60GB
|
||||
- X64-120GB
|
||||
|
||||
wait:
|
||||
description:
|
||||
|
@ -126,6 +115,13 @@ options:
|
|||
- Time to wait before every attempt to check the state of the server
|
||||
required: false
|
||||
default: 3
|
||||
|
||||
security_group:
|
||||
description:
|
||||
- Security group unique identifier
|
||||
- If no value provided, the default security group or current security group will be used
|
||||
required: false
|
||||
version_added: "2.8"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -141,6 +137,19 @@ EXAMPLES = '''
|
|||
- test
|
||||
- www
|
||||
|
||||
- name: Create a server attached to a security group
|
||||
scaleway_compute:
|
||||
name: foobar
|
||||
state: present
|
||||
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
|
||||
organization: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||
region: ams1
|
||||
commercial_type: VC1S
|
||||
security_group: 4a31b633-118e-4900-bd52-facf1085fc8d
|
||||
tags:
|
||||
- test
|
||||
- www
|
||||
|
||||
- name: Destroy it right after
|
||||
scaleway_compute:
|
||||
name: foobar
|
||||
|
@ -161,34 +170,6 @@ from ansible.module_utils.basic import AnsibleModule
|
|||
from ansible.module_utils.six.moves.urllib.parse import quote as urlquote
|
||||
from ansible.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
|
||||
|
||||
SCALEWAY_COMMERCIAL_TYPES = [
|
||||
|
||||
# Virtual ARM64 compute instance
|
||||
'ARM64-2GB',
|
||||
'ARM64-4GB',
|
||||
'ARM64-8GB',
|
||||
'ARM64-16GB',
|
||||
'ARM64-32GB',
|
||||
'ARM64-64GB',
|
||||
'ARM64-128GB',
|
||||
|
||||
# Baremetal
|
||||
'C1', # ARM64 (4 cores) - 2GB
|
||||
'C2S', # X86-64 (4 cores) - 8GB
|
||||
'C2M', # X86-64 (8 cores) - 16GB
|
||||
'C2L', # x86-64 (8 cores) - 32 GB
|
||||
|
||||
# Virtual X86-64 compute instance
|
||||
'START1-XS', # Starter X86-64 (1 core) - 1GB - 25 GB NVMe
|
||||
'START1-S', # Starter X86-64 (2 cores) - 2GB - 50 GB NVMe
|
||||
'START1-M', # Starter X86-64 (4 cores) - 4GB - 100 GB NVMe
|
||||
'START1-L', # Starter X86-64 (8 cores) - 8GB - 200 GB NVMe
|
||||
'X64-15GB',
|
||||
'X64-30GB',
|
||||
'X64-60GB',
|
||||
'X64-120GB',
|
||||
]
|
||||
|
||||
SCALEWAY_SERVER_STATES = (
|
||||
'stopped',
|
||||
'stopping',
|
||||
|
@ -204,6 +185,17 @@ SCALEWAY_TRANSITIONS_STATES = (
|
|||
)
|
||||
|
||||
|
||||
def check_image_id(compute_api, image_id):
|
||||
response = compute_api.get(path="images")
|
||||
|
||||
if response.ok and response.json:
|
||||
image_ids = [image["id"] for image in response.json["images"]]
|
||||
if image_id not in image_ids:
|
||||
compute_api.module.fail_json(msg='Error in getting image %s on %s' % (image_id, compute_api.module.params.get('api_url')))
|
||||
else:
|
||||
compute_api.module.fail_json(msg="Error in getting images from: %s" % compute_api.module.params.get('api_url'))
|
||||
|
||||
|
||||
def fetch_state(compute_api, server):
|
||||
compute_api.module.debug("fetch_state of server: %s" % server["id"])
|
||||
response = compute_api.get(path="servers/%s" % server["id"])
|
||||
|
@ -242,17 +234,51 @@ def wait_to_complete_state_transition(compute_api, server):
|
|||
compute_api.module.fail_json(msg="Server takes too long to finish its transition")
|
||||
|
||||
|
||||
def public_ip_payload(compute_api, public_ip):
|
||||
# We don't want a public ip
|
||||
if public_ip in ("absent",):
|
||||
return {"dynamic_ip_required": False}
|
||||
|
||||
# IP is only attached to the instance and is released as soon as the instance terminates
|
||||
if public_ip in ("dynamic", "allocated"):
|
||||
return {"dynamic_ip_required": True}
|
||||
|
||||
# We check that the IP we want to attach exists, if so its ID is returned
|
||||
response = compute_api.get("ips")
|
||||
if not response.ok:
|
||||
msg = 'Error during public IP validation: (%s) %s' % (response.status_code, response.json)
|
||||
compute_api.module.fail_json(msg=msg)
|
||||
|
||||
ip_list = []
|
||||
try:
|
||||
ip_list = response.json["ips"]
|
||||
except KeyError:
|
||||
compute_api.module.fail_json(msg="Error in getting the IP information from: %s" % response.json)
|
||||
|
||||
lookup = [ip["id"] for ip in ip_list]
|
||||
if public_ip in lookup:
|
||||
return {"public_ip": public_ip}
|
||||
|
||||
|
||||
def create_server(compute_api, server):
|
||||
compute_api.module.debug("Starting a create_server")
|
||||
target_server = None
|
||||
response = compute_api.post(path="servers",
|
||||
data = {"enable_ipv6": server["enable_ipv6"],
|
||||
"boot_type": server["boot_type"],
|
||||
"tags": server["tags"],
|
||||
"commercial_type": server["commercial_type"],
|
||||
"image": server["image"],
|
||||
"dynamic_ip_required": server["dynamic_ip_required"],
|
||||
"name": server["name"],
|
||||
"organization": server["organization"]})
|
||||
"organization": server["organization"]
|
||||
}
|
||||
|
||||
if server["boot_type"]:
|
||||
data["boot_type"] = server["boot_type"]
|
||||
|
||||
if server["security_group"]:
|
||||
data["security_group"] = server["security_group"]
|
||||
|
||||
response = compute_api.post(path="servers", data=data)
|
||||
|
||||
if not response.ok:
|
||||
msg = 'Error during server creation: (%s) %s' % (response.status_code, response.json)
|
||||
|
@ -325,7 +351,7 @@ def present_strategy(compute_api, wished_server):
|
|||
if compute_api.module.check_mode:
|
||||
return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
|
||||
|
||||
server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
|
||||
target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
|
||||
|
||||
return changed, target_server
|
||||
|
||||
|
@ -347,7 +373,7 @@ def absent_strategy(compute_api, wished_server):
|
|||
return changed, {"status": "Server %s would be made absent." % target_server["id"]}
|
||||
|
||||
# A server MUST be stopped to be deleted.
|
||||
while not fetch_state(compute_api=compute_api, server=target_server) == "stopped":
|
||||
while fetch_state(compute_api=compute_api, server=target_server) != "stopped":
|
||||
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
|
||||
response = stop_server(compute_api=compute_api, server=target_server)
|
||||
|
||||
|
@ -388,7 +414,7 @@ def running_strategy(compute_api, wished_server):
|
|||
if compute_api.module.check_mode:
|
||||
return changed, {"status": "Server %s attributes would be changed before running it." % target_server["id"]}
|
||||
|
||||
server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
|
||||
target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
|
||||
|
||||
current_state = fetch_state(compute_api=compute_api, server=target_server)
|
||||
if current_state not in ("running", "starting"):
|
||||
|
@ -432,7 +458,7 @@ def stop_strategy(compute_api, wished_server):
|
|||
return changed, {
|
||||
"status": "Server %s attributes would be changed before stopping it." % target_server["id"]}
|
||||
|
||||
server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
|
||||
target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
|
||||
|
||||
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
|
||||
|
||||
|
@ -479,7 +505,7 @@ def restart_strategy(compute_api, wished_server):
|
|||
return changed, {
|
||||
"status": "Server %s attributes would be changed before rebooting it." % target_server["id"]}
|
||||
|
||||
server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
|
||||
target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
|
||||
|
||||
changed = True
|
||||
if compute_api.module.check_mode:
|
||||
|
@ -518,8 +544,8 @@ state_strategy = {
|
|||
def find(compute_api, wished_server, per_page=1):
|
||||
compute_api.module.debug("Getting inside find")
|
||||
# Only the name attribute is accepted in the Compute query API
|
||||
url = 'servers?name=%s&per_page=%d' % (urlquote(wished_server["name"]), per_page)
|
||||
response = compute_api.get(url)
|
||||
response = compute_api.get("servers", params={"name": wished_server["name"],
|
||||
"per_page": per_page})
|
||||
|
||||
if not response.ok:
|
||||
msg = 'Error during server search: (%s) %s' % (response.status_code, response.json)
|
||||
|
@ -535,6 +561,7 @@ PATCH_MUTABLE_SERVER_ATTRIBUTES = (
|
|||
"tags",
|
||||
"name",
|
||||
"dynamic_ip_required",
|
||||
"security_group",
|
||||
)
|
||||
|
||||
|
||||
|
@ -546,29 +573,51 @@ def server_attributes_should_be_changed(compute_api, target_server, wished_serve
|
|||
for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
|
||||
if x in target_server and x in wished_server)
|
||||
compute_api.module.debug("Debug dict %s" % debug_dict)
|
||||
|
||||
try:
|
||||
return any([target_server[x] != wished_server[x]
|
||||
for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
|
||||
if x in target_server and x in wished_server])
|
||||
for key in PATCH_MUTABLE_SERVER_ATTRIBUTES:
|
||||
if key in target_server and key in wished_server:
|
||||
# When you are working with dict, only ID matter as we ask user to put only the resource ID in the playbook
|
||||
if isinstance(target_server[key], dict) and wished_server[key] and "id" in target_server[key].keys(
|
||||
) and target_server[key]["id"] != wished_server[key]:
|
||||
return True
|
||||
# Handling other structure compare simply the two objects content
|
||||
elif not isinstance(target_server[key], dict) and target_server[key] != wished_server[key]:
|
||||
return True
|
||||
return False
|
||||
except AttributeError:
|
||||
compute_api.module.fail_json(msg="Error while checking if attributes should be changed")
|
||||
|
||||
|
||||
def server_change_attributes(compute_api, target_server, wished_server):
|
||||
compute_api.module.debug("Starting patching server attributes")
|
||||
patch_payload = dict((x, wished_server[x])
|
||||
for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
|
||||
if x in wished_server and x in target_server)
|
||||
patch_payload = dict()
|
||||
|
||||
for key in PATCH_MUTABLE_SERVER_ATTRIBUTES:
|
||||
if key in target_server and key in wished_server:
|
||||
# When you are working with dict, only ID matter as we ask user to put only the resource ID in the playbook
|
||||
if isinstance(target_server[key], dict) and "id" in target_server[key] and wished_server[key]:
|
||||
# Setting all key to current value except ID
|
||||
key_dict = dict((x, target_server[key][x]) for x in target_server[key].keys() if x != "id")
|
||||
# Setting ID to the user specified ID
|
||||
key_dict["id"] = wished_server[key]
|
||||
patch_payload[key] = key_dict
|
||||
elif not isinstance(target_server[key], dict):
|
||||
patch_payload[key] = wished_server[key]
|
||||
|
||||
response = compute_api.patch(path="servers/%s" % target_server["id"],
|
||||
data=patch_payload)
|
||||
if not response.ok:
|
||||
msg = 'Error during server attributes patching: (%s) %s' % (response.status_code, response.json)
|
||||
compute_api.module.fail_json(msg=msg)
|
||||
|
||||
try:
|
||||
target_server = response.json["server"]
|
||||
except KeyError:
|
||||
compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json)
|
||||
|
||||
wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
|
||||
|
||||
return response
|
||||
return target_server
|
||||
|
||||
|
||||
def core(module):
|
||||
|
@ -581,12 +630,19 @@ def core(module):
|
|||
"enable_ipv6": module.params["enable_ipv6"],
|
||||
"boot_type": module.params["boot_type"],
|
||||
"tags": module.params["tags"],
|
||||
"organization": module.params["organization"]
|
||||
"organization": module.params["organization"],
|
||||
"security_group": module.params["security_group"]
|
||||
}
|
||||
module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
|
||||
|
||||
compute_api = Scaleway(module=module)
|
||||
|
||||
check_image_id(compute_api, wished_server["image"])
|
||||
|
||||
# IP parameters of the wished server depends on the configuration
|
||||
ip_payload = public_ip_payload(compute_api=compute_api, public_ip=module.params["public_ip"])
|
||||
wished_server.update(ip_payload)
|
||||
|
||||
changed, summary = state_strategy[wished_server["state"]](compute_api=compute_api, wished_server=wished_server)
|
||||
module.exit_json(changed=changed, msg=summary)
|
||||
|
||||
|
@ -597,15 +653,17 @@ def main():
|
|||
image=dict(required=True),
|
||||
name=dict(),
|
||||
region=dict(required=True, choices=SCALEWAY_LOCATION.keys()),
|
||||
commercial_type=dict(required=True, choices=SCALEWAY_COMMERCIAL_TYPES),
|
||||
commercial_type=dict(required=True),
|
||||
enable_ipv6=dict(default=False, type="bool"),
|
||||
boot_type=dict(default="bootscript"),
|
||||
boot_type=dict(choices=['bootscript', 'local']),
|
||||
public_ip=dict(default="absent"),
|
||||
state=dict(choices=state_strategy.keys(), default='present'),
|
||||
tags=dict(type="list", default=[]),
|
||||
organization=dict(required=True),
|
||||
wait=dict(type="bool", default=False),
|
||||
wait_timeout=dict(type="int", default=300),
|
||||
wait_sleep_time=dict(type="int", default=3),
|
||||
security_group=dict(),
|
||||
))
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
|
|
37
main.yml
37
main.yml
|
@ -2,6 +2,18 @@
|
|||
- hosts: localhost
|
||||
become: false
|
||||
tasks:
|
||||
- name: Playbook dir stat
|
||||
stat:
|
||||
path: "{{ playbook_dir }}"
|
||||
register: _playbook_dir
|
||||
|
||||
- name: Ensure Ansible is not being run in a world writable directory
|
||||
assert:
|
||||
that: _playbook_dir.stat.mode|int <= 775
|
||||
msg: >
|
||||
Ansible is being run in a world writable directory ({{ playbook_dir }}), ignoring it as an ansible.cfg source.
|
||||
For more information see https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir
|
||||
|
||||
- name: Ensure the requirements installed
|
||||
debug:
|
||||
msg: "{{ '' | ipaddr }}"
|
||||
|
@ -11,21 +23,32 @@
|
|||
|
||||
- name: Set required ansible version as a fact
|
||||
set_fact:
|
||||
required_ansible_version:
|
||||
"{{ item | regex_replace('^ansible[\\s+]?(?P<op>[=,>,<]+)[\\s+]?(?P<ver>\\d.\\d(.\\d+)?)$',
|
||||
'{\"op\": \"\\g<op>\",\"ver\": \"\\g<ver>\" }') }}"
|
||||
required_ansible_version: "{{ item | regex_replace('^ansible[\\s+]?(?P<op>[=,>,<]+)[\\s+]?(?P<ver>\\d.\\d+(.\\d+)?)$', '{\"op\": \"\\g<op>\",\"ver\"\
|
||||
: \"\\g<ver>\" }') }}"
|
||||
when: '"ansible" in item'
|
||||
with_items: "{{ lookup('file', 'requirements.txt').splitlines() }}"
|
||||
|
||||
- name: Verify Ansible meets Algo VPN requirements.
|
||||
- name: Just get the list from default pip
|
||||
community.general.pip_package_info:
|
||||
register: pip_package_info
|
||||
|
||||
- name: Verify Python meets Algo VPN requirements
|
||||
assert:
|
||||
that: (ansible_python.version.major|string + '.' + ansible_python.version.minor|string) is version('3.8', '>=')
|
||||
msg: >
|
||||
Python version is not supported.
|
||||
You must upgrade to at least Python 3.8 to use this version of Algo.
|
||||
See for more details - https://trailofbits.github.io/algo/troubleshooting.html#python-version-is-not-supported
|
||||
|
||||
- name: Verify Ansible meets Algo VPN requirements
|
||||
assert:
|
||||
that:
|
||||
- ansible_version.full is version(required_ansible_version.ver, required_ansible_version.op)
|
||||
- pip_package_info.packages.pip.ansible.0.version is version(required_ansible_version.ver, required_ansible_version.op)
|
||||
- not ipaddr.failed
|
||||
msg: >
|
||||
Ansible version is {{ ansible_version.full }}.
|
||||
Ansible version is {{ pip_package_info.packages.pip.ansible.0.version }}.
|
||||
You must update the requirements to use this version of Algo.
|
||||
Try to run python -m pip install -U -r requirements.txt
|
||||
Try to run python3 -m pip install -U -r requirements.txt
|
||||
|
||||
- name: Include prompts playbook
|
||||
import_playbook: input.yml
|
||||
|
|
|
@ -8,8 +8,9 @@
|
|||
name: "{% if cloud_instance_ip == 'localhost' %}localhost{% else %}{{ cloud_instance_ip }}{% endif %}"
|
||||
groups: vpn-host
|
||||
ansible_connection: "{% if cloud_instance_ip == 'localhost' %}local{% else %}ssh{% endif %}"
|
||||
ansible_ssh_user: "{{ ansible_ssh_user }}"
|
||||
ansible_python_interpreter: "/usr/bin/python3"
|
||||
ansible_ssh_user: "{{ ansible_ssh_user|default('root') }}"
|
||||
ansible_ssh_port: "{{ ansible_ssh_port|default(22) }}"
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
algo_provider: "{{ algo_provider }}"
|
||||
algo_server_name: "{{ algo_server_name }}"
|
||||
algo_ondemand_cellular: "{{ algo_ondemand_cellular }}"
|
||||
|
@ -19,18 +20,20 @@
|
|||
algo_ssh_tunneling: "{{ algo_ssh_tunneling }}"
|
||||
algo_store_pki: "{{ algo_store_pki }}"
|
||||
IP_subject_alt_name: "{{ IP_subject_alt_name }}"
|
||||
alternative_ingress_ip: "{{ alternative_ingress_ip | default(omit) }}"
|
||||
cloudinit: "{{ cloudinit|default(false) }}"
|
||||
|
||||
- name: Additional variables for the server
|
||||
add_host:
|
||||
name: "{% if cloud_instance_ip == 'localhost' %}localhost{% else %}{{ cloud_instance_ip }}{% endif %}"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private_tmp }}"
|
||||
when: algo_provider != 'local'
|
||||
|
||||
- name: Wait until SSH becomes ready...
|
||||
wait_for:
|
||||
port: 22
|
||||
port: "{{ ansible_ssh_port|default(22) }}"
|
||||
host: "{{ cloud_instance_ip }}"
|
||||
search_regex: "OpenSSH"
|
||||
search_regex: OpenSSH
|
||||
delay: 10
|
||||
timeout: 320
|
||||
state: present
|
||||
|
@ -41,12 +44,12 @@
|
|||
when:
|
||||
- pki_in_tmpfs
|
||||
- not algo_store_pki
|
||||
- ansible_system == "Darwin" or
|
||||
ansible_system == "Linux"
|
||||
- ansible_system == "Darwin" or ansible_system == "Linux"
|
||||
|
||||
- debug:
|
||||
var: IP_subject_alt_name
|
||||
|
||||
- name: A short pause, in order to be sure the instance is ready
|
||||
pause:
|
||||
seconds: 20
|
||||
- name: Wait 600 seconds for target connection to become reachable/usable
|
||||
wait_for_connection:
|
||||
delegate_to: "{{ item }}"
|
||||
loop: "{{ groups['vpn-host'] }}"
|
||||
|
|
|
@ -13,15 +13,14 @@
|
|||
'algo_ssh_tunneling "{{ algo_ssh_tunneling }}"' \
|
||||
'wireguard_enabled "{{ wireguard_enabled }}"' \
|
||||
'dns_encryption "{{ dns_encryption }}"' \
|
||||
> /dev/tty
|
||||
> /dev/tty || true
|
||||
tags: debug
|
||||
|
||||
- name: Install the requirements
|
||||
pip:
|
||||
state: latest
|
||||
state: present
|
||||
name:
|
||||
- pyOpenSSL
|
||||
- jinja2==2.8
|
||||
- pyOpenSSL>=0.15
|
||||
- segno
|
||||
tags:
|
||||
- always
|
||||
|
@ -29,17 +28,26 @@
|
|||
delegate_to: localhost
|
||||
become: false
|
||||
|
||||
- block:
|
||||
- name: Generate the SSH private key
|
||||
openssl_privatekey:
|
||||
path: "{{ SSH_keys.private }}"
|
||||
size: 2048
|
||||
size: 4096
|
||||
mode: "0600"
|
||||
type: RSA
|
||||
when: algo_provider != "local"
|
||||
|
||||
- name: Generate the SSH public key
|
||||
openssl_publickey:
|
||||
path: "{{ SSH_keys.public }}"
|
||||
privatekey_path: "{{ SSH_keys.private }}"
|
||||
format: OpenSSH
|
||||
|
||||
- name: Copy the private SSH key to /tmp
|
||||
copy:
|
||||
src: "{{ SSH_keys.private }}"
|
||||
dest: "{{ SSH_keys.private_tmp }}"
|
||||
force: true
|
||||
mode: "0600"
|
||||
delegate_to: localhost
|
||||
become: false
|
||||
when: algo_provider != "local"
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
---
|
||||
- name: Linux | set OS specific facts
|
||||
set_fact:
|
||||
tmpfs_volume_name: "AlgoVPN-{{ IP_subject_alt_name }}"
|
||||
tmpfs_volume_name: AlgoVPN-{{ IP_subject_alt_name }}
|
||||
tmpfs_volume_path: /dev/shm
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
- name: MacOS | set OS specific facts
|
||||
set_fact:
|
||||
tmpfs_volume_name: "AlgoVPN-{{ IP_subject_alt_name }}"
|
||||
tmpfs_volume_name: AlgoVPN-{{ IP_subject_alt_name }}
|
||||
tmpfs_volume_path: /Volumes
|
||||
|
||||
- name: MacOS | mount a ram disk
|
||||
|
@ -9,4 +9,4 @@
|
|||
/usr/sbin/diskutil info "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/" ||
|
||||
/usr/sbin/diskutil erasevolume HFS+ "{{ tmpfs_volume_name }}" $(hdiutil attach -nomount ram://64000)
|
||||
args:
|
||||
creates: "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}"
|
||||
creates: /{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}
|
||||
|
|
|
@ -9,11 +9,9 @@
|
|||
|
||||
- name: Set config paths as facts
|
||||
set_fact:
|
||||
wireguard_pki_path: "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/WireGuard/"
|
||||
ipsec_pki_path: "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/IPsec/"
|
||||
ipsec_pki_path: /{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/IPsec/
|
||||
|
||||
- name: Update config paths
|
||||
add_host:
|
||||
name: "{{ 'localhost' if cloud_instance_ip == 'localhost' else cloud_instance_ip }}"
|
||||
wireguard_pki_path: "{{ wireguard_pki_path }}"
|
||||
ipsec_pki_path: "{{ ipsec_pki_path }}"
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
- name: Linux | Delete the PKI directory
|
||||
file:
|
||||
path: "/{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/"
|
||||
path: /{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/
|
||||
state: absent
|
||||
when: facts.ansible_system == "Linux"
|
||||
|
||||
|
|
|
@ -1,2 +1,3 @@
|
|||
ansible==2.7.12
|
||||
ansible==9.1.0
|
||||
jinja2~=3.0.3
|
||||
netaddr
|
||||
|
|
5
roles/client/files/libstrongswan-relax-constraints.conf
Normal file
5
roles/client/files/libstrongswan-relax-constraints.conf
Normal file
|
@ -0,0 +1,5 @@
|
|||
libstrongswan {
|
||||
x509 {
|
||||
enforce_critical = no
|
||||
}
|
||||
}
|
|
@ -1,3 +1,3 @@
|
|||
---
|
||||
- name: restart strongswan
|
||||
service: name=strongswan state=restarted
|
||||
service: name={{ strongswan_service }} state=restarted
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
- name: Gather Facts
|
||||
setup:
|
||||
|
||||
- name: Include system based facts and tasks
|
||||
import_tasks: systems/main.yml
|
||||
|
||||
|
@ -22,9 +22,9 @@
|
|||
|
||||
- name: Setup the ipsec config
|
||||
template:
|
||||
src: "roles/strongswan/templates/client_ipsec.conf.j2"
|
||||
src: roles/strongswan/templates/client_ipsec.conf.j2
|
||||
dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.conf"
|
||||
mode: '0644'
|
||||
mode: "0644"
|
||||
with_items:
|
||||
- "{{ vpn_user }}"
|
||||
notify:
|
||||
|
@ -32,9 +32,9 @@
|
|||
|
||||
- name: Setup the ipsec secrets
|
||||
template:
|
||||
src: "roles/strongswan/templates/client_ipsec.secrets.j2"
|
||||
src: roles/strongswan/templates/client_ipsec.secrets.j2
|
||||
dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.secrets"
|
||||
mode: '0600'
|
||||
mode: "0600"
|
||||
with_items:
|
||||
- "{{ vpn_user }}"
|
||||
notify:
|
||||
|
@ -44,25 +44,33 @@
|
|||
lineinfile:
|
||||
dest: "{{ item.dest }}"
|
||||
line: "{{ item.line }}"
|
||||
create: yes
|
||||
create: true
|
||||
with_items:
|
||||
- dest: "{{ configs_prefix }}/ipsec.conf"
|
||||
line: "include ipsec.{{ IP_subject_alt_name }}.conf"
|
||||
line: include ipsec.{{ IP_subject_alt_name }}.conf
|
||||
- dest: "{{ configs_prefix }}/ipsec.secrets"
|
||||
line: "include ipsec.{{ IP_subject_alt_name }}.secrets"
|
||||
line: include ipsec.{{ IP_subject_alt_name }}.secrets
|
||||
notify:
|
||||
- restart strongswan
|
||||
|
||||
- name: Configure libstrongswan to relax CA constraints
|
||||
copy:
|
||||
src: libstrongswan-relax-constraints.conf
|
||||
dest: "{{ configs_prefix }}/strongswan.d/relax-ca-constraints.conf"
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0644
|
||||
|
||||
- name: Setup the certificates and keys
|
||||
template:
|
||||
src: "{{ item.src }}"
|
||||
dest: "{{ item.dest }}"
|
||||
with_items:
|
||||
- src: "configs/{{ IP_subject_alt_name }}/ipsec/.pki/certs/{{ vpn_user }}.crt"
|
||||
- src: configs/{{ IP_subject_alt_name }}/ipsec/.pki/certs/{{ vpn_user }}.crt
|
||||
dest: "{{ configs_prefix }}/ipsec.d/certs/{{ vpn_user }}.crt"
|
||||
- src: "configs/{{ IP_subject_alt_name }}/ipsec/.pki/cacert.pem"
|
||||
- src: configs/{{ IP_subject_alt_name }}/ipsec/.pki/cacert.pem
|
||||
dest: "{{ configs_prefix }}/ipsec.d/cacerts/{{ IP_subject_alt_name }}.pem"
|
||||
- src: "configs/{{ IP_subject_alt_name }}/ipsec/.pki/private/{{ vpn_user }}.key"
|
||||
- src: configs/{{ IP_subject_alt_name }}/ipsec/.pki/private/{{ vpn_user }}.key
|
||||
dest: "{{ configs_prefix }}/ipsec.d/private/{{ vpn_user }}.key"
|
||||
notify:
|
||||
- restart strongswan
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
---
|
||||
|
||||
- include_tasks: Debian.yml
|
||||
when: ansible_distribution == 'Debian'
|
||||
|
||||
|
|
|
@ -1,243 +1,210 @@
|
|||
---
|
||||
azure_venv: "{{ playbook_dir }}/configs/.venvs/azure"
|
||||
_azure_regions: >
|
||||
[
|
||||
{
|
||||
"displayName": "East Asia",
|
||||
"latitude": "22.267",
|
||||
"longitude": "114.188",
|
||||
"name": "eastasia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Southeast Asia",
|
||||
"latitude": "1.283",
|
||||
"longitude": "103.833",
|
||||
"name": "southeastasia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Central US",
|
||||
"latitude": "41.5908",
|
||||
"longitude": "-93.6208",
|
||||
"name": "centralus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "East US",
|
||||
"latitude": "37.3719",
|
||||
"longitude": "-79.8164",
|
||||
"name": "eastus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "East US 2",
|
||||
"latitude": "36.6681",
|
||||
"longitude": "-78.3889",
|
||||
"name": "eastus2",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West US",
|
||||
"latitude": "37.783",
|
||||
"longitude": "-122.417",
|
||||
"name": "westus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "North Central US",
|
||||
"latitude": "41.8819",
|
||||
"longitude": "-87.6278",
|
||||
"name": "northcentralus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "South Central US",
|
||||
"latitude": "29.4167",
|
||||
"longitude": "-98.5",
|
||||
"name": "southcentralus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "North Europe",
|
||||
"latitude": "53.3478",
|
||||
"longitude": "-6.2597",
|
||||
"name": "northeurope",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West Europe",
|
||||
"latitude": "52.3667",
|
||||
"longitude": "4.9",
|
||||
"name": "westeurope",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Japan West",
|
||||
"latitude": "34.6939",
|
||||
"longitude": "135.5022",
|
||||
"name": "japanwest",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Japan East",
|
||||
"latitude": "35.68",
|
||||
"longitude": "139.77",
|
||||
"name": "japaneast",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Brazil South",
|
||||
"latitude": "-23.55",
|
||||
"longitude": "-46.633",
|
||||
"name": "brazilsouth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Australia East",
|
||||
"latitude": "-33.86",
|
||||
"longitude": "151.2094",
|
||||
"name": "australiaeast",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Australia Southeast",
|
||||
"latitude": "-37.8136",
|
||||
"longitude": "144.9631",
|
||||
"name": "australiasoutheast",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "South India",
|
||||
"latitude": "12.9822",
|
||||
"longitude": "80.1636",
|
||||
"name": "southindia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Central India",
|
||||
"latitude": "18.5822",
|
||||
"longitude": "73.9197",
|
||||
"name": "centralindia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West India",
|
||||
"latitude": "19.088",
|
||||
"longitude": "72.868",
|
||||
"name": "westindia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Canada Central",
|
||||
"latitude": "43.653",
|
||||
"longitude": "-79.383",
|
||||
"name": "canadacentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Canada East",
|
||||
"latitude": "46.817",
|
||||
"longitude": "-71.217",
|
||||
"name": "canadaeast",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "UK South",
|
||||
"latitude": "50.941",
|
||||
"longitude": "-0.799",
|
||||
"name": "uksouth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "UK West",
|
||||
"latitude": "53.427",
|
||||
"longitude": "-3.084",
|
||||
"name": "ukwest",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West Central US",
|
||||
"latitude": "40.890",
|
||||
"longitude": "-110.234",
|
||||
"name": "westcentralus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West US 2",
|
||||
"latitude": "47.233",
|
||||
"longitude": "-119.852",
|
||||
"name": "westus2",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Korea Central",
|
||||
"latitude": "37.5665",
|
||||
"longitude": "126.9780",
|
||||
"name": "koreacentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Korea South",
|
||||
"latitude": "35.1796",
|
||||
"longitude": "129.0756",
|
||||
"name": "koreasouth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "France Central",
|
||||
"latitude": "46.3772",
|
||||
"longitude": "2.3730",
|
||||
"name": "francecentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "France South",
|
||||
"latitude": "43.8345",
|
||||
"longitude": "2.1972",
|
||||
"name": "francesouth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Australia Central",
|
||||
"latitude": "-35.3075",
|
||||
"longitude": "149.1244",
|
||||
"name": "australiacentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Australia Central 2",
|
||||
"latitude": "-35.3075",
|
||||
"longitude": "149.1244",
|
||||
"name": "australiacentral2",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "UAE Central",
|
||||
"latitude": "24.466667",
|
||||
"longitude": "54.366669",
|
||||
"name": "uaecentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "UAE North",
|
||||
"latitude": "25.266666",
|
||||
"longitude": "55.316666",
|
||||
"name": "uaenorth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "South Africa North",
|
||||
"latitude": "-25.731340",
|
||||
"longitude": "28.218370",
|
||||
"name": "southafricanorth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "South Africa West",
|
||||
"latitude": "-34.075691",
|
||||
"longitude": "18.843266",
|
||||
"name": "southafricawest",
|
||||
"subscriptionId": null
|
||||
}
|
||||
]
|
||||
# az account list-locations --query 'sort_by([].{name:name,displayName:displayName,regionalDisplayName:regionalDisplayName}, &name)' -o yaml
|
||||
azure_regions:
|
||||
- displayName: Asia
|
||||
name: asia
|
||||
regionalDisplayName: Asia
|
||||
- displayName: Asia Pacific
|
||||
name: asiapacific
|
||||
regionalDisplayName: Asia Pacific
|
||||
- displayName: Australia
|
||||
name: australia
|
||||
regionalDisplayName: Australia
|
||||
- displayName: Australia Central
|
||||
name: australiacentral
|
||||
regionalDisplayName: (Asia Pacific) Australia Central
|
||||
- displayName: Australia Central 2
|
||||
name: australiacentral2
|
||||
regionalDisplayName: (Asia Pacific) Australia Central 2
|
||||
- displayName: Australia East
|
||||
name: australiaeast
|
||||
regionalDisplayName: (Asia Pacific) Australia East
|
||||
- displayName: Australia Southeast
|
||||
name: australiasoutheast
|
||||
regionalDisplayName: (Asia Pacific) Australia Southeast
|
||||
- displayName: Brazil
|
||||
name: brazil
|
||||
regionalDisplayName: Brazil
|
||||
- displayName: Brazil South
|
||||
name: brazilsouth
|
||||
regionalDisplayName: (South America) Brazil South
|
||||
- displayName: Brazil Southeast
|
||||
name: brazilsoutheast
|
||||
regionalDisplayName: (South America) Brazil Southeast
|
||||
- displayName: Canada
|
||||
name: canada
|
||||
regionalDisplayName: Canada
|
||||
- displayName: Canada Central
|
||||
name: canadacentral
|
||||
regionalDisplayName: (Canada) Canada Central
|
||||
- displayName: Canada East
|
||||
name: canadaeast
|
||||
regionalDisplayName: (Canada) Canada East
|
||||
- displayName: Central India
|
||||
name: centralindia
|
||||
regionalDisplayName: (Asia Pacific) Central India
|
||||
- displayName: Central US
|
||||
name: centralus
|
||||
regionalDisplayName: (US) Central US
|
||||
- displayName: Central US EUAP
|
||||
name: centraluseuap
|
||||
regionalDisplayName: (US) Central US EUAP
|
||||
- displayName: Central US (Stage)
|
||||
name: centralusstage
|
||||
regionalDisplayName: (US) Central US (Stage)
|
||||
- displayName: East Asia
|
||||
name: eastasia
|
||||
regionalDisplayName: (Asia Pacific) East Asia
|
||||
- displayName: East Asia (Stage)
|
||||
name: eastasiastage
|
||||
regionalDisplayName: (Asia Pacific) East Asia (Stage)
|
||||
- displayName: East US
|
||||
name: eastus
|
||||
regionalDisplayName: (US) East US
|
||||
- displayName: East US 2
|
||||
name: eastus2
|
||||
regionalDisplayName: (US) East US 2
|
||||
- displayName: East US 2 EUAP
|
||||
name: eastus2euap
|
||||
regionalDisplayName: (US) East US 2 EUAP
|
||||
- displayName: East US 2 (Stage)
|
||||
name: eastus2stage
|
||||
regionalDisplayName: (US) East US 2 (Stage)
|
||||
- displayName: East US (Stage)
|
||||
name: eastusstage
|
||||
regionalDisplayName: (US) East US (Stage)
|
||||
- displayName: Europe
|
||||
name: europe
|
||||
regionalDisplayName: Europe
|
||||
- displayName: France Central
|
||||
name: francecentral
|
||||
regionalDisplayName: (Europe) France Central
|
||||
- displayName: France South
|
||||
name: francesouth
|
||||
regionalDisplayName: (Europe) France South
|
||||
- displayName: Germany North
|
||||
name: germanynorth
|
||||
regionalDisplayName: (Europe) Germany North
|
||||
- displayName: Germany West Central
|
||||
name: germanywestcentral
|
||||
regionalDisplayName: (Europe) Germany West Central
|
||||
- displayName: Global
|
||||
name: global
|
||||
regionalDisplayName: Global
|
||||
- displayName: India
|
||||
name: india
|
||||
regionalDisplayName: India
|
||||
- displayName: Japan
|
||||
name: japan
|
||||
regionalDisplayName: Japan
|
||||
- displayName: Japan East
|
||||
name: japaneast
|
||||
regionalDisplayName: (Asia Pacific) Japan East
|
||||
- displayName: Japan West
|
||||
name: japanwest
|
||||
regionalDisplayName: (Asia Pacific) Japan West
|
||||
- displayName: Jio India Central
|
||||
name: jioindiacentral
|
||||
regionalDisplayName: (Asia Pacific) Jio India Central
|
||||
- displayName: Jio India West
|
||||
name: jioindiawest
|
||||
regionalDisplayName: (Asia Pacific) Jio India West
|
||||
- displayName: Korea Central
|
||||
name: koreacentral
|
||||
regionalDisplayName: (Asia Pacific) Korea Central
|
||||
- displayName: Korea South
|
||||
name: koreasouth
|
||||
regionalDisplayName: (Asia Pacific) Korea South
|
||||
- displayName: North Central US
|
||||
name: northcentralus
|
||||
regionalDisplayName: (US) North Central US
|
||||
- displayName: North Central US (Stage)
|
||||
name: northcentralusstage
|
||||
regionalDisplayName: (US) North Central US (Stage)
|
||||
- displayName: North Europe
|
||||
name: northeurope
|
||||
regionalDisplayName: (Europe) North Europe
|
||||
- displayName: Norway East
|
||||
name: norwayeast
|
||||
regionalDisplayName: (Europe) Norway East
|
||||
- displayName: Norway West
|
||||
name: norwaywest
|
||||
regionalDisplayName: (Europe) Norway West
|
||||
- displayName: Qatar Central
|
||||
name: qatarcentral
|
||||
regionalDisplayName: (Europe) Qatar Central
|
||||
- displayName: South Africa North
|
||||
name: southafricanorth
|
||||
regionalDisplayName: (Africa) South Africa North
|
||||
- displayName: South Africa West
|
||||
name: southafricawest
|
||||
regionalDisplayName: (Africa) South Africa West
|
||||
- displayName: South Central US
|
||||
name: southcentralus
|
||||
regionalDisplayName: (US) South Central US
|
||||
- displayName: South Central US (Stage)
|
||||
name: southcentralusstage
|
||||
regionalDisplayName: (US) South Central US (Stage)
|
||||
- displayName: Southeast Asia
|
||||
name: southeastasia
|
||||
regionalDisplayName: (Asia Pacific) Southeast Asia
|
||||
- displayName: Southeast Asia (Stage)
|
||||
name: southeastasiastage
|
||||
regionalDisplayName: (Asia Pacific) Southeast Asia (Stage)
|
||||
- displayName: South India
|
||||
name: southindia
|
||||
regionalDisplayName: (Asia Pacific) South India
|
||||
- displayName: Sweden Central
|
||||
name: swedencentral
|
||||
regionalDisplayName: (Europe) Sweden Central
|
||||
- displayName: Sweden South
|
||||
name: swedensouth
|
||||
regionalDisplayName: (Europe) Sweden South
|
||||
- displayName: Switzerland North
|
||||
name: switzerlandnorth
|
||||
regionalDisplayName: (Europe) Switzerland North
|
||||
- displayName: Switzerland West
|
||||
name: switzerlandwest
|
||||
regionalDisplayName: (Europe) Switzerland West
|
||||
- displayName: UAE Central
|
||||
name: uaecentral
|
||||
regionalDisplayName: (Middle East) UAE Central
|
||||
- displayName: UAE North
|
||||
name: uaenorth
|
||||
regionalDisplayName: (Middle East) UAE North
|
||||
- displayName: United Kingdom
|
||||
name: uk
|
||||
regionalDisplayName: United Kingdom
|
||||
- displayName: UK South
|
||||
name: uksouth
|
||||
regionalDisplayName: (Europe) UK South
|
||||
- displayName: UK West
|
||||
name: ukwest
|
||||
regionalDisplayName: (Europe) UK West
|
||||
- displayName: United States
|
||||
name: unitedstates
|
||||
regionalDisplayName: United States
|
||||
- displayName: West Central US
|
||||
name: westcentralus
|
||||
regionalDisplayName: (US) West Central US
|
||||
- displayName: West Europe
|
||||
name: westeurope
|
||||
regionalDisplayName: (Europe) West Europe
|
||||
- displayName: West India
|
||||
name: westindia
|
||||
regionalDisplayName: (Asia Pacific) West India
|
||||
- displayName: West US
|
||||
name: westus
|
||||
regionalDisplayName: (US) West US
|
||||
- displayName: West US 2
|
||||
name: westus2
|
||||
regionalDisplayName: (US) West US 2
|
||||
- displayName: West US 2 (Stage)
|
||||
name: westus2stage
|
||||
regionalDisplayName: (US) West US 2 (Stage)
|
||||
- displayName: West US 3
|
||||
name: westus3
|
||||
regionalDisplayName: (US) West US 3
|
||||
- displayName: West US (Stage)
|
||||
name: westusstage
|
||||
regionalDisplayName: (US) West US (Stage)
|
||||
|
|
|
@ -11,8 +11,26 @@
|
|||
"vmSize": {
|
||||
"type": "string"
|
||||
},
|
||||
"imageReferencePublisher": {
|
||||
"type": "string"
|
||||
},
|
||||
"imageReferenceOffer": {
|
||||
"type": "string"
|
||||
},
|
||||
"imageReferenceSku": {
|
||||
"type": "string"
|
||||
},
|
||||
"imageReferenceVersion": {
|
||||
"type": "string"
|
||||
},
|
||||
"osDiskType": {
|
||||
"type": "string"
|
||||
},
|
||||
"SshPort": {
|
||||
"type": "int"
|
||||
},
|
||||
"UserData": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"variables": {
|
||||
|
@ -30,10 +48,10 @@
|
|||
{
|
||||
"name": "AllowSSH",
|
||||
"properties": {
|
||||
"description": "Locks inbound down to ssh default port 22.",
|
||||
"description": "Allow SSH",
|
||||
"protocol": "Tcp",
|
||||
"sourcePortRange": "*",
|
||||
"destinationPortRange": "22",
|
||||
"destinationPortRange": "[parameters('SshPort')]",
|
||||
"sourceAddressPrefix": "*",
|
||||
"destinationAddressPrefix": "*",
|
||||
"access": "Allow",
|
||||
|
@ -160,13 +178,14 @@
|
|||
},
|
||||
"osProfile": {
|
||||
"computerName": "[resourceGroup().name]",
|
||||
"adminUsername": "ubuntu",
|
||||
"customData": "[parameters('UserData')]",
|
||||
"adminUsername": "algo",
|
||||
"linuxConfiguration": {
|
||||
"disablePasswordAuthentication": true,
|
||||
"ssh": {
|
||||
"publicKeys": [
|
||||
{
|
||||
"path": "/home/ubuntu/.ssh/authorized_keys",
|
||||
"path": "/home/algo/.ssh/authorized_keys",
|
||||
"keyData": "[parameters('sshKeyData')]"
|
||||
}
|
||||
]
|
||||
|
@ -175,13 +194,16 @@
|
|||
},
|
||||
"storageProfile": {
|
||||
"imageReference": {
|
||||
"publisher": "Canonical",
|
||||
"offer": "UbuntuServer",
|
||||
"publisher": "[parameters('imageReferencePublisher')]",
|
||||
"offer": "[parameters('imageReferenceOffer')]",
|
||||
"sku": "[parameters('imageReferenceSku')]",
|
||||
"version": "latest"
|
||||
"version": "[parameters('imageReferenceVersion')]"
|
||||
},
|
||||
"osDisk": {
|
||||
"createOption": "FromImage"
|
||||
"createOption": "FromImage",
|
||||
"managedDisk": {
|
||||
"storageAccountType": "[parameters('osDiskType')]"
|
||||
}
|
||||
}
|
||||
},
|
||||
"networkProfile": {
|
||||
|
|
|
@ -2,7 +2,6 @@
|
|||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
|
||||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
|
@ -30,12 +29,24 @@
|
|||
value: "{{ wireguard_port }}"
|
||||
vmSize:
|
||||
value: "{{ cloud_providers.azure.size }}"
|
||||
imageReferencePublisher:
|
||||
value: "{{ cloud_providers.azure.image.publisher }}"
|
||||
imageReferenceOffer:
|
||||
value: "{{ cloud_providers.azure.image.offer }}"
|
||||
imageReferenceSku:
|
||||
value: "{{ cloud_providers.azure.image }}"
|
||||
value: "{{ cloud_providers.azure.image.sku }}"
|
||||
imageReferenceVersion:
|
||||
value: "{{ cloud_providers.azure.image.version }}"
|
||||
osDiskType:
|
||||
value: "{{ cloud_providers.azure.osDisk.type }}"
|
||||
SshPort:
|
||||
value: "{{ ssh_port }}"
|
||||
UserData:
|
||||
value: "{{ lookup('template', 'files/cloud-init/base.yml') | b64encode }}"
|
||||
register: azure_rm_deployment
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ azure_rm_deployment.deployment.outputs.publicIPAddresses.value }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
environment:
|
||||
PYTHONPATH: "{{ azure_venv }}/lib/python2.7/site-packages/"
|
||||
ansible_ssh_user: algo
|
||||
ansible_ssh_port: "{{ ssh_port }}"
|
||||
cloudinit: true
|
||||
|
|
|
@ -6,10 +6,6 @@
|
|||
subscription_id: "{{ azure_subscription_id | default(lookup('env','AZURE_SUBSCRIPTION_ID'), true) }}"
|
||||
|
||||
- block:
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
azure_regions: "{{ _azure_regions|from_json | sort(attribute='name') }}"
|
||||
|
||||
- name: Set the default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
|
@ -21,7 +17,7 @@
|
|||
prompt: |
|
||||
What region should the server be located in?
|
||||
{% for r in azure_regions %}
|
||||
{{ loop.index }}. {{ r['displayName'] }}
|
||||
{{ loop.index }}. {{ r['regionalDisplayName'] }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
|
|
|
@ -1,41 +1,6 @@
|
|||
---
|
||||
- name: Clean up the environment
|
||||
file:
|
||||
dest: "{{ azure_venv }}"
|
||||
state: absent
|
||||
when: clean_environment
|
||||
|
||||
- name: Install requirements
|
||||
pip:
|
||||
name:
|
||||
- packaging
|
||||
- requests[security]
|
||||
- azure-cli-core==2.0.35
|
||||
- azure-cli-nspkg==3.0.2
|
||||
- azure-common==1.1.11
|
||||
- azure-mgmt-batch==4.1.0
|
||||
- azure-mgmt-compute==2.1.0
|
||||
- azure-mgmt-containerinstance==0.4.0
|
||||
- azure-mgmt-containerregistry==2.0.0
|
||||
- azure-mgmt-containerservice==3.0.1
|
||||
- azure-mgmt-dns==1.2.0
|
||||
- azure-mgmt-keyvault==0.40.0
|
||||
- azure-mgmt-marketplaceordering==0.1.0
|
||||
- azure-mgmt-monitor==0.5.2
|
||||
- azure-mgmt-network==1.7.1
|
||||
- azure-mgmt-nspkg==2.0.0
|
||||
- azure-mgmt-rdbms==1.2.0
|
||||
- azure-mgmt-resource==1.2.2
|
||||
- azure-mgmt-sql==0.7.1
|
||||
- azure-mgmt-storage==1.5.0
|
||||
- azure-mgmt-trafficmanager==0.50.0
|
||||
- azure-mgmt-web==0.32.0
|
||||
- azure-nspkg==2.0.0
|
||||
- azure-storage==0.35.1
|
||||
- msrest==0.4.29
|
||||
- msrestazure==0.4.31
|
||||
- azure-keyvault==1.0.0a1
|
||||
- azure-graphrbac==0.40.0
|
||||
requirements: https://raw.githubusercontent.com/ansible-collections/azure/v1.13.0/requirements-azure.txt
|
||||
state: latest
|
||||
virtualenv: "{{ azure_venv }}"
|
||||
virtualenv_python: python2.7
|
||||
virtualenv_python: python3
|
||||
|
|
59
roles/cloud-cloudstack/tasks/main.yml
Normal file
59
roles/cloud-cloudstack/tasks/main.yml
Normal file
|
@ -0,0 +1,59 @@
|
|||
---
|
||||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- block:
|
||||
- set_fact:
|
||||
algo_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input is defined and _algo_region.user_input | length > 0 %}{{ cs_zones[_algo_region.user_input | int -1 ]['name'] }}
|
||||
{%- else %}{{ cs_zones[default_zone | int - 1]['name'] }}{% endif %}
|
||||
|
||||
- name: Security group created
|
||||
cs_securitygroup:
|
||||
name: "{{ algo_server_name }}-security_group"
|
||||
description: AlgoVPN security group
|
||||
register: cs_security_group
|
||||
|
||||
- name: Security rules created
|
||||
cs_securitygroup_rule:
|
||||
security_group: "{{ cs_security_group.name }}"
|
||||
protocol: "{{ item.proto }}"
|
||||
start_port: "{{ item.start_port }}"
|
||||
end_port: "{{ item.end_port }}"
|
||||
cidr: "{{ item.range }}"
|
||||
with_items:
|
||||
- { proto: tcp, start_port: "{{ ssh_port }}", end_port: "{{ ssh_port }}", range: 0.0.0.0/0 }
|
||||
- { proto: udp, start_port: 4500, end_port: 4500, range: 0.0.0.0/0 }
|
||||
- { proto: udp, start_port: 500, end_port: 500, range: 0.0.0.0/0 }
|
||||
- { proto: udp, start_port: "{{ wireguard_port }}", end_port: "{{ wireguard_port }}", range: 0.0.0.0/0 }
|
||||
|
||||
- name: Set facts
|
||||
set_fact:
|
||||
image_id: "{{ cloud_providers.cloudstack.image }}"
|
||||
size: "{{ cloud_providers.cloudstack.size }}"
|
||||
disk: "{{ cloud_providers.cloudstack.disk }}"
|
||||
|
||||
- name: Server created
|
||||
cs_instance:
|
||||
name: "{{ algo_server_name }}"
|
||||
root_disk_size: "{{ disk }}"
|
||||
template: "{{ image_id }}"
|
||||
security_groups: "{{ cs_security_group.name }}"
|
||||
zone: "{{ algo_region }}"
|
||||
service_offering: "{{ size }}"
|
||||
user_data: "{{ lookup('template', 'files/cloud-init/base.yml') }}"
|
||||
register: cs_server
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ cs_server.default_ip }}"
|
||||
ansible_ssh_user: algo
|
||||
ansible_ssh_port: "{{ ssh_port }}"
|
||||
cloudinit: true
|
||||
environment:
|
||||
CLOUDSTACK_KEY: "{{ algo_cs_key }}"
|
||||
CLOUDSTACK_SECRET: "{{ algo_cs_token }}"
|
||||
CLOUDSTACK_ENDPOINT: "{{ algo_cs_url }}"
|
65
roles/cloud-cloudstack/tasks/prompts.yml
Normal file
65
roles/cloud-cloudstack/tasks/prompts.yml
Normal file
|
@ -0,0 +1,65 @@
|
|||
---
|
||||
- block:
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter the API key (https://trailofbits.github.io/algo/cloud-cloudstack.html):
|
||||
echo: false
|
||||
register: _cs_key
|
||||
when:
|
||||
- cs_key is undefined
|
||||
- lookup('env','CLOUDSTACK_KEY')|length <= 0
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter the API ssecret (https://trailofbits.github.io/algo/cloud-cloudstack.html):
|
||||
echo: false
|
||||
register: _cs_secret
|
||||
when:
|
||||
- cs_secret is undefined
|
||||
- lookup('env','CLOUDSTACK_SECRET')|length <= 0
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter the API endpoint (https://trailofbits.github.io/algo/cloud-cloudstack.html)
|
||||
[https://api.exoscale.com/compute]
|
||||
register: _cs_url
|
||||
when:
|
||||
- cs_url is undefined
|
||||
- lookup('env', 'CLOUDSTACK_ENDPOINT') | length <= 0
|
||||
|
||||
- set_fact:
|
||||
algo_cs_key: "{{ cs_key | default(_cs_key.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_KEY'), true) }}"
|
||||
algo_cs_token: "{{ cs_secret | default(_cs_secret.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_SECRET'), true) }}"
|
||||
algo_cs_url: "{{ cs_url | default(_cs_url.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_ENDPOINT'), true) | default('https://api.exoscale.com/compute',\
|
||||
\ true) }}"
|
||||
|
||||
- name: Get zones on cloud
|
||||
cs_zone_info:
|
||||
register: _cs_zones
|
||||
environment:
|
||||
CLOUDSTACK_KEY: "{{ algo_cs_key }}"
|
||||
CLOUDSTACK_SECRET: "{{ algo_cs_token }}"
|
||||
CLOUDSTACK_ENDPOINT: "{{ algo_cs_url }}"
|
||||
|
||||
- name: Extract zones from output
|
||||
set_fact:
|
||||
cs_zones: "{{ _cs_zones['zones'] | sort(attribute='name') }}"
|
||||
|
||||
- name: Set the default zone
|
||||
set_fact:
|
||||
default_zone: >-
|
||||
{% for z in cs_zones %}
|
||||
{%- if z['name'] == "ch-gva-2" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What zone should the server be located in?
|
||||
{% for z in cs_zones %}
|
||||
{{ loop.index }}. {{ z['name'] }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired zone
|
||||
[{{ default_zone }}]
|
||||
register: _algo_region
|
||||
when: region is undefined
|
8
roles/cloud-cloudstack/tasks/venv.yml
Normal file
8
roles/cloud-cloudstack/tasks/venv.yml
Normal file
|
@ -0,0 +1,8 @@
|
|||
---
|
||||
- name: Install requirements
|
||||
pip:
|
||||
name:
|
||||
- cs
|
||||
- sshpubkeys
|
||||
state: latest
|
||||
virtualenv_python: python3
|
|
@ -1,2 +0,0 @@
|
|||
---
|
||||
digitalocean_venv: "{{ playbook_dir }}/configs/.venvs/digitalocean"
|
|
@ -1,105 +1,50 @@
|
|||
---
|
||||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
|
||||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- name: Set additional facts
|
||||
set_fact:
|
||||
algo_do_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input %}{{ do_regions[_algo_region.user_input | int -1 ]['slug'] }}
|
||||
{%- else %}{{ do_regions[default_region | int - 1]['slug'] }}{% endif %}
|
||||
public_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
|
||||
- block:
|
||||
- name: "Delete the existing Algo SSH keys"
|
||||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
until: not ssh_keys.changed
|
||||
retries: 10
|
||||
delay: 1
|
||||
|
||||
rescue:
|
||||
- name: Collect the fail error
|
||||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
ignore_errors: yes
|
||||
|
||||
- debug: var=ssh_keys
|
||||
|
||||
- fail:
|
||||
msg: "Please, ensure that your API token is not read-only."
|
||||
|
||||
- name: "Upload the SSH key"
|
||||
digital_ocean:
|
||||
state: present
|
||||
command: ssh
|
||||
ssh_pub_key: "{{ public_key }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
- name: Upload the SSH key
|
||||
digital_ocean_sshkey:
|
||||
oauth_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
ssh_pub_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
register: do_ssh_key
|
||||
|
||||
- name: "Creating a droplet..."
|
||||
digital_ocean:
|
||||
- name: Creating a droplet...
|
||||
digital_ocean_droplet:
|
||||
state: present
|
||||
command: droplet
|
||||
name: "{{ algo_server_name }}"
|
||||
region_id: "{{ algo_do_region }}"
|
||||
size_id: "{{ cloud_providers.digitalocean.size }}"
|
||||
image_id: "{{ cloud_providers.digitalocean.image }}"
|
||||
ssh_key_ids: "{{ do_ssh_key.ssh_key.id }}"
|
||||
unique_name: yes
|
||||
api_token: "{{ algo_do_token }}"
|
||||
ipv6: yes
|
||||
register: do
|
||||
oauth_token: "{{ algo_do_token }}"
|
||||
size: "{{ cloud_providers.digitalocean.size }}"
|
||||
region: "{{ algo_do_region }}"
|
||||
image: "{{ cloud_providers.digitalocean.image }}"
|
||||
wait_timeout: 300
|
||||
unique_name: true
|
||||
ipv6: true
|
||||
ssh_keys: "{{ do_ssh_key.data.ssh_key.id }}"
|
||||
user_data: "{{ lookup('template', 'files/cloud-init/base.yml') }}"
|
||||
tags:
|
||||
- Environment:Algo
|
||||
register: digital_ocean_droplet
|
||||
|
||||
# Return data is not idempotent
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ do.droplet.ip_address }}"
|
||||
ansible_ssh_user: root
|
||||
|
||||
- name: Tag the droplet
|
||||
digital_ocean_tag:
|
||||
name: "Environment:Algo"
|
||||
resource_id: "{{ do.droplet.id }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
state: present
|
||||
droplet: "{{ digital_ocean_droplet.data.droplet | default(digital_ocean_droplet.data) }}"
|
||||
|
||||
- block:
|
||||
- name: "Delete the new Algo SSH key"
|
||||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
until: not ssh_keys.changed
|
||||
retries: 10
|
||||
delay: 1
|
||||
- name: Create a Floating IP
|
||||
digital_ocean_floating_ip:
|
||||
state: present
|
||||
oauth_token: "{{ algo_do_token }}"
|
||||
droplet_id: "{{ droplet.id }}"
|
||||
register: digital_ocean_floating_ip
|
||||
|
||||
rescue:
|
||||
- name: Collect the fail error
|
||||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
ignore_errors: yes
|
||||
- name: Set the static ip as a fact
|
||||
set_fact:
|
||||
cloud_alternative_ingress_ip: "{{ digital_ocean_floating_ip.data.floating_ip.ip }}"
|
||||
when: alternative_ingress_ip
|
||||
|
||||
- debug: var=ssh_keys
|
||||
|
||||
- fail:
|
||||
msg: "Please, ensure that your API token is not read-only."
|
||||
environment:
|
||||
PYTHONPATH: "{{ digitalocean_venv }}/lib/python2.7/site-packages/"
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ (droplet.networks.v4 | selectattr('type', '==', 'public')).0.ip_address }}"
|
||||
ansible_ssh_user: algo
|
||||
ansible_ssh_port: "{{ ssh_port }}"
|
||||
cloudinit: true
|
||||
|
|
|
@ -18,13 +18,13 @@
|
|||
method: GET
|
||||
status_code: 200
|
||||
headers:
|
||||
Content-Type: "application/json"
|
||||
Authorization: "Bearer {{ algo_do_token }}"
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer {{ algo_do_token }}
|
||||
register: _do_regions
|
||||
|
||||
- name: Set facts about thre regions
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
do_regions: "{{ _do_regions.json.regions | sort(attribute='slug') }}"
|
||||
do_regions: "{{ _do_regions.json.regions | selectattr('available', 'true') | sort(attribute='slug') }}"
|
||||
|
||||
- name: Set default region
|
||||
set_fact:
|
||||
|
@ -44,3 +44,10 @@
|
|||
[{{ default_region }}]
|
||||
register: _algo_region
|
||||
when: region is undefined
|
||||
|
||||
- name: Set additional facts
|
||||
set_fact:
|
||||
algo_do_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input %}{{ do_regions[_algo_region.user_input | int -1 ]['slug'] }}
|
||||
{%- else %}{{ do_regions[default_region | int - 1]['slug'] }}{% endif %}
|
||||
|
|
|
@ -1,13 +0,0 @@
|
|||
---
|
||||
- name: Clean up the environment
|
||||
file:
|
||||
dest: "{{ digitalocean_venv }}"
|
||||
state: absent
|
||||
when: clean_environment
|
||||
|
||||
- name: Install requirements
|
||||
pip:
|
||||
name: dopy
|
||||
version: 0.3.5
|
||||
virtualenv: "{{ digitalocean_venv }}"
|
||||
virtualenv_python: python2.7
|
|
@ -1,8 +1,6 @@
|
|||
---
|
||||
ami_search_encrypted: omit
|
||||
encrypted: "{{ cloud_providers.ec2.encrypted }}"
|
||||
ec2_vpc_nets:
|
||||
cidr_block: 172.16.0.0/16
|
||||
subnet_cidr: 172.16.254.0/23
|
||||
ec2_venv: "{{ playbook_dir }}/configs/.venvs/aws"
|
||||
existing_eip: ""
|
||||
|
|
|
@ -14,9 +14,23 @@ Parameters:
|
|||
UseThisElasticIP:
|
||||
Type: String
|
||||
Default: ''
|
||||
EbsEncrypted:
|
||||
Type: String
|
||||
UserData:
|
||||
Type: String
|
||||
SshPort:
|
||||
Type: String
|
||||
InstanceMarketTypeParameter:
|
||||
Description: Launch a Spot instance or standard on-demand instance
|
||||
Type: String
|
||||
Default: on-demand
|
||||
AllowedValues:
|
||||
- spot
|
||||
- on-demand
|
||||
Conditions:
|
||||
AllocateNewEIP: !Equals [!Ref UseThisElasticIP, '']
|
||||
AssociateExistingEIP: !Not [!Equals [!Ref UseThisElasticIP, '']]
|
||||
InstanceIsSpot: !Equals [spot, !Ref InstanceMarketTypeParameter]
|
||||
Resources:
|
||||
VPC:
|
||||
Type: AWS::EC2::VPC
|
||||
|
@ -121,8 +135,8 @@ Resources:
|
|||
GroupDescription: Enable SSH and IPsec
|
||||
SecurityGroupIngress:
|
||||
- IpProtocol: tcp
|
||||
FromPort: '22'
|
||||
ToPort: '22'
|
||||
FromPort: !Ref SshPort
|
||||
ToPort: !Ref SshPort
|
||||
CidrIp: 0.0.0.0/0
|
||||
- IpProtocol: udp
|
||||
FromPort: '500'
|
||||
|
@ -140,25 +154,30 @@ Resources:
|
|||
- Key: Name
|
||||
Value: !Ref AWS::StackName
|
||||
|
||||
EC2LaunchTemplate:
|
||||
Type: AWS::EC2::LaunchTemplate
|
||||
Condition: InstanceIsSpot # Only create this template if requested
|
||||
Properties: # a spot instance_market_type in config.cfg
|
||||
LaunchTemplateName: !Ref AWS::StackName
|
||||
LaunchTemplateData:
|
||||
InstanceMarketOptions:
|
||||
MarketType: spot
|
||||
|
||||
EC2Instance:
|
||||
Type: AWS::EC2::Instance
|
||||
DependsOn:
|
||||
- SubnetIPv6
|
||||
- Subnet
|
||||
- InstanceSecurityGroup
|
||||
Metadata:
|
||||
AWS::CloudFormation::Init:
|
||||
config:
|
||||
files:
|
||||
/home/ubuntu/.ssh/authorized_keys:
|
||||
content:
|
||||
Ref: PublicSSHKeyParameter
|
||||
mode: "000644"
|
||||
owner: "ubuntu"
|
||||
group: "ubuntu"
|
||||
Properties:
|
||||
InstanceType:
|
||||
Ref: InstanceTypeParameter
|
||||
BlockDeviceMappings:
|
||||
- DeviceName: /dev/sda1
|
||||
Ebs:
|
||||
DeleteOnTermination: true
|
||||
VolumeSize: 8
|
||||
Encrypted: !Ref EbsEncrypted
|
||||
InstanceInitiatedShutdownBehavior: terminate
|
||||
SecurityGroupIds:
|
||||
- Ref: InstanceSecurityGroup
|
||||
|
@ -166,15 +185,15 @@ Resources:
|
|||
Ref: ImageIdParameter
|
||||
SubnetId: !Ref Subnet
|
||||
Ipv6AddressCount: 1
|
||||
UserData:
|
||||
"Fn::Base64":
|
||||
!Sub |
|
||||
#!/bin/bash -xe
|
||||
apt-get update
|
||||
apt-get -y install python-pip
|
||||
pip install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz
|
||||
cfn-init -v --stack ${AWS::StackName} --resource EC2Instance --region ${AWS::Region}
|
||||
cfn-signal -e $? --stack ${AWS::StackName} --resource EC2Instance --region ${AWS::Region}
|
||||
UserData: !Ref UserData
|
||||
LaunchTemplate:
|
||||
!If # Only if Conditions created "EC2LaunchTemplate"
|
||||
- InstanceIsSpot
|
||||
-
|
||||
LaunchTemplateId:
|
||||
!Ref EC2LaunchTemplate
|
||||
Version: 1
|
||||
- !Ref AWS::NoValue # Else this LaunchTemplate not set
|
||||
Tags:
|
||||
- Key: Name
|
||||
Value: !Ref AWS::StackName
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
stack_name: "{{ stack_name }}"
|
||||
state: "present"
|
||||
state: present
|
||||
region: "{{ algo_region }}"
|
||||
template: roles/cloud-ec2/files/stack.yaml
|
||||
template_parameters:
|
||||
|
@ -13,6 +13,10 @@
|
|||
ImageIdParameter: "{{ ami_image }}"
|
||||
WireGuardPort: "{{ wireguard_port }}"
|
||||
UseThisElasticIP: "{{ existing_eip }}"
|
||||
EbsEncrypted: "{{ encrypted }}"
|
||||
UserData: "{{ lookup('template', 'files/cloud-init/base.yml') | b64encode }}"
|
||||
SshPort: "{{ ssh_port }}"
|
||||
InstanceMarketTypeParameter: "{{ cloud_providers.ec2.instance_market_type }}"
|
||||
tags:
|
||||
Environment: Algo
|
||||
register: stack
|
||||
|
|
|
@ -1,29 +0,0 @@
|
|||
---
|
||||
- name: Check if the encrypted image already exist
|
||||
ec2_ami_facts:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
owners: self
|
||||
region: "{{ algo_region }}"
|
||||
filters:
|
||||
state: available
|
||||
"tag:Algo": encrypted
|
||||
"tag:image": "{{ cloud_providers.ec2.image.name }}"
|
||||
register: search_crypt
|
||||
|
||||
- name: Copy to an encrypted image
|
||||
ec2_ami_copy:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
encrypted: yes
|
||||
name: "algo/{{ cloud_providers.ec2.image.name }}"
|
||||
kms_key_id: "{{ kms_key_id | default(omit) }}"
|
||||
region: "{{ algo_region }}"
|
||||
source_image_id: "{{ (ami_search.images | sort(attribute='creation_date') | last)['image_id'] }}"
|
||||
source_region: "{{ algo_region }}"
|
||||
wait: true
|
||||
tags:
|
||||
Algo: "encrypted"
|
||||
image: "{{ cloud_providers.ec2.image.name }}"
|
||||
register: ami_search_encrypted
|
||||
when: search_crypt.images|length|int == 0
|
|
@ -2,35 +2,29 @@
|
|||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
|
||||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- name: Locate official AMI for region
|
||||
ec2_ami_facts:
|
||||
ec2_ami_info:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
owners: "{{ cloud_providers.ec2.image.owner }}"
|
||||
region: "{{ algo_region }}"
|
||||
filters:
|
||||
name: "ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-amd64-server-*"
|
||||
architecture: "{{ cloud_providers.ec2.image.arch }}"
|
||||
name: ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-*64-server-*
|
||||
register: ami_search
|
||||
|
||||
- import_tasks: encrypt_image.yml
|
||||
when: encrypted
|
||||
|
||||
- name: Set the ami id as a fact
|
||||
set_fact:
|
||||
ami_image: >-
|
||||
{% if ami_search_encrypted.image_id is defined %}{{ ami_search_encrypted.image_id }}
|
||||
{%- elif search_crypt.images is defined and search_crypt.images|length >= 1 %}{{ (search_crypt.images | sort(attribute='creation_date') | last)['image_id'] }}
|
||||
{%- else %}{{ (ami_search.images | sort(attribute='creation_date') | last)['image_id'] }}{% endif %}
|
||||
ami_image: "{{ (ami_search.images | sort(attribute='creation_date') | last)['image_id'] }}"
|
||||
|
||||
- name: Deploy the stack
|
||||
import_tasks: cloudformation.yml
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ stack.stack_outputs.ElasticIP }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
environment:
|
||||
PYTHONPATH: "{{ ec2_venv }}/lib/python2.7/site-packages/"
|
||||
ansible_ssh_user: algo
|
||||
ansible_ssh_port: "{{ ssh_port }}"
|
||||
cloudinit: true
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your aws_access_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
|
||||
Enter your AWS Access Key ID (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
|
||||
Note: Make sure to use an IAM user with an acceptable policy attached (see https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md)
|
||||
echo: false
|
||||
register: _aws_access_key
|
||||
|
@ -11,7 +11,7 @@
|
|||
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your aws_secret_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
|
||||
Enter your AWS Secret Access Key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
|
||||
echo: false
|
||||
register: _aws_secret_key
|
||||
when:
|
||||
|
@ -24,7 +24,7 @@
|
|||
|
||||
- block:
|
||||
- name: Get regions
|
||||
aws_region_facts:
|
||||
aws_region_info:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
region: us-east-1
|
||||
|
@ -64,7 +64,7 @@
|
|||
|
||||
- block:
|
||||
- name: Get existing available Elastic IPs
|
||||
ec2_eip_facts:
|
||||
ec2_eip_info:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
region: "{{ algo_region }}"
|
||||
|
|
|
@ -1,15 +1,8 @@
|
|||
---
|
||||
- name: Clean up the environment
|
||||
file:
|
||||
dest: "{{ ec2_venv }}"
|
||||
state: absent
|
||||
when: clean_environment
|
||||
|
||||
- name: Install requirements
|
||||
pip:
|
||||
name:
|
||||
- boto>=2.5
|
||||
- boto3
|
||||
state: latest
|
||||
virtualenv: "{{ ec2_venv }}"
|
||||
virtualenv_python: python2.7
|
||||
virtualenv_python: python3
|
||||
|
|
|
@ -1,2 +0,0 @@
|
|||
---
|
||||
gce_venv: "{{ playbook_dir }}/configs/.venvs/gce"
|
|
@ -2,56 +2,83 @@
|
|||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
|
||||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- name: Network configured
|
||||
gce_net:
|
||||
name: "{{ algo_server_name }}"
|
||||
fwname: "{{ algo_server_name }}-fw"
|
||||
allowed: "udp:500,4500,{{ wireguard_port }};tcp:22"
|
||||
state: "present"
|
||||
mode: auto
|
||||
src_range: 0.0.0.0/0
|
||||
service_account_email: "{{ service_account_email }}"
|
||||
credentials_file: "{{ credentials_file_path }}"
|
||||
project_id: "{{ project_id }}"
|
||||
gcp_compute_network:
|
||||
auth_kind: serviceaccount
|
||||
service_account_file: "{{ credentials_file_path }}"
|
||||
project: "{{ project_id }}"
|
||||
name: algovpn
|
||||
auto_create_subnetworks: true
|
||||
routing_config:
|
||||
routing_mode: REGIONAL
|
||||
register: gcp_compute_network
|
||||
|
||||
- name: Firewall configured
|
||||
gcp_compute_firewall:
|
||||
auth_kind: serviceaccount
|
||||
service_account_file: "{{ credentials_file_path }}"
|
||||
project: "{{ project_id }}"
|
||||
name: algovpn
|
||||
network: "{{ gcp_compute_network }}"
|
||||
direction: INGRESS
|
||||
allowed:
|
||||
- ip_protocol: udp
|
||||
ports:
|
||||
- "500"
|
||||
- "4500"
|
||||
- "{{ wireguard_port|string }}"
|
||||
- ip_protocol: tcp
|
||||
ports:
|
||||
- "{{ ssh_port }}"
|
||||
- ip_protocol: icmp
|
||||
|
||||
- block:
|
||||
- name: External IP allocated
|
||||
gce_eip:
|
||||
service_account_email: "{{ service_account_email }}"
|
||||
credentials_file: "{{ credentials_file_path }}"
|
||||
project_id: "{{ project_id }}"
|
||||
gcp_compute_address:
|
||||
auth_kind: serviceaccount
|
||||
service_account_file: "{{ credentials_file_path }}"
|
||||
project: "{{ project_id }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
region: "{{ algo_region.split('-')[0:2] | join('-') }}"
|
||||
state: present
|
||||
register: gce_eip
|
||||
region: "{{ algo_region }}"
|
||||
register: gcp_compute_address
|
||||
|
||||
- name: Set External IP as a fact
|
||||
set_fact:
|
||||
external_ip: "{{ gce_eip.address }}"
|
||||
external_ip: "{{ gcp_compute_address.address }}"
|
||||
when: cloud_providers.gce.external_static_ip
|
||||
|
||||
- name: "Creating a new instance..."
|
||||
gce:
|
||||
instance_names: "{{ algo_server_name }}"
|
||||
zone: "{{ algo_region }}"
|
||||
external_ip: "{{ external_ip | default('ephemeral') }}"
|
||||
- name: Instance created
|
||||
gcp_compute_instance:
|
||||
auth_kind: serviceaccount
|
||||
service_account_file: "{{ credentials_file_path }}"
|
||||
project: "{{ project_id }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
zone: "{{ algo_zone }}"
|
||||
machine_type: "{{ cloud_providers.gce.size }}"
|
||||
image: "{{ cloud_providers.gce.image }}"
|
||||
service_account_email: "{{ service_account_email }}"
|
||||
credentials_file: "{{ credentials_file_path }}"
|
||||
project_id: "{{ project_id }}"
|
||||
metadata: '{"ssh-keys":"ubuntu:{{ ssh_public_key_lookup }}"}'
|
||||
network: "{{ algo_server_name }}"
|
||||
disks:
|
||||
- auto_delete: true
|
||||
boot: true
|
||||
initialize_params:
|
||||
source_image: projects/ubuntu-os-cloud/global/images/family/{{ cloud_providers.gce.image }}
|
||||
metadata:
|
||||
ssh-keys: algo:{{ ssh_public_key_lookup }}
|
||||
user-data: "{{ lookup('template', 'files/cloud-init/base.yml') }}"
|
||||
network_interfaces:
|
||||
- network: "{{ gcp_compute_network }}"
|
||||
access_configs:
|
||||
- name: "{{ algo_server_name }}"
|
||||
nat_ip: "{{ gcp_compute_address|default(None) }}"
|
||||
type: ONE_TO_ONE_NAT
|
||||
tags:
|
||||
- "environment-algo"
|
||||
register: google_vm
|
||||
items:
|
||||
- environment-algo
|
||||
register: gcp_compute_instance
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ google_vm.instance_data[0].public_ip }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
environment:
|
||||
PYTHONPATH: "{{ gce_venv }}/lib/python2.7/site-packages/"
|
||||
cloud_instance_ip: "{{ gcp_compute_instance.networkInterfaces[0].accessConfigs[0].natIP }}"
|
||||
ansible_ssh_user: algo
|
||||
ansible_ssh_port: "{{ ssh_port }}"
|
||||
cloudinit: true
|
||||
|
|
|
@ -9,7 +9,8 @@
|
|||
- lookup('env','GCE_CREDENTIALS_FILE_PATH')|length <= 0
|
||||
|
||||
- set_fact:
|
||||
credentials_file_path: "{{ gce_credentials_file | default(_gce_credentials_file.user_input|default(None)) | default(lookup('env','GCE_CREDENTIALS_FILE_PATH'), true) }}"
|
||||
credentials_file_path: "{{ gce_credentials_file | default(_gce_credentials_file.user_input|default(None)) | default(lookup('env','GCE_CREDENTIALS_FILE_PATH'),\
|
||||
\ true) }}"
|
||||
ssh_public_key_lookup: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
|
||||
- set_fact:
|
||||
|
@ -21,36 +22,32 @@
|
|||
|
||||
- block:
|
||||
- name: Get regions
|
||||
gce_region_facts:
|
||||
service_account_email: "{{ credentials_file_lookup.client_email }}"
|
||||
credentials_file: "{{ credentials_file_path }}"
|
||||
project_id: "{{ credentials_file_lookup.project_id }}"
|
||||
register: _gce_regions
|
||||
gcp_compute_location_info:
|
||||
auth_kind: serviceaccount
|
||||
service_account_file: "{{ credentials_file_path }}"
|
||||
project: "{{ project_id }}"
|
||||
scope: regions
|
||||
filters: status=UP
|
||||
register: gcp_compute_regions_info
|
||||
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
gce_regions: >-
|
||||
[{%- for region in _gce_regions.results.regions | sort(attribute='name') -%}
|
||||
{% if region.status == "UP" %}
|
||||
{% for zone in region.zones | sort(attribute='name') %}
|
||||
{% if zone.status == "UP" %}
|
||||
'{{ zone.name }}'
|
||||
{% endif %}{% if not loop.last %},{% endif %}
|
||||
{% endfor %}
|
||||
{% endif %}{% if not loop.last %},{% endif %}
|
||||
[{%- for region in gcp_compute_regions_info.resources | sort(attribute='name') -%}
|
||||
'{{ region.name }}'{% if not loop.last %},{% endif %}
|
||||
{%- endfor -%}]
|
||||
|
||||
- name: Set facts about the default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for region in gce_regions %}
|
||||
{%- if region == "us-east1-b" %}{{ loop.index }}{% endif %}
|
||||
{%- if region == "us-east1" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
(https://cloud.google.com/compute/docs/regions-zones/)
|
||||
(https://cloud.google.com/compute/docs/regions-zones/#locations)
|
||||
{% for r in gce_regions %}
|
||||
{{ loop.index }}. {{ r }}
|
||||
{% endfor %}
|
||||
|
@ -60,8 +57,24 @@
|
|||
register: _gce_region
|
||||
when: region is undefined
|
||||
|
||||
- set_fact:
|
||||
- name: Set region as a fact
|
||||
set_fact:
|
||||
algo_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _gce_region.user_input %}{{ gce_regions[_gce_region.user_input | int -1 ] }}
|
||||
{%- else %}{{ gce_regions[default_region | int - 1] }}{% endif %}
|
||||
|
||||
- name: Get zones
|
||||
gcp_compute_location_info:
|
||||
auth_kind: serviceaccount
|
||||
service_account_file: "{{ credentials_file_path }}"
|
||||
project: "{{ project_id }}"
|
||||
scope: zones
|
||||
filters:
|
||||
- name={{ algo_region }}-*
|
||||
- status=UP
|
||||
register: gcp_compute_zone_info
|
||||
|
||||
- name: Set random available zone as a fact
|
||||
set_fact:
|
||||
algo_zone: "{{ (gcp_compute_zone_info.resources | random(seed=algo_server_name + algo_region + project_id) ).name }}"
|
||||
|
|
|
@ -1,14 +1,8 @@
|
|||
---
|
||||
- name: Clean up the environment
|
||||
file:
|
||||
dest: "{{ gce_venv }}"
|
||||
state: absent
|
||||
when: clean_environment
|
||||
|
||||
- name: Install requirements
|
||||
pip:
|
||||
name:
|
||||
- apache-libcloud
|
||||
- requests>=2.18.4
|
||||
- google-auth>=1.3.0
|
||||
state: latest
|
||||
virtualenv: "{{ gce_venv }}"
|
||||
virtualenv_python: python2.7
|
||||
virtualenv_python: python3
|
||||
|
|
34
roles/cloud-hetzner/tasks/main.yml
Normal file
34
roles/cloud-hetzner/tasks/main.yml
Normal file
|
@ -0,0 +1,34 @@
|
|||
---
|
||||
- name: Build python virtual environment
|
||||
import_tasks: venv.yml
|
||||
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- name: Create an ssh key
|
||||
hetzner.hcloud.ssh_key:
|
||||
name: algo-{{ 999999 | random(seed=lookup('file', SSH_keys.public)) }}
|
||||
public_key: "{{ lookup('file', SSH_keys.public) }}"
|
||||
state: present
|
||||
api_token: "{{ algo_hcloud_token }}"
|
||||
register: hcloud_ssh_key
|
||||
|
||||
- name: Create a server...
|
||||
hetzner.hcloud.server:
|
||||
name: "{{ algo_server_name }}"
|
||||
location: "{{ algo_hcloud_region }}"
|
||||
server_type: "{{ cloud_providers.hetzner.server_type }}"
|
||||
image: "{{ cloud_providers.hetzner.image }}"
|
||||
state: present
|
||||
api_token: "{{ algo_hcloud_token }}"
|
||||
ssh_keys: "{{ hcloud_ssh_key.hcloud_ssh_key.name }}"
|
||||
user_data: "{{ lookup('template', 'files/cloud-init/base.yml') }}"
|
||||
labels:
|
||||
Environment: algo
|
||||
register: hcloud_server
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ hcloud_server.hcloud_server.ipv4_address }}"
|
||||
ansible_ssh_user: algo
|
||||
ansible_ssh_port: "{{ ssh_port }}"
|
||||
cloudinit: true
|
48
roles/cloud-hetzner/tasks/prompts.yml
Normal file
48
roles/cloud-hetzner/tasks/prompts.yml
Normal file
|
@ -0,0 +1,48 @@
|
|||
---
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your API token (https://trailofbits.github.io/algo/cloud-hetzner.html#api-token):
|
||||
echo: false
|
||||
register: _hcloud_token
|
||||
when:
|
||||
- hcloud_token is undefined
|
||||
- lookup('env','HCLOUD_TOKEN')|length <= 0
|
||||
|
||||
- name: Set the token as a fact
|
||||
set_fact:
|
||||
algo_hcloud_token: "{{ hcloud_token | default(_hcloud_token.user_input|default(None)) | default(lookup('env','HCLOUD_TOKEN'), true) }}"
|
||||
|
||||
- name: Get regions
|
||||
hetzner.hcloud.datacenter_info:
|
||||
api_token: "{{ algo_hcloud_token }}"
|
||||
register: _hcloud_regions
|
||||
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
hcloud_regions: "{{ _hcloud_regions.hcloud_datacenter_info | sort(attribute='location') }}"
|
||||
|
||||
- name: Set default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for r in hcloud_regions %}
|
||||
{%- if r['location'] == "nbg1" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
{% for r in hcloud_regions %}
|
||||
{{ loop.index }}. {{ r['location'] }} {{ r['description'] }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
[{{ default_region }}]
|
||||
register: _algo_region
|
||||
when: region is undefined
|
||||
|
||||
- name: Set additional facts
|
||||
set_fact:
|
||||
algo_hcloud_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input %}{{ hcloud_regions[_algo_region.user_input | int -1 ]['location'] }}
|
||||
{%- else %}{{ hcloud_regions[default_region | int - 1]['location'] }}{% endif %}
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue