mirror of https://github.com/trailofbits/algo.git
synced 2025-07-21 21:13:00 +02:00

Merge remote-tracking branch 'tob/master' into 196-simple-webapp-config

commit 1668f6a904
92 changed files with 1847 additions and 2356 deletions
@@ -1,3 +1,10 @@
skip_list:
  - yaml
  - '204'
verbosity: 1

warn_list:
  - no-changed-when
  - no-handler
  - fqcn-builtins
  - var-spacing
.github/dependabot.yml | 13 (vendored, new file)
@@ -0,0 +1,13 @@
version: 2
updates:
  # Maintain dependencies for GitHub Actions
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"

  # Maintain dependencies for Python
  - package-ecosystem: "pip"
    directory: "/"
    schedule:
      interval: "daily"
.github/workflows/main.yml | 70 (vendored)
@@ -4,12 +4,13 @@ on: [push, pull_request]
jobs:
  lint:
    runs-on: ubuntu-18.04
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v1
      - uses: actions/setup-python@v1
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v2.3.2
        with:
          python-version: '3.7'
          python-version: '3.9'
          cache: 'pip'

      - name: Install dependencies
        run: |
@@ -17,7 +18,7 @@ jobs:
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          sudo snap install shellcheck
          pip install ansible-lint
          pip install ansible-lint==6.3.0

      - name: Checks and linters
        run: |
@@ -26,43 +27,35 @@ jobs:
          ansible-lint -x experimental,package-latest,unnamed-task -v *.yml roles/{local,cloud-*}/*/*.yml

  scripted-deploy:
    runs-on: ubuntu-16.04
    runs-on: ubuntu-20.04
    strategy:
      matrix:
        UBUNTU_VERSION: ["18.04", "20.04"]
        UBUNTU_VERSION: ["20.04"]
    steps:
      - uses: actions/checkout@v1
      - uses: actions/setup-python@v1
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v2.3.2
        with:
          python-version: '3.7'
          python-version: '3.9'
          cache: 'pip'

      - name: Install dependencies
        run: |
          sudo apt update -y
          sudo apt install -y \
            python3-pip \
            lxd \
            expect-dev \
            debootstrap \
            tree \
            bridge-utils \
            dnsutils \
            build-essential \
            libssl-dev \
            libffi-dev \
            python3-dev \
            linux-headers-$(uname -r) \
            wireguard \
            libxml2-utils \
            crudini \
            fping \
            strongswan \
            libstrongswan-standard-plugins \
            resolvconf
            openresolv

          python3 -m pip install --upgrade pip
          python3 -m pip install -r requirements.txt

          sudo snap refresh lxd
          sudo lxd init --auto

      - name: Provision
        env:
          DEPLOY: cloud-init
@@ -93,46 +86,37 @@ jobs:
          sudo -E bash -x ./tests/wireguard-client.sh
          sudo env "PATH=$PATH" ./tests/ipsec-client.sh

  local-deploy:
    runs-on: ubuntu-16.04
  docker-deploy:
    runs-on: ubuntu-20.04
    strategy:
      matrix:
        UBUNTU_VERSION: ["18.04", "20.04"]
        UBUNTU_VERSION: ["20.04"]
    steps:
      - uses: actions/checkout@v1
      - uses: actions/setup-python@v1
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v2.3.2
        with:
          python-version: '3.7'
          python-version: '3.9'
          cache: 'pip'

      - name: Install dependencies
        run: |
          set -x
          sudo add-apt-repository -yu ppa:ubuntu-lxc/stable
          sudo apt update -y
          sudo apt install -y \
            python3-pip \
            lxd \
            expect-dev \
            debootstrap \
            tree \
            bridge-utils \
            dnsutils \
            build-essential \
            libssl-dev \
            libffi-dev \
            python3-dev \
            linux-headers-$(uname -r) \
            wireguard \
            libxml2-utils \
            crudini \
            fping \
            strongswan \
            libstrongswan-standard-plugins \
            resolvconf
            openresolv

          python3 -m pip install --upgrade pip
          python3 -m pip install -r requirements.txt

          sudo snap refresh lxd
          sudo lxd init --auto

      - name: Provision
        env:
          DEPLOY: docker
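To reproduce the lint job locally before pushing, a minimal sketch is below. It reuses the exact commands from the workflow above (the snap-installed shellcheck step is omitted) and assumes a Python 3.9 environment with the repository checked out.

```bash
#!/usr/bin/env bash
# Approximate the "lint" job from .github/workflows/main.yml locally.
set -euo pipefail

python -m pip install --upgrade pip
pip install -r requirements.txt
pip install ansible-lint==6.3.0

# Same exclusions the workflow passes to ansible-lint.
ansible-lint -x experimental,package-latest,unnamed-task -v *.yml roles/{local,cloud-*}/*/*.yml
```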
.gitignore | 1 (vendored)
@@ -7,3 +7,4 @@ inventory_users
.DS_Store
venvs/*
!venvs/.gitinit
.vagrant
README.md | 30
@@ -35,11 +35,14 @@ The easiest way to get an Algo server running is to run it on your local system
- Download the [ZIP file](https://github.com/trailofbits/algo/archive/master.zip). Unzip the file to create a directory named `algo-master` containing the Algo scripts.

- Run the command `git clone https://github.com/trailofbits/algo.git` to create a directory named `algo` containing the Algo scripts.
- Use `git clone` to create a directory named `algo` containing the Algo scripts:
  ```bash
  git clone https://github.com/trailofbits/algo.git
  ```

3. **Install Algo's core dependencies.** Algo requires that **Python 3.6 or later** and at least one supporting package are installed on your system.
3. **Install Algo's core dependencies.** Algo requires that **Python 3.8 or later** and at least one supporting package are installed on your system.

   - **macOS:** Catalina includes Python 3 as part of the optional Command Line Developer Tools package. From Terminal run:
   - **macOS:** Catalina (10.15) and higher include Python 3 as part of the optional Command Line Developer Tools package. From Terminal run:

     ```bash
     python3 -m pip install --user --upgrade virtualenv
@@ -47,7 +50,7 @@ The easiest way to get an Algo server running is to run it on your local system
     If prompted, install the Command Line Developer Tools and re-run the above command.

     See [Deploy from macOS](docs/deploy-from-macos.md) for information on installing Python 3 on macOS versions prior to Catalina.
     For macOS versions prior to Catalina, see [Deploy from macOS](docs/deploy-from-macos.md) for information on installing Python 3.

   - **Linux:** Recent releases of Ubuntu, Debian, and Fedora come with Python 3 already installed. Make sure your system is up-to-date and install the supporting package(s):
     * Ubuntu and Debian:
@@ -55,17 +58,13 @@ The easiest way to get an Algo server running is to run it on your local system
       sudo apt install -y --no-install-recommends python3-virtualenv
       ```
       On a Raspberry Pi running Ubuntu also install `libffi-dev` and `libssl-dev`.

     * Fedora:
       ```bash
       sudo dnf install -y python3-virtualenv
       ```
     * Red Hat and CentOS 7 and later (for earlier versions see this [documentation](docs/deploy-from-redhat-centos6.md)):
       ```bash
       sudo yum -y install epel-release
       sudo yum -y install python36-virtualenv
       ```

   - **Windows:** Use the Windows Subsystem for Linux (WSL) to create your own copy of Ubuntu running under Windows from which to install and run Algo. See the [Windows documentation](docs/deploy-from-windows.md).
   - **Windows:** Use the Windows Subsystem for Linux (WSL) to create your own copy of Ubuntu running under Windows from which to install and run Algo. See the [Windows documentation](docs/deploy-from-windows.md) for more information.

4. **Install Algo's remaining dependencies.** You'll need to run these commands from the Algo directory each time you download a new copy of Algo. In a Terminal window `cd` into the `algo-master` (ZIP file) or `algo` (`git clone`) directory and run:
   ```bash
@@ -74,11 +73,11 @@ The easiest way to get an Algo server running is to run it on your local system
   python3 -m pip install -U pip virtualenv &&
   python3 -m pip install -r requirements.txt
   ```
   On Fedora add the option `--system-site-packages` to the first command above. On macOS install the C compiler if prompted.
   On Fedora first run `export TMPDIR=/var/tmp`, then add the option `--system-site-packages` to the first command above (after `python3 -m virtualenv`). On macOS install the C compiler if prompted.
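Putting steps 2 through 4 together, a minimal end-to-end sketch for Ubuntu/Debian is shown below. The virtualenv invocation mirrors the one used elsewhere in these docs; treat it as an illustration rather than a replacement for the steps above.

```bash
#!/usr/bin/env bash
# Sketch of steps 2-4 on Ubuntu/Debian (assumes git and sudo are available).
set -euo pipefail

sudo apt install -y --no-install-recommends python3-virtualenv

git clone https://github.com/trailofbits/algo.git
cd algo

# Create an isolated environment and install Algo's Python dependencies.
python3 -m virtualenv --python="$(command -v python3)" .env
source .env/bin/activate
python3 -m pip install -U pip virtualenv
python3 -m pip install -r requirements.txt
```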
5. **Set your configuration options.** Open the file `config.cfg` in your favorite text editor. Specify the users you wish to create in the `users` list. Create a unique user for each device you plan to connect to your VPN. If you want to be able to add or delete users later, you **must** select `yes` at the `Do you want to retain the keys (PKI)?` prompt during the deployment. You should also review the other options before deployment, as changing your mind about them later [may require you to deploy a brand new server](https://github.com/trailofbits/algo/blob/master/docs/faq.md#i-deployed-an-algo-server-can-you-update-it-with-new-features).
5. **Set your configuration options.** Open the file `config.cfg` in your favorite text editor. Specify the users you wish to create in the `users` list. Create a unique user for each device you plan to connect to your VPN. If you want to add or delete users later, you **must** select `yes` at the `Do you want to retain the keys (PKI)?` prompt during the server deployment. You should also review the other options before deployment, as changing your mind about them later [may require you to deploy a brand new server](https://github.com/trailofbits/algo/blob/master/docs/faq.md#i-deployed-an-algo-server-can-you-update-it-with-new-features).

6. **Start the deployment.** Return to your terminal. In the Algo directory, run `./algo` and follow the instructions. There are several optional features available. None are required for a fully functional VPN server. These optional features are described in greater detail [here](docs/deploy-from-ansible.md).
6. **Start the deployment.** Return to your terminal. In the Algo directory, run `./algo` and follow the instructions. There are several optional features available, none of which are required for a fully functional VPN server. These optional features are described in greater detail [here](docs/deploy-from-ansible.md).

That's it! You will get the message below when the server deployment process completes. Take note of the p12 (user certificate) password and the CA key in case you need them later; **they will only be displayed this time**.

@@ -132,6 +131,10 @@ WireGuard works great with Linux clients. See [this page](docs/client-linux-wire
Please see [this page](docs/client-linux-ipsec.md).

### OpenWrt WireGuard Clients

Please see [this page](docs/client-openwrt-router-wireguard.md).

### Other Devices

Depending on the platform, you may need one or multiple of the following files.

@@ -204,7 +207,6 @@ After this process completes, the Algo VPN server will contain only the users li
* Deploy from [macOS](docs/deploy-from-macos.md)
* Deploy from [Windows](docs/deploy-from-windows.md)
* Deploy from [Google Cloud Shell](docs/deploy-from-cloudshell.md)
* Deploy from [RedHat/CentOS 6.x](docs/deploy-from-redhat-centos6.md)
* Deploy from a [Docker container](docs/deploy-from-docker.md)

### Setup VPN Clients to Connect to the Server
Vagrantfile | 36 (vendored, new file)
@@ -0,0 +1,36 @@
Vagrant.configure("2") do |config|
  config.vm.box = "bento/ubuntu-20.04"

  config.vm.provider "virtualbox" do |v|
    v.name = "algo-20.04"
    v.memory = "512"
    v.cpus = "1"
  end

  config.vm.synced_folder "./", "/opt/algo", create: true

  config.vm.provision "ansible_local" do |ansible|
    ansible.playbook = "/opt/algo/main.yml"

    # https://github.com/hashicorp/vagrant/issues/12204
    ansible.pip_install_cmd = "sudo apt-get install -y python3-pip python-is-python3 && sudo ln -s -f /usr/bin/pip3 /usr/bin/pip"
    ansible.install_mode = "pip_args_only"
    ansible.pip_args = "-r /opt/algo/requirements.txt"
    ansible.inventory_path = "/opt/algo/inventory"
    ansible.limit = "local"
    ansible.verbose = "-vvvv"
    ansible.extra_vars = {
      provider: "local",
      server: "localhost",
      ssh_user: "",
      endpoint: "127.0.0.1",
      ondemand_cellular: true,
      ondemand_wifi: false,
      dns_adblocking: true,
      ssh_tunneling: true,
      store_pki: true,
      tests: true,
      no_log: false
    }
  end
end
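A brief usage sketch for the Vagrantfile above; the commands are standard Vagrant/VirtualBox usage and are an assumption rather than a documented Algo workflow.

```bash
# From the repository root (where the Vagrantfile lives).
vagrant up          # boots bento/ubuntu-20.04 and runs the ansible_local provisioner on /opt/algo/main.yml
vagrant provision   # re-run the playbook after editing config.cfg
vagrant ssh         # inspect the resulting server, e.g. `sudo wg show`
vagrant destroy -f  # tear the test VM down
```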
cloud.yml | 14
@@ -8,14 +8,14 @@
  tasks:
    - block:
        - name: Local pre-tasks
          import_tasks: playbooks/cloud-pre.yml

        - name: Include a provisioning role
          include_role:
            name: "{{ 'local' if algo_provider == 'local' else 'cloud-' + algo_provider }}"

        - name: Local post-tasks
          import_tasks: playbooks/cloud-post.yml
      rescue:
        - include_tasks: playbooks/rescue.yml
config.cfg | 16
@@ -88,9 +88,15 @@ dnscrypt_servers:
  ipv4:
    - cloudflare
    # - google
    # - <YourCustomServer> # E.g., if using NextDNS, this will be something like NextDNS-abc123.
    #   You must also fill in custom_server_stamps below. You may specify
    #   multiple custom servers.
  ipv6:
    - cloudflare-ipv6

custom_server_stamps:
#  YourCustomServer: 'sdns://...'

# DNS servers which will be used if 'dns_encryption' is 'false'.
# Fallback resolvers for systemd-resolved
# The default is to use Cloudflare.
@@ -176,9 +182,13 @@ cloud_providers:
    size: t2.micro
    image:
      name: "ubuntu-focal-20.04"
      arch: x86_64
      owner: "099720109477"
    # Change instance_market_type from "on-demand" to "spot" to launch a spot
    # instance. See deploy-from-ansible.md for spot's additional IAM permission
    instance_market_type: on-demand
  gce:
    size: f1-micro
    size: e2-micro
    image: ubuntu-2004-lts
    external_static_ip: false
  lightsail:
@@ -193,13 +203,13 @@ cloud_providers:
    image: ubuntu-20.04
  openstack:
    flavor_ram: ">=512"
    image: Ubuntu-18.04
    image: Ubuntu-20.04
  cloudstack:
    size: Micro
    image: Linux Ubuntu 20.04 LTS 64-bit
    disk: 10
  vultr:
    os: Ubuntu 20.04 x64
    os: Ubuntu 20.04 LTS x64
    size: 1024 MB RAM,25 GB SSD,1.00 TB BW
  linode:
    type: g6-nanode-1
@@ -13,7 +13,7 @@
      ansible_ssh_user: "{{ 'root' if client_ip == 'localhost' else ssh_user }}"
      vpn_user: "{{ vpn_user }}"
      IP_subject_alt_name: "{{ server_ip }}"
      ansible_python_interpreter: "/usr/bin/python3"
      ansible_python_interpreter: /usr/bin/python3

- name: Configure the client and install required software
  hosts: client-host
docs/client-openwrt-router-wireguard.md | 88 (new file)
@@ -0,0 +1,88 @@
# Using a Router with OpenWrt as a Client with WireGuard
This scenario is useful if you want to use the VPN with devices that have no VPN capability (such as a smart TV), or to make the VPN connection available to multiple devices through a router.
This is a tested, working scenario with the following environment:

- Algo installed on Ubuntu at DigitalOcean
- client-side router "TP-Link TL-WR1043ND" with OpenWrt ver. 21.02.1. [OpenWrt install instructions](https://openwrt.org/toh/tp-link/tl-wr1043nd)
- or client-side router "TP-Link Archer C20i AC750" with OpenWrt ver. 21.02.1. [OpenWrt install instructions](https://openwrt.org/toh/tp-link/archer_c20i)

See the compatible device list at https://openwrt.org/toh/start. In theory, any device on the list should work.

## Router setup
Make sure that you have:
- a router with OpenWrt installed,
- a router that is connected to the internet,
- a router whose IP address does not clash with the device in front of it. By default OpenWrt uses 192.168.1.1; if that conflicts, change it to something like 192.168.2.1.

### Install required packages (WebUI)
- Open the router web UI (usually http://192.168.1.1)
- Log in (by default username: root, password: <empty>)
- Go to System -> Software and click "Update lists"
- Install the following packages: wireguard-tools, kmod-wireguard, luci-app-wireguard, wireguard, kmod-crypto-sha256, kmod-crypto-sha1, kmod-crypto-md5
- Restart the router

### Alternative: install required packages (SSH)
- Open the router web UI (usually http://192.168.1.1)
- ssh root@192.168.1.1
- opkg update
- opkg install wireguard-tools kmod-wireguard luci-app-wireguard wireguard kmod-crypto-sha256 kmod-crypto-sha1 kmod-crypto-md5
- reboot

### Create an interface (WebUI)
- Open the router web UI
- Navigate to Network -> Interfaces
- Click "Add new interface"
- Give it a name, e.g. `AlgoVpn`
- Select the protocol `WireGuard VPN`
- Click `Create Interface`
- In the *General Settings* tab:
  - `Bring up on boot`: *checked*
  - Private key: `Interface -> Private Key` from the Algo config file
  - IP address: `Interface -> Address` from the Algo config file
- In the *Peers* tab:
  - Click Add
  - Name: `algo`
  - Public key: `[Peer]->PublicKey` from the Algo config file
  - Preshared key: `[Peer]->PresharedKey` from the Algo config file
  - Allowed IPs: 0.0.0.0/0
  - Route Allowed IPs: checked
  - Endpoint Host: the `[Peer]->Endpoint` IP from the Algo config file
  - Endpoint Port: the `[Peer]->Endpoint` port from the Algo config file
  - Persistent Keep Alive: `25`
- Click Save, then Save & Apply

### Configure the firewall (WebUI)
- Open the router web UI
- Navigate to Network -> Firewall
- Click `Add configuration`:
  - Name: e.g. ivpn_fw
  - Input: Reject
  - Output: Accept
  - Forward: Reject
  - Masquerading: checked
  - MSS clamping: checked
  - Covered networks: select the VPN interface created above
  - Allow forward to destination zones: Unspecified
  - Allow forward from source zones: lan
- Click Save, then Save & Apply
- Reboot the router

Additional configuration may be required depending on your environment, such as DNS configuration.

You can also verify the configuration over SSH by checking /etc/config/network. It should look like:

```
config interface 'algo'
        option proto 'wireguard'
        list addresses '10.0.0.2/32'
        option private_key '......' # the private key generated just now

config wireguard_wg0
        option public_key '......' # the server's public key
        option route_allowed_ips '1'
        list allowed_ips '0.0.0.0/0'
        option endpoint_host '......' # the server's public IP address
        option endpoint_port '51820'
        option persistent_keepalive '25'
```
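A quick way to confirm the tunnel from the router itself is sketched below; it assumes SSH access to the router and that the server-side WireGuard address is the `.1` peer of the example subnet above.

```bash
# Run on the OpenWrt router over SSH.
wg show                 # handshake time and transfer counters for the new interface
ping -c 3 10.0.0.1      # assumed WireGuard address of the Algo server in the example above
nslookup example.com    # confirm DNS still resolves through the tunnel
```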
@@ -12,6 +12,8 @@ The cheapest EC2 plan you can choose is the "Free Plan" a.k.a. the "AWS Free Tie
As of the time of this writing (July 2018), the Free Tier limits include "750 hours of Amazon EC2 Linux t2.micro instance usage" per month, 15 GB of bandwidth (outbound) per month, and 30 GB of cloud storage. Algo will not even use 1% of the storage limit, but you may have to monitor your bandwidth usage or keep an eye out for the email from Amazon when you are about to exceed the Free Tier limits.

Additional configurations are documented in the [EC2 section of the deploy from ansible guide](https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md#amazon-ec2).

### Create an AWS permissions policy

In the AWS console, find the policies menu: click Services > IAM > Policies. Click Create Policy.

@@ -48,22 +50,27 @@ On the final screen, click the Download CSV button. This file includes the AWS a
After you have downloaded Algo and installed its dependencies, the next step is running Algo to provision the VPN server on your AWS account.

First you will be asked which server type to set up. You will want to enter "2" to use Amazon EC2.
First you will be asked which server type to set up. You will want to enter "3" to use Amazon EC2.

```
$ ./algo

What provider would you like to use?
  1. DigitalOcean
  2. Amazon EC2
  3. Microsoft Azure
  4. Google Compute Engine
  5. Scaleway
  6. OpenStack (DreamCompute optimised)
  7. Install to existing Ubuntu 16.04 server (Advanced)
  2. Amazon Lightsail
  3. Amazon EC2
  4. Microsoft Azure
  5. Google Compute Engine
  6. Hetzner Cloud
  7. Vultr
  8. Scaleway
  9. OpenStack (DreamCompute optimised)
  10. CloudStack (Exoscale optimised)
  11. Linode
  12. Install to existing Ubuntu 18.04 or 20.04 server (for more advanced users)

Enter the number of your desired provider
: 2
: 3
```

Next you will be asked for the AWS Access Key (Access Key ID) and AWS Secret Key (Secret Access Key) that you received in the CSV file when you set up the account (don't worry if you don't see your text entered in the console; the key input is hidden here by Algo).
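The interactive menu can also be skipped entirely. The sketch below is an assumption based on the extra-variable style used in the deploy-from-ansible guide (provider alias `ec2`, credential variable names included); the values shown are examples only.

```bash
# Non-interactive EC2 deployment sketch; keys come from the CSV you downloaded above.
ansible-playbook main.yml -e "provider=ec2
                              server_name=algo
                              region=us-east-1
                              aws_access_key=$AWS_ACCESS_KEY_ID
                              aws_secret_key=$AWS_SECRET_ACCESS_KEY"
```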
@@ -1,20 +1,11 @@
### Configuration file

You need to create a configuration file in INI format with your api key in `$HOME/.cloudstack.ini`
The Algo scripts will ask you for the API details. You need to fetch the API credentials and the endpoint from the provider control panel.

```
[cloudstack]
endpoint = <endpoint>
key = <your api key>
secret = <your secret>
timeout = 30
```
For example, for Exoscale (a European cloud provider exposing the CloudStack API), visit https://portal.exoscale.com/u/<your@account>/account/profile/api to gather the required information: the CloudStack API key and secret.

Example for Exoscale (a European cloud provider exposing the CloudStack API), visit https://portal.exoscale.com/u/<your@account>/account/profile/api to gather the required information:
```
[exoscale]
endpoint = https://api.exoscale.com/compute
key = <your api key>
secret = <your secret>
timeout = 30
```bash
export CLOUDSTACK_KEY="<your api key>"
export CLOUDSTACK_SECRET="<your secret>"
export CLOUDSTACK_ENDPOINT="https://api.exoscale.com/compute"
```
@@ -4,5 +4,6 @@ Sign into the Linode Manager and go to the
[tokens management page](https://cloud.linode.com/profile/tokens).

Click `Add a Personal Access Token`. Label your new token and select *at least* the
`Linodes` read/write permission. Press `Submit` and make sure to copy the displayed token
`Linodes` read/write permission and `StackScripts` read/write permission.
Press `Submit` and make sure to copy the displayed token
as it won't be shown again.
@@ -110,7 +110,21 @@ Possible options can be gathered via cli `aws ec2 describe-regions`
Additional variables (an example invocation appears at the end of this section):

- [encrypted](https://aws.amazon.com/blogs/aws/new-encrypted-ebs-boot-volumes/) - Encrypted EBS boot volume. Boolean (Default: false)
- [encrypted](https://aws.amazon.com/blogs/aws/new-encrypted-ebs-boot-volumes/) - Encrypted EBS boot volume. Boolean (Default: true)
- [size](https://aws.amazon.com/ec2/instance-types/) - EC2 instance type. String (Default: t2.micro)
- [image](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-images.html) - AMI `describe-images` search parameters to find the OS for the hosted image. Each OS and architecture has a unique AMI-ID. The OS owner, for example [Ubuntu](https://cloud-images.ubuntu.com/locator/ec2/), updates these images often. If the parameters below yield multiple results, the most recent AMI-ID is chosen
  ```
  # Example of the equivalent CLI command
  aws ec2 describe-images --owners "099720109477" --filters "Name=architecture,Values=arm64" "Name=name,Values=ubuntu/images/hvm-ssd/ubuntu-focal-20.04*"
  ```
  - [owners] - The operating system owner id. Default is [Canonical](https://help.ubuntu.com/community/EC2StartersGuide#Official_Ubuntu_Cloud_Guest_Amazon_Machine_Images_.28AMIs.29) (Default: 099720109477)
  - [arch] - The architecture (Default: x86_64, Optional: arm64)
  - [name] - The wildcard string to filter available ami names. Algo appends this name with the string "-\*64-server-\*", and prepends it with "ubuntu/images/hvm-ssd/" (Default: ubuntu-focal-20.04)
- [instance_market_type](https://aws.amazon.com/ec2/pricing/) - Two pricing models are supported: on-demand and spot. String (Default: on-demand)
  * If using spot instance types, one additional IAM permission along with the below minimum is required for deployment:
  ```
  "ec2:CreateLaunchTemplate"
  ```

#### Minimum required IAM permissions for deployment:

@@ -156,9 +170,12 @@ Additional variables:
        "ec2:CreateVpc",
        "ec2:DescribeInternetGateways",
        "ec2:ModifyVpcAttribute",
        "ec2:createTags",
        "ec2:CreateTags",
        "ec2:CreateSubnet",
        "ec2:Associate*",
        "ec2:AssociateVpcCidrBlock",
        "ec2:AssociateSubnetCidrBlock",
        "ec2:AssociateRouteTable",
        "ec2:AssociateAddress",
        "ec2:CreateRouteTable",
        "ec2:AttachInternetGateway",
        "ec2:DescribeRouteTables",
@@ -228,7 +245,27 @@ Possible options can be gathered via cli `aws lightsail get-regions`
        "lightsail:GetRegions",
        "lightsail:GetInstance",
        "lightsail:CreateInstances",
        "lightsail:OpenInstancePublicPorts"
        "lightsail:DisableAddOn",
        "lightsail:PutInstancePublicPorts",
        "lightsail:StartInstance",
        "lightsail:TagResource",
        "lightsail:GetStaticIp",
        "lightsail:AllocateStaticIp",
        "lightsail:AttachStaticIp"
      ],
      "Resource": [
        "*"
      ]
    },
    {
      "Sid": "DeployCloudFormationStack",
      "Effect": "Allow",
      "Action": [
        "cloudformation:CreateStack",
        "cloudformation:UpdateStack",
        "cloudformation:DescribeStacks",
        "cloudformation:DescribeStackEvents",
        "cloudformation:ListStackResources"
      ],
      "Resource": [
        "*"
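As a worked example of the variables above, the sketch below flips the image architecture to arm64 and enables spot pricing by editing the `ec2` block of `config.cfg` (the key names match the config.cfg excerpt earlier in this changeset); the `sed` approach itself is just an illustration.

```bash
# Review config.cfg afterwards; run ./algo to deploy with the new settings.
sed -i 's/arch: x86_64/arch: arm64/' config.cfg
sed -i 's/instance_market_type: on-demand/instance_market_type: spot/' config.cfg
grep -A8 'ec2:' config.cfg   # sanity-check the resulting block
```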
@@ -1,4 +1,5 @@
# Deploy from Google Cloud Shell
**IMPORTANT NOTE: As of 2021-12-14 Algo requires Python 3.8, but Google Cloud Shell only provides Python 3.7.3. The instructions below will not work until Google updates Cloud Shell to have at least Python 3.8.**

If you want to try Algo but don't wish to install the software on your own system you can use the **free** [Google Cloud Shell](https://cloud.google.com/shell/) to deploy a VPN to any supported cloud provider. Note that you cannot choose `Install to existing Ubuntu server` to turn Google Cloud Shell into your VPN server.
@@ -1,97 +0,0 @@
# RedHat/CentOS 6.x pre-installation requirements

Many people prefer RedHat or CentOS 6 (or similar variants like Amazon Linux) due to their stability and lack of systemd. Unfortunately, there are a number of dated libraries, notably Python 2.6, that prevent Algo from running without errors. This script will prepare a RedHat, CentOS, or similar VM to deploy to Algo cloud instances.

## Step 1: Prep for RH/CentOS 6.8/Amazon

```shell
yum -y update
yum -y install epel-release
```

Enable any kernel updates:

```shell
reboot
```

## Step 2: Install Ansible and launch Algo

RedHat/CentOS 6.x uses Python 2.6 by default, which is explicitly deprecated and produces many warnings and errors, so we must install a safe, non-invasive 3.6 tool set which has to be expressly enabled (and will not survive login sessions and reboots):

- Install the Software Collections Library (to enable Python 3.6)
  ```shell
  yum -y install centos-release-SCL
  yum -y install \
    openssl-devel \
    libffi-devel \
    automake \
    gcc \
    gcc-c++ \
    kernel-devel \
    rh-python36-python \
    rh-python36-python-devel \
    rh-python36-python-setuptools \
    rh-python36-python-pip \
    rh-python36-python-virtualenv \
    rh-python36-python-crypto \
    rh-python36-PyYAML \
    libselinux-python \
    python-crypto \
    wget \
    unzip \
    nano
  ```

- 3.6 will not be used until explicitly enabled, per login session. Enable 3.6 as the default for this session (needs re-running between logins & reboots)
  ```
  scl enable rh-python36 bash
  ```

- We're now defaulted to 3.6. Upgrade the required components
  ```
  python3 -m pip install -U pip virtualenv pycrypto setuptools
  ```

- Download and unzip Algo
  ```
  wget https://github.com/trailofbits/algo/archive/master.zip
  unzip master.zip
  cd algo-master || echo "No Algo directory found"
  ```

- Set up a virtualenv and install the local Algo dependencies (must be run from algo-master)
  ```
  python3 -m virtualenv --python="$(command -v python3)" .env
  source .env/bin/activate
  python3 -m pip install -U pip virtualenv
  python3 -m pip install -r requirements.txt
  ```

- Edit the userlist and any other settings you desire
  ```
  nano config.cfg
  ```

- Now you can run the Algo installer!
  ```
  ./algo
  ```

## Post-install macOS

1. Copy `./configs/*mobileconfig` to your local Mac

2. Install the VPN profile on your Mac (10.10+ required)

   ```shell
   /usr/bin/profiles -I -F ./x.x.x.x_NAME.mobileconfig
   ```

3. To remove:

   ```shell
   /usr/bin/profiles -D -F ./x.x.x.x_NAME.mobileconfig
   ```

The VPN connection will now appear under Networks (which can be pinned to the top menu bar if preferred)
@@ -21,7 +21,7 @@ Wait a minute for Windows to install a few things in the background (it will eve
2. Click on 'Turn Windows features on or off'
3. Scroll down and check 'Windows Subsystem for Linux', and then click OK.
4. The subsystem will be installed, then Windows will require a restart.
5. Restart Windows and then [install Ubuntu 18.04 LTS from the Windows Store](https://www.microsoft.com/p/ubuntu-1804-lts/9n9tngvndl3q) (at this time Ubuntu 20.04 LTS does not work with Algo when running under WSL).
5. Restart Windows and then install [Ubuntu 20.04 LTS from the Windows Store](https://www.microsoft.com/p/ubuntu-2004-lts/9n6svws3rx71).
6. Run Ubuntu from the Start menu. It will take a few minutes to install. It will have you create a separate user account for the Linux subsystem. Once that's done, you will finally have Ubuntu running somewhat integrated with Windows.

## Install Algo

@@ -39,6 +39,32 @@ git clone https://github.com/trailofbits/algo
cd algo
```

## Post installation steps

These steps are only needed if you cloned the Algo repository to a host machine disk (C:, D:, etc.). WSL mounts the host system disks under the `/mnt` directory.

### Allow git to change file metadata

By default git cannot change file metadata (using chmod, for example) for files stored on host machine disks (https://docs.microsoft.com/en-us/windows/wsl/wsl-config#set-wsl-launch-settings). To allow it:

1. Start an Ubuntu Terminal.
2. Edit /etc/wsl.conf (create it if it doesn't exist). Add the following:
   ```
   [automount]
   options = "metadata"
   ```
3. Close all Ubuntu Terminals.
4. Run PowerShell.
5. Run `wsl --shutdown` in PowerShell.

### Allow Ansible to run in a world-writable directory

Ansible treats host machine directories as world-writable and does not load a .cfg from them by default (https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir). To fix this, run the following inside the `algo` directory:

```shell
chmod 744 .
```

Now you can continue by following the [README](https://github.com/trailofbits/algo#deploy-the-algo-server) from the 4th step to deploy your Algo server!

You'll be instructed to edit the file `config.cfg` in order to specify the Algo user accounts to be created. If you're new to Linux the simplest editor to use is `nano`. To edit the file while in the `algo` directory, run:
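A quick sanity check for both post-installation tweaks is sketched below; it assumes the repository lives on the `C:` drive under `/mnt/c`, which is an example path.

```bash
# Run inside the Ubuntu (WSL) terminal, from the algo checkout on /mnt/c.
grep -A1 '\[automount\]' /etc/wsl.conf   # should show: options = "metadata"
mount | grep /mnt/c                      # the mount options should include "metadata"
stat -c '%a %n' .                        # should print 744 after the chmod above
```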
@@ -1,5 +1,11 @@
# Local Installation

**PLEASE NOTE**: Algo is intended to create a _dedicated_ VPN server. No uninstallation option is provided. If you install Algo on an existing server any existing services might break. In particular, the firewall rules will be overwritten. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information.

------

## Outbound VPN Server

You can use Algo to configure a pre-existing server as an AlgoVPN rather than using it to create and configure a new server on a supported cloud provider. This is referred to as a **local** installation rather than a **cloud** deployment. If you're new to Algo or unfamiliar with Linux you'll find a cloud deployment to be easier.

To perform a local installation, install the Algo scripts following the normal installation instructions, then choose:

@@ -8,11 +14,9 @@ Install to existing Ubuntu 18.04 or 20.04 server (for more advanced users)
```
Make sure your target server is running an unmodified copy of the operating system version specified. The target can be the same system where you've installed the Algo scripts, or a remote system that you are able to access as root via SSH without needing to enter the SSH key passphrase (such as when using `ssh-agent`).

# Road Warrior setup
## Inbound VPN Server (also called "Road Warrior" setup)

Some may find it useful to set up an Algo server on an Ubuntu box on your home LAN, with the intention of being able to securely access your LAN and any resources on it when you're traveling elsewhere (the ["road warrior" setup](https://en.wikipedia.org/wiki/Road_warrior_(computing))). A few tips if you're doing so (a configuration sketch follows at the end of this section):
- Make sure you forward any [relevant incoming ports](/docs/firewalls.md#external-firewall) to the Algo server from your router;
- Change `BetweenClients_DROP` in `config.cfg` to `false`, and also consider changing `block_smb` and `block_netbios` to `false`;
- If you want to use a DNS server on your LAN to resolve local domain names properly (e.g. a Pi-hole), set the `dns_encryption` flag in `config.cfg` to `false`, and change `dns_servers` to the local DNS server IP (i.e. `192.168.1.2`).

**PLEASE NOTE**: Algo is intended to create a _dedicated_ VPN server. No uninstallation option is provided. If you install Algo on an existing server any existing services might break. In particular, the firewall rules will be overwritten. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information.
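For the road-warrior tips above, a hedged sketch of the corresponding `config.cfg` edits follows; the key names come from this page, while the `key: true`/`key: false` format and the Pi-hole address are assumptions used for illustration.

```bash
# Run from the algo directory before deploying; review config.cfg afterwards.
sed -i 's/^BetweenClients_DROP: true/BetweenClients_DROP: false/' config.cfg
sed -i 's/^block_smb: true/block_smb: false/' config.cfg
sed -i 's/^block_netbios: true/block_netbios: false/' config.cfg
# Optional: use a local resolver such as a Pi-hole at 192.168.1.2.
sed -i 's/^dns_encryption: true/dns_encryption: false/' config.cfg
```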
@@ -42,7 +42,7 @@ Look here if you have a problem running the installer to set up a new Algo serve

### Python version is not supported

The minimum Python version required to run Algo is 3.6. Most modern operating systems should have it by default, but if the OS you are using doesn't meet the requirements, you have to upgrade. See the official documentation for your OS, or manually download it from https://www.python.org/downloads/. Otherwise, you may [deploy from docker](deploy-from-docker.md).
The minimum Python version required to run Algo is 3.8. Most modern operating systems should have it by default, but if the OS you are using doesn't meet the requirements, you have to upgrade. See the official documentation for your OS, or manually download it from https://www.python.org/downloads/. Otherwise, you may [deploy from docker](deploy-from-docker.md).

### Error: "You have not agreed to the Xcode license agreements"
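A one-line check that the interpreter meets the 3.8 requirement before starting a deployment (plain Python, nothing Algo-specific):

```bash
python3 -c 'import sys; ok = sys.version_info >= (3, 8); print(sys.version.split()[0], "OK" if ok else "too old"); raise SystemExit(0 if ok else 1)'
```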
@@ -19,6 +19,8 @@ EOF
test -d /home/algo/.ssh || sudo -u algo mkdir -m 0700 /home/algo/.ssh
echo "{{ lookup('file', '{{ SSH_keys.public }}') }}" | (sudo -u algo tee /home/algo/.ssh/authorized_keys && chmod 0600 /home/algo/.ssh/authorized_keys)

ufw --force reset

# shellcheck disable=SC2015
dpkg -l sshguard && until apt-get remove -y --purge sshguard; do
  sleep 3
@@ -25,5 +25,6 @@ write_files:

runcmd:
  - set -x
  - ufw --force reset
  - sudo apt-get remove -y --purge sshguard || true
  - systemctl restart sshd.service
input.yml | 207
@@ -18,127 +18,126 @@
    - { name: Google Compute Engine, alias: gce }
    - { name: Hetzner Cloud, alias: hetzner }
    - { name: Vultr, alias: vultr }
    - { name: Scaleway, alias: scaleway}
    - { name: Scaleway, alias: scaleway }
    - { name: OpenStack (DreamCompute optimised), alias: openstack }
    - { name: CloudStack (Exoscale optimised), alias: cloudstack }
    - { name: Linode, alias: linode }
    - { name: "Install to existing Ubuntu 18.04 or 20.04 server (for more advanced users)", alias: local }
    - { name: Install to existing Ubuntu 18.04 or 20.04 server (for more advanced users), alias: local }
  vars_files:
    - config.cfg

  tasks:
    - block:
        - name: Cloud prompt
          pause:
            prompt: |
              What provider would you like to use?
              {% for p in providers_map %}
              {{ loop.index }}. {{ p['name'] }}
              {% endfor %}

              Enter the number of your desired provider
          register: _algo_provider
          when: provider is undefined

        - name: Set facts based on the input
          set_fact:
            algo_provider: "{{ provider | default(providers_map[_algo_provider.user_input|default(omit)|int - 1]['alias']) }}"

        - name: VPN server name prompt
          pause:
            prompt: |
              Name the vpn server
              [algo]
          register: _algo_server_name
          when:
            - server_name is undefined
            - algo_provider != "local"

        - name: Cellular On Demand prompt
          pause:
            prompt: |
              Do you want macOS/iOS clients to enable "Connect On Demand" when connected to cellular networks?
              [y/N]
          register: _ondemand_cellular
          when: ondemand_cellular is undefined

        - name: Wi-Fi On Demand prompt
          pause:
            prompt: |
              Do you want macOS/iOS clients to enable "Connect On Demand" when connected to Wi-Fi?
              [y/N]
          register: _ondemand_wifi
          when: ondemand_wifi is undefined

        - name: Trusted Wi-Fi networks prompt
          pause:
            prompt: |
              List the names of any trusted Wi-Fi networks where macOS/iOS clients should not use "Connect On Demand"
              (e.g., your home network. Comma-separated value, e.g., HomeNet,OfficeWifi,AlgoWiFi)
          register: _ondemand_wifi_exclude
          when:
            - ondemand_wifi_exclude is undefined
            - (ondemand_wifi|default(false)|bool) or
              (booleans_map[_ondemand_wifi.user_input|default(omit)]|default(false))
            - (ondemand_wifi|default(false)|bool) or (booleans_map[_ondemand_wifi.user_input|default(omit)]|default(false))

        - name: Retain the PKI prompt
          pause:
            prompt: |
              Do you want to retain the keys (PKI)? (required to add users in the future, but less secure)
              [y/N]
          register: _store_pki
          when:
            - store_pki is undefined
            - ipsec_enabled

        - name: DNS adblocking prompt
          pause:
            prompt: |
              Do you want to enable DNS ad blocking on this VPN server?
              [y/N]
          register: _dns_adblocking
          when: dns_adblocking is undefined

        - name: SSH tunneling prompt
          pause:
            prompt: |
              Do you want each user to have their own account for SSH tunneling?
              [y/N]
          register: _ssh_tunneling
          when: ssh_tunneling is undefined

        - name: Set facts based on the input
          set_fact:
            algo_server_name: >-
              {% if server_name is defined %}{% set _server = server_name %}
              {%- elif _algo_server_name.user_input is defined and _algo_server_name.user_input|length > 0 -%}
              {%- set _server = _algo_server_name.user_input -%}
              {%- else %}{% set _server = defaults['server_name'] %}{% endif -%}
              {{ _server | regex_replace('(?!\.)(\W|_)', '-') }}
            algo_ondemand_cellular: >-
              {% if ondemand_cellular is defined %}{{ ondemand_cellular | bool }}
              {%- elif _ondemand_cellular.user_input is defined %}{{ booleans_map[_ondemand_cellular.user_input] | default(defaults['ondemand_cellular']) }}
              {%- else %}false{% endif %}
            algo_ondemand_wifi: >-
              {% if ondemand_wifi is defined %}{{ ondemand_wifi | bool }}
              {%- elif _ondemand_wifi.user_input is defined %}{{ booleans_map[_ondemand_wifi.user_input] | default(defaults['ondemand_wifi']) }}
              {%- else %}false{% endif %}
            algo_ondemand_wifi_exclude: >-
              {% if ondemand_wifi_exclude is defined %}{{ ondemand_wifi_exclude | b64encode }}
              {%- elif _ondemand_wifi_exclude.user_input is defined and _ondemand_wifi_exclude.user_input|length > 0 -%}
              {{ _ondemand_wifi_exclude.user_input | b64encode }}
              {%- else %}{{ '_null' | b64encode }}{% endif %}
            algo_dns_adblocking: >-
              {% if dns_adblocking is defined %}{{ dns_adblocking | bool }}
              {%- elif _dns_adblocking.user_input is defined %}{{ booleans_map[_dns_adblocking.user_input] | default(defaults['dns_adblocking']) }}
              {%- else %}false{% endif %}
            algo_ssh_tunneling: >-
              {% if ssh_tunneling is defined %}{{ ssh_tunneling | bool }}
              {%- elif _ssh_tunneling.user_input is defined %}{{ booleans_map[_ssh_tunneling.user_input] | default(defaults['ssh_tunneling']) }}
              {%- else %}false{% endif %}
            algo_store_pki: >-
              {% if ipsec_enabled %}{%- if store_pki is defined %}{{ store_pki | bool }}
              {%- elif _store_pki.user_input is defined %}{{ booleans_map[_store_pki.user_input] | default(defaults['store_pki']) }}
              {%- else %}false{% endif %}{% endif %}
      rescue:
        - include_tasks: playbooks/rescue.yml
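Every prompt above is guarded by `when: <variable> is undefined`, so the whole questionnaire can be bypassed by supplying those variables as extra vars. A minimal sketch, using only variable names that appear in the playbook above (the values are examples):

```bash
ansible-playbook main.yml -e "provider=local
                              server_name=algo
                              ondemand_cellular=false
                              ondemand_wifi=false
                              dns_adblocking=true
                              ssh_tunneling=false
                              store_pki=true"
```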
install.sh | 13
@@ -22,16 +22,7 @@ installRequirements() {
  export DEBIAN_FRONTEND=noninteractive
  apt-get update
  apt-get install \
    software-properties-common \
    git \
    build-essential \
    libssl-dev \
    libffi-dev \
    python3-dev \
    python3-pip \
    python3-setuptools \
    python3-virtualenv \
    bind9-host \
    jq -y
}

@@ -50,7 +41,7 @@ publicIpFromInterface() {
  echo "Couldn't find a valid ipv4 address, using the first IP found on the interfaces as the endpoint."
  DEFAULT_INTERFACE="$(ip -4 route list match default | grep -Eo "dev .*" | awk '{print $2}')"
  ENDPOINT=$(ip -4 addr sh dev "$DEFAULT_INTERFACE" | grep -w inet | head -n1 | awk '{print $2}' | grep -oE '\b([0-9]{1,3}\.){3}[0-9]{1,3}\b')
  export ENDPOINT=$ENDPOINT
  export ENDPOINT="${ENDPOINT}"
  echo "Using ${ENDPOINT} as the endpoint"
}

@@ -66,7 +57,7 @@ publicIpFromMetadata() {
  fi

  if echo "${ENDPOINT}" | grep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b"; then
    export ENDPOINT=$ENDPOINT
    export ENDPOINT="${ENDPOINT}"
    echo "Using ${ENDPOINT} as the endpoint"
  else
    publicIpFromInterface
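install.sh is normally piped straight onto a fresh Ubuntu server; the URL below is an assumption based on the repository layout, so review the script before running anything as root.

```bash
# Assumed usage on a fresh Ubuntu 20.04 server.
curl -s https://raw.githubusercontent.com/trailofbits/algo/master/install.sh | sudo -E bash -x
```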
@ -1,110 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.cloudstack import (
|
||||
AnsibleCloudStack,
|
||||
cs_argument_spec,
|
||||
cs_required_together,
|
||||
)
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: cloudstack_zones
|
||||
short_description: List zones on Apache CloudStack based clouds.
|
||||
description:
|
||||
- List zones.
|
||||
version_added: '0.1'
|
||||
author: Julien Bachmann (@0xmilkmix)
|
||||
extends_documentation_fragment: cloudstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: List zones
|
||||
cloudstack_zones:
|
||||
register: _cs_zones
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
---
|
||||
zone:
|
||||
description: List of zones.
|
||||
returned: success
|
||||
type: list
|
||||
sample:
|
||||
[
|
||||
{
|
||||
"allocationstate": "Enabled",
|
||||
"dhcpprovider": "VirtualRouter",
|
||||
"id": "<id>",
|
||||
"localstorageenabled": true,
|
||||
"name": "ch-gva-2",
|
||||
"networktype": "Basic",
|
||||
"securitygroupsenabled": true,
|
||||
"tags": [],
|
||||
"zonetoken": "token"
|
||||
},
|
||||
{
|
||||
"allocationstate": "Enabled",
|
||||
"dhcpprovider": "VirtualRouter",
|
||||
"id": "<id>",
|
||||
"localstorageenabled": true,
|
||||
"name": "ch-dk-2",
|
||||
"networktype": "Basic",
|
||||
"securitygroupsenabled": true,
|
||||
"tags": [],
|
||||
"zonetoken": "token"
|
||||
},
|
||||
{
|
||||
"allocationstate": "Enabled",
|
||||
"dhcpprovider": "VirtualRouter",
|
||||
"id": "<id>",
|
||||
"localstorageenabled": true,
|
||||
"name": "at-vie-1",
|
||||
"networktype": "Basic",
|
||||
"securitygroupsenabled": true,
|
||||
"tags": [],
|
||||
"zonetoken": "token"
|
||||
},
|
||||
{
|
||||
"allocationstate": "Enabled",
|
||||
"dhcpprovider": "VirtualRouter",
|
||||
"id": "<id>",
|
||||
"localstorageenabled": true,
|
||||
"name": "de-fra-1",
|
||||
"networktype": "Basic",
|
||||
"securitygroupsenabled": true,
|
||||
"tags": [],
|
||||
"zonetoken": "token"
|
||||
}
|
||||
]
|
||||
'''
|
||||
|
||||
class AnsibleCloudStackZones(AnsibleCloudStack):
|
||||
|
||||
def __init__(self, module):
|
||||
super(AnsibleCloudStackZones, self).__init__(module)
|
||||
self.zones = None
|
||||
|
||||
def get_zones(self):
|
||||
args = {}
|
||||
if not self.zones:
|
||||
zones = self.query_api('listZones', **args)
|
||||
if zones:
|
||||
self.zones = zones
|
||||
return self.zones
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(argument_spec={})
|
||||
acs_zones = AnsibleCloudStackZones(module)
|
||||
result = acs_zones.get_zones()
|
||||
module.exit_json(**result)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@ -1,551 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: lightsail
|
||||
short_description: Create or delete a virtual machine instance in AWS Lightsail
|
||||
description:
|
||||
- Creates or instances in AWS Lightsail and optionally wait for it to be 'running'.
|
||||
version_added: "2.4"
|
||||
author: "Nick Ball (@nickball)"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the target.
|
||||
default: present
|
||||
choices: ['present', 'absent', 'running', 'restarted', 'stopped']
|
||||
name:
|
||||
description:
|
||||
- Name of the instance
|
||||
required: true
|
||||
default : null
|
||||
zone:
|
||||
description:
|
||||
- AWS availability zone in which to launch the instance. Required when state='present'
|
||||
required: false
|
||||
default: null
|
||||
blueprint_id:
|
||||
description:
|
||||
- ID of the instance blueprint image. Required when state='present'
|
||||
required: false
|
||||
default: null
|
||||
bundle_id:
|
||||
description:
|
||||
- Bundle of specification info for the instance. Required when state='present'
|
||||
required: false
|
||||
default: null
|
||||
user_data:
|
||||
description:
|
||||
- Launch script that can configure the instance with additional data
|
||||
required: false
|
||||
default: null
|
||||
key_pair_name:
|
||||
description:
|
||||
- Name of the key pair to use with the instance
|
||||
required: false
|
||||
default: null
|
||||
wait:
|
||||
description:
|
||||
- Wait for the instance to be in state 'running' before returning. If wait is "no" an ip_address may not be returned
|
||||
default: "yes"
|
||||
choices: [ "yes", "no" ]
|
||||
wait_timeout:
|
||||
description:
|
||||
- How long before wait gives up, in seconds.
|
||||
default: 300
|
||||
open_ports:
|
||||
description:
|
||||
- Adds public ports to an Amazon Lightsail instance.
|
||||
default: null
|
||||
suboptions:
|
||||
from_port:
|
||||
description: Begin of the range
|
||||
required: true
|
||||
default: null
|
||||
to_port:
|
||||
description: End of the range
|
||||
required: true
|
||||
default: null
|
||||
protocol:
|
||||
description: Accepted traffic protocol.
|
||||
required: true
|
||||
choices:
|
||||
- udp
|
||||
- tcp
|
||||
- all
|
||||
default: null
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- boto3
|
||||
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create a new Lightsail instance, register the instance details
|
||||
- lightsail:
|
||||
state: present
|
||||
name: myinstance
|
||||
region: us-east-1
|
||||
zone: us-east-1a
|
||||
blueprint_id: ubuntu_16_04
|
||||
bundle_id: nano_1_0
|
||||
key_pair_name: id_rsa
|
||||
user_data: " echo 'hello world' > /home/ubuntu/test.txt"
|
||||
wait_timeout: 500
|
||||
open_ports:
|
||||
- from_port: 4500
|
||||
to_port: 4500
|
||||
protocol: udp
|
||||
- from_port: 500
|
||||
to_port: 500
|
||||
protocol: udp
|
||||
register: my_instance
|
||||
|
||||
- debug:
|
||||
msg: "Name is {{ my_instance.instance.name }}"
|
||||
|
||||
- debug:
|
||||
msg: "IP is {{ my_instance.instance.publicIpAddress }}"
|
||||
|
||||
# Delete an instance if present
|
||||
- lightsail:
|
||||
state: absent
|
||||
region: us-east-1
|
||||
name: myinstance
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
changed:
  description: if a snapshot has been modified/created
  returned: always
  type: bool
  sample:
    changed: true
instance:
  description: instance data
  returned: always
  type: dict
  sample:
    arn: "arn:aws:lightsail:us-east-1:448830907657:Instance/1fef0175-d6c8-480e-84fa-214f969cda87"
    blueprint_id: "ubuntu_16_04"
    blueprint_name: "Ubuntu"
    bundle_id: "nano_1_0"
    created_at: "2017-03-27T08:38:59.714000-04:00"
    hardware:
      cpu_count: 1
      ram_size_in_gb: 0.5
    is_static_ip: false
    location:
      availability_zone: "us-east-1a"
      region_name: "us-east-1"
    name: "my_instance"
    networking:
      monthly_transfer:
        gb_per_month_allocated: 1024
      ports:
        - access_direction: "inbound"
          access_from: "Anywhere (0.0.0.0/0)"
          access_type: "public"
          common_name: ""
          from_port: 80
          protocol: tcp
          to_port: 80
        - access_direction: "inbound"
          access_from: "Anywhere (0.0.0.0/0)"
          access_type: "public"
          common_name: ""
          from_port: 22
          protocol: tcp
          to_port: 22
    private_ip_address: "172.26.8.14"
    public_ip_address: "34.207.152.202"
    resource_type: "Instance"
    ssh_key_name: "keypair"
    state:
      code: 16
      name: running
    support_code: "588307843083/i-0997c97831ee21e33"
    username: "ubuntu"
'''

import time
import traceback

try:
    import botocore
    HAS_BOTOCORE = True
except ImportError:
    HAS_BOTOCORE = False

try:
    import boto3
except ImportError:
    # will be caught by imported HAS_BOTO3
    pass

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
                                      HAS_BOTO3, camel_dict_to_snake_dict)


def create_instance(module, client, instance_name):
|
||||
"""
|
||||
Create an instance
|
||||
|
||||
module: Ansible module object
|
||||
client: authenticated lightsail connection object
|
||||
instance_name: name of instance to delete
|
||||
|
||||
Returns a dictionary of instance information
|
||||
about the new instance.
|
||||
|
||||
"""
|
||||
|
||||
changed = False
|
||||
|
||||
# Check if instance already exists
|
||||
inst = None
|
||||
try:
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['Error']['Code'] != 'NotFoundException':
|
||||
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
|
||||
|
||||
zone = module.params.get('zone')
|
||||
blueprint_id = module.params.get('blueprint_id')
|
||||
bundle_id = module.params.get('bundle_id')
|
||||
user_data = module.params.get('user_data')
|
||||
user_data = '' if user_data is None else user_data
|
||||
|
||||
wait = module.params.get('wait')
|
||||
wait_timeout = int(module.params.get('wait_timeout'))
|
||||
wait_max = time.time() + wait_timeout
|
||||
|
||||
if module.params.get('key_pair_name'):
|
||||
key_pair_name = module.params.get('key_pair_name')
|
||||
else:
|
||||
key_pair_name = ''
|
||||
|
||||
if module.params.get('open_ports'):
|
||||
open_ports = module.params.get('open_ports')
|
||||
else:
|
||||
open_ports = '[]'
|
||||
|
||||
resp = None
|
||||
if inst is None:
|
||||
try:
|
||||
resp = client.create_instances(
|
||||
instanceNames=[
|
||||
instance_name
|
||||
],
|
||||
availabilityZone=zone,
|
||||
blueprintId=blueprint_id,
|
||||
bundleId=bundle_id,
|
||||
userData=user_data,
|
||||
keyPairName=key_pair_name,
|
||||
)
|
||||
resp = resp['operations'][0]
|
||||
except botocore.exceptions.ClientError as e:
|
||||
module.fail_json(msg='Unable to create instance {0}, error: {1}'.format(instance_name, e))
|
||||
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
|
||||
# Wait for instance to become running
|
||||
if wait:
|
||||
while (wait_max > time.time()) and (inst is not None and inst['state']['name'] != "running"):
|
||||
try:
|
||||
time.sleep(2)
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
|
||||
module.fail_json(msg="Failed to start/stop instance {0}. Check that you have permissions to perform the operation".format(instance_name),
|
||||
exception=traceback.format_exc())
|
||||
elif e.response['Error']['Code'] == "RequestExpired":
|
||||
module.fail_json(msg="RequestExpired: Failed to start instance {0}.".format(instance_name), exception=traceback.format_exc())
|
||||
time.sleep(1)
|
||||
|
||||
# Timed out
|
||||
if wait and not changed and wait_max <= time.time():
|
||||
module.fail_json(msg="Wait for instance start timeout at %s" % time.asctime())
|
||||
|
||||
# Attempt to open ports
|
||||
if open_ports:
|
||||
if inst is not None:
|
||||
try:
|
||||
for o in open_ports:
|
||||
resp = client.open_instance_public_ports(
|
||||
instanceName=instance_name,
|
||||
portInfo={
|
||||
'fromPort': o['from_port'],
|
||||
'toPort': o['to_port'],
|
||||
'protocol': o['protocol']
|
||||
}
|
||||
)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
module.fail_json(msg='Error opening ports for instance {0}, error: {1}'.format(instance_name, e))
|
||||
|
||||
changed = True
|
||||
|
||||
return (changed, inst)
|
||||
|
||||
|
||||
def delete_instance(module, client, instance_name):
|
||||
"""
|
||||
Terminates an instance
|
||||
|
||||
module: Ansible module object
|
||||
client: authenticated lightsail connection object
|
||||
instance_name: name of instance to delete
|
||||
|
||||
Returns a dictionary of instance information
|
||||
about the instance deleted (pre-deletion).
|
||||
|
||||
If the instance to be deleted is running
|
||||
"changed" will be set to False.
|
||||
|
||||
"""
|
||||
|
||||
# It looks like deleting removes the instance immediately, nothing to wait for
|
||||
wait = module.params.get('wait')
|
||||
wait_timeout = int(module.params.get('wait_timeout'))
|
||||
wait_max = time.time() + wait_timeout
|
||||
|
||||
changed = False
|
||||
|
||||
inst = None
|
||||
try:
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['Error']['Code'] != 'NotFoundException':
|
||||
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
|
||||
|
||||
# Wait for instance to exit transition state before deleting
|
||||
if wait:
|
||||
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
|
||||
try:
|
||||
time.sleep(5)
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
|
||||
module.fail_json(msg="Failed to delete instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
|
||||
exception=traceback.format_exc())
|
||||
elif e.response['Error']['Code'] == "RequestExpired":
|
||||
module.fail_json(msg="RequestExpired: Failed to delete instance {0}.".format(instance_name), exception=traceback.format_exc())
|
||||
# sleep and retry
|
||||
time.sleep(10)
|
||||
|
||||
# Attempt to delete
|
||||
if inst is not None:
|
||||
while not changed and ((wait and wait_max > time.time()) or (not wait)):
|
||||
try:
|
||||
client.delete_instance(instanceName=instance_name)
|
||||
changed = True
|
||||
except botocore.exceptions.ClientError as e:
|
||||
module.fail_json(msg='Error deleting instance {0}, error: {1}'.format(instance_name, e))
|
||||
|
||||
# Timed out
|
||||
if wait and not changed and wait_max <= time.time():
|
||||
module.fail_json(msg="wait for instance delete timeout at %s" % time.asctime())
|
||||
|
||||
return (changed, inst)
|
||||
|
||||
|
||||
def restart_instance(module, client, instance_name):
|
||||
"""
|
||||
Reboot an existing instance
|
||||
|
||||
module: Ansible module object
|
||||
client: authenticated lightsail connection object
|
||||
instance_name: name of instance to reboot
|
||||
|
||||
Returns a dictionary of instance information
|
||||
about the restarted instance
|
||||
|
||||
If the instance was not able to reboot,
|
||||
"changed" will be set to False.
|
||||
|
||||
Wait will not apply here as this is an OS-level operation
|
||||
"""
|
||||
wait = module.params.get('wait')
|
||||
wait_timeout = int(module.params.get('wait_timeout'))
|
||||
wait_max = time.time() + wait_timeout
|
||||
|
||||
changed = False
|
||||
|
||||
inst = None
|
||||
try:
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['Error']['Code'] != 'NotFoundException':
|
||||
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
|
||||
|
||||
# Wait for instance to exit transition state before state change
|
||||
if wait:
|
||||
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
|
||||
try:
|
||||
time.sleep(5)
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
|
||||
module.fail_json(msg="Failed to restart instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
|
||||
exception=traceback.format_exc())
|
||||
elif e.response['Error']['Code'] == "RequestExpired":
|
||||
module.fail_json(msg="RequestExpired: Failed to restart instance {0}.".format(instance_name), exception=traceback.format_exc())
|
||||
time.sleep(3)
|
||||
|
||||
# send reboot
|
||||
if inst is not None:
|
||||
try:
|
||||
client.reboot_instance(instanceName=instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['Error']['Code'] != 'NotFoundException':
|
||||
module.fail_json(msg='Unable to reboot instance {0}, error: {1}'.format(instance_name, e))
|
||||
changed = True
|
||||
|
||||
return (changed, inst)
|
||||
|
||||
|
||||
def startstop_instance(module, client, instance_name, state):
|
||||
"""
|
||||
Starts or stops an existing instance
|
||||
|
||||
module: Ansible module object
|
||||
client: authenticated lightsail connection object
|
||||
instance_name: name of instance to start/stop
|
||||
state: Target state ("running" or "stopped")
|
||||
|
||||
Returns a dictionary of instance information
|
||||
about the instance started/stopped
|
||||
|
||||
If the instance was not able to state change,
|
||||
"changed" will be set to False.
|
||||
|
||||
"""
|
||||
wait = module.params.get('wait')
|
||||
wait_timeout = int(module.params.get('wait_timeout'))
|
||||
wait_max = time.time() + wait_timeout
|
||||
|
||||
changed = False
|
||||
|
||||
inst = None
|
||||
try:
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['Error']['Code'] != 'NotFoundException':
|
||||
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
|
||||
|
||||
# Wait for instance to exit transition state before state change
|
||||
if wait:
|
||||
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
|
||||
try:
|
||||
time.sleep(5)
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
|
||||
module.fail_json(msg="Failed to start/stop instance {0}. Check that you have permissions to perform the operation".format(instance_name),
|
||||
exception=traceback.format_exc())
|
||||
elif e.response['Error']['Code'] == "RequestExpired":
|
||||
module.fail_json(msg="RequestExpired: Failed to start/stop instance {0}.".format(instance_name), exception=traceback.format_exc())
|
||||
time.sleep(1)
|
||||
|
||||
# Try state change
|
||||
if inst is not None and inst['state']['name'] != state:
|
||||
try:
|
||||
if state == 'running':
|
||||
client.start_instance(instanceName=instance_name)
|
||||
else:
|
||||
client.stop_instance(instanceName=instance_name)
|
||||
except botocore.exceptions.ClientError as e:
|
||||
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(instance_name, e))
|
||||
changed = True
|
||||
# Grab current instance info
|
||||
inst = _find_instance_info(client, instance_name)
|
||||
|
||||
return (changed, inst)
|
||||
|
||||
|
||||
def core(module):
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg='region must be specified')

    client = None
    try:
        client = boto3_conn(module, conn_type='client', resource='lightsail',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
        module.fail_json(msg='Failed while connecting to the lightsail service: %s' % e, exception=traceback.format_exc())

    changed = False
    state = module.params['state']
    name = module.params['name']

    if state == 'absent':
        changed, instance_dict = delete_instance(module, client, name)
    elif state in ('running', 'stopped'):
        changed, instance_dict = startstop_instance(module, client, name, state)
    elif state == 'restarted':
        changed, instance_dict = restart_instance(module, client, name)
    elif state == 'present':
        changed, instance_dict = create_instance(module, client, name)

    module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(instance_dict))


def _find_instance_info(client, instance_name):
    ''' handle exceptions where this function is called '''
    inst = None
    try:
        inst = client.get_instance(instanceName=instance_name)
    except botocore.exceptions.ClientError as e:
        raise
    return inst['instance']


def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted']),
        zone=dict(type='str'),
        blueprint_id=dict(type='str'),
        bundle_id=dict(type='str'),
        key_pair_name=dict(type='str'),
        user_data=dict(type='str'),
        wait=dict(type='bool', default=True),
        wait_timeout=dict(default=300),
        open_ports=dict(type='list')
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='Python module "boto3" is missing, please install it')

    if not HAS_BOTOCORE:
        module.fail_json(msg='Python module "botocore" is missing, please install it')

    try:
        core(module)
    except (botocore.exceptions.ClientError, Exception) as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())


if __name__ == '__main__':
    main()

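Note that core() passes the raw Lightsail response through camel_dict_to_snake_dict before exit_json, so registered results expose snake_case keys such as public_ip_address (as in the RETURN sample), even though the EXAMPLES block reads publicIpAddress. A minimal sketch of that key conversion, using a simplified re-based helper for illustration rather than the real module_utils function:

import re

def camel_to_snake_keys(data):
    # Recursively convert camelCase dict keys to snake_case (illustration only).
    if isinstance(data, dict):
        return {re.sub(r'(?<!^)(?=[A-Z])', '_', k).lower(): camel_to_snake_keys(v)
                for k, v in data.items()}
    if isinstance(data, list):
        return [camel_to_snake_keys(i) for i in data]
    return data

# camel_to_snake_keys({'publicIpAddress': '34.207.152.202'})
# -> {'public_ip_address': '34.207.152.202'}
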
19
main.yml
19
main.yml
|
@@ -9,7 +9,7 @@

- name: Ensure Ansible is not being run in a world writable directory
  assert:
    that: _playbook_dir.stat.mode|int <= 0775
    that: _playbook_dir.stat.mode|int <= 775
    msg: >
      Ansible is being run in a world writable directory ({{ playbook_dir }}), ignoring it as an ansible.cfg source.
      For more information see https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir

@@ -23,27 +23,30 @@

- name: Set required ansible version as a fact
  set_fact:
    required_ansible_version:
      "{{ item | regex_replace('^ansible[\\s+]?(?P<op>[=,>,<]+)[\\s+]?(?P<ver>\\d.\\d+(.\\d+)?)$',
      '{\"op\": \"\\g<op>\",\"ver\": \"\\g<ver>\" }') }}"
    required_ansible_version: "{{ item | regex_replace('^ansible[\\s+]?(?P<op>[=,>,<]+)[\\s+]?(?P<ver>\\d.\\d+(.\\d+)?)$', '{\"op\": \"\\g<op>\",\"ver\"\
      : \"\\g<ver>\" }') }}"
  when: '"ansible" in item'
  with_items: "{{ lookup('file', 'requirements.txt').splitlines() }}"
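
The regex_replace above turns a requirements pin such as ansible==6.1.0 into a small JSON mapping with an op and a ver field, which the later version assert consumes. A rough Python equivalent of that transformation, for illustration only (the real task evaluates it through Jinja2 over the requirements.txt lines):

import json
import re

PATTERN = r'^ansible[\s+]?(?P<op>[=,>,<]+)[\s+]?(?P<ver>\d.\d+(.\d+)?)$'

def parse_requirement(line):
    # Mimic the regex_replace: 'ansible==6.1.0' -> {'op': '==', 'ver': '6.1.0'}
    return json.loads(re.sub(PATTERN, r'{"op": "\g<op>","ver": "\g<ver>" }', line))

# parse_requirement('ansible==6.1.0') -> {'op': '==', 'ver': '6.1.0'}
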
- name: Just get the list from default pip
  community.general.pip_package_info:
  register: pip_package_info

- name: Verify Python meets Algo VPN requirements
  assert:
    that: (ansible_python.version.major|string + '.' + ansible_python.version.minor|string)|float is version('3.6', '>=')
    that: (ansible_python.version.major|string + '.' + ansible_python.version.minor|string) is version('3.8', '>=')
    msg: >
      Python version is not supported.
      You must upgrade to at least Python 3.6 to use this version of Algo.
      You must upgrade to at least Python 3.8 to use this version of Algo.
      See for more details - https://trailofbits.github.io/algo/troubleshooting.html#python-version-is-not-supported

- name: Verify Ansible meets Algo VPN requirements
  assert:
    that:
      - ansible_version.full is version(required_ansible_version.ver, required_ansible_version.op)
      - pip_package_info.packages.pip.ansible.0.version is version(required_ansible_version.ver, required_ansible_version.op)
      - not ipaddr.failed
    msg: >
      Ansible version is {{ ansible_version.full }}.
      Ansible version is {{ pip_package_info.packages.pip.ansible.0.version }}.
      You must update the requirements to use this version of Algo.
      Try to run python3 -m pip install -U -r requirements.txt

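The asserts above rely on Jinja's version() test. In plain Python, the same comparison could be sketched with packaging.version, assuming an operator string like the one extracted from requirements.txt:

import operator
from packaging.version import Version

OPS = {'==': operator.eq, '>=': operator.ge, '<=': operator.le, '>': operator.gt, '<': operator.lt}

def version_ok(installed, required_op, required_ver):
    # Rough stand-in for: installed is version(required_ver, required_op)
    return OPS[required_op](Version(installed), Version(required_ver))

# version_ok('6.1.0', '==', '6.1.0') -> True
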
@ -10,7 +10,7 @@
|
|||
ansible_connection: "{% if cloud_instance_ip == 'localhost' %}local{% else %}ssh{% endif %}"
|
||||
ansible_ssh_user: "{{ ansible_ssh_user|default('root') }}"
|
||||
ansible_ssh_port: "{{ ansible_ssh_port|default(22) }}"
|
||||
ansible_python_interpreter: "/usr/bin/python3"
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
algo_provider: "{{ algo_provider }}"
|
||||
algo_server_name: "{{ algo_server_name }}"
|
||||
algo_ondemand_cellular: "{{ algo_ondemand_cellular }}"
|
||||
|
@ -33,7 +33,7 @@
|
|||
wait_for:
|
||||
port: "{{ ansible_ssh_port|default(22) }}"
|
||||
host: "{{ cloud_instance_ip }}"
|
||||
search_regex: "OpenSSH"
|
||||
search_regex: OpenSSH
|
||||
delay: 10
|
||||
timeout: 320
|
||||
state: present
|
||||
|
@ -44,8 +44,7 @@
|
|||
when:
|
||||
- pki_in_tmpfs
|
||||
- not algo_store_pki
|
||||
- ansible_system == "Darwin" or
|
||||
ansible_system == "Linux"
|
||||
- ansible_system == "Darwin" or ansible_system == "Linux"
|
||||
|
||||
- debug:
|
||||
var: IP_subject_alt_name
|
||||
|
|
|
@ -1,53 +1,53 @@
|
|||
---
|
||||
- block:
|
||||
- name: Display the invocation environment
|
||||
shell: >
|
||||
./algo-showenv.sh \
|
||||
'algo_provider "{{ algo_provider }}"' \
|
||||
{% if ipsec_enabled %}
|
||||
'algo_ondemand_cellular "{{ algo_ondemand_cellular }}"' \
|
||||
'algo_ondemand_wifi "{{ algo_ondemand_wifi }}"' \
|
||||
'algo_ondemand_wifi_exclude "{{ algo_ondemand_wifi_exclude }}"' \
|
||||
{% endif %}
|
||||
'algo_dns_adblocking "{{ algo_dns_adblocking }}"' \
|
||||
'algo_ssh_tunneling "{{ algo_ssh_tunneling }}"' \
|
||||
'wireguard_enabled "{{ wireguard_enabled }}"' \
|
||||
'dns_encryption "{{ dns_encryption }}"' \
|
||||
> /dev/tty
|
||||
tags: debug
|
||||
- name: Display the invocation environment
|
||||
shell: >
|
||||
./algo-showenv.sh \
|
||||
'algo_provider "{{ algo_provider }}"' \
|
||||
{% if ipsec_enabled %}
|
||||
'algo_ondemand_cellular "{{ algo_ondemand_cellular }}"' \
|
||||
'algo_ondemand_wifi "{{ algo_ondemand_wifi }}"' \
|
||||
'algo_ondemand_wifi_exclude "{{ algo_ondemand_wifi_exclude }}"' \
|
||||
{% endif %}
|
||||
'algo_dns_adblocking "{{ algo_dns_adblocking }}"' \
|
||||
'algo_ssh_tunneling "{{ algo_ssh_tunneling }}"' \
|
||||
'wireguard_enabled "{{ wireguard_enabled }}"' \
|
||||
'dns_encryption "{{ dns_encryption }}"' \
|
||||
> /dev/tty || true
|
||||
tags: debug
|
||||
|
||||
- name: Install the requirements
|
||||
pip:
|
||||
state: present
|
||||
name:
|
||||
- pyOpenSSL>=0.15
|
||||
- segno
|
||||
tags:
|
||||
- always
|
||||
- skip_ansible_lint
|
||||
- name: Install the requirements
|
||||
pip:
|
||||
state: present
|
||||
name:
|
||||
- pyOpenSSL>=0.15
|
||||
- segno
|
||||
tags:
|
||||
- always
|
||||
- skip_ansible_lint
|
||||
delegate_to: localhost
|
||||
become: false
|
||||
|
||||
- block:
|
||||
- name: Generate the SSH private key
|
||||
openssl_privatekey:
|
||||
path: "{{ SSH_keys.private }}"
|
||||
size: 2048
|
||||
mode: "0600"
|
||||
type: RSA
|
||||
- name: Generate the SSH private key
|
||||
openssl_privatekey:
|
||||
path: "{{ SSH_keys.private }}"
|
||||
size: 2048
|
||||
mode: "0600"
|
||||
type: RSA
|
||||
|
||||
- name: Generate the SSH public key
|
||||
openssl_publickey:
|
||||
path: "{{ SSH_keys.public }}"
|
||||
privatekey_path: "{{ SSH_keys.private }}"
|
||||
format: OpenSSH
|
||||
- name: Generate the SSH public key
|
||||
openssl_publickey:
|
||||
path: "{{ SSH_keys.public }}"
|
||||
privatekey_path: "{{ SSH_keys.private }}"
|
||||
format: OpenSSH
|
||||
|
||||
- name: Copy the private SSH key to /tmp
|
||||
copy:
|
||||
src: "{{ SSH_keys.private }}"
|
||||
dest: "{{ SSH_keys.private_tmp }}"
|
||||
force: true
|
||||
mode: '0600'
|
||||
delegate_to: localhost
|
||||
become: false
|
||||
- name: Copy the private SSH key to /tmp
|
||||
copy:
|
||||
src: "{{ SSH_keys.private }}"
|
||||
dest: "{{ SSH_keys.private_tmp }}"
|
||||
force: true
|
||||
mode: "0600"
|
||||
delegate_to: localhost
|
||||
become: false
|
||||
when: algo_provider != "local"
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
---
|
||||
- name: Linux | set OS specific facts
|
||||
set_fact:
|
||||
tmpfs_volume_name: "AlgoVPN-{{ IP_subject_alt_name }}"
|
||||
tmpfs_volume_name: AlgoVPN-{{ IP_subject_alt_name }}
|
||||
tmpfs_volume_path: /dev/shm
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
- name: MacOS | set OS specific facts
|
||||
set_fact:
|
||||
tmpfs_volume_name: "AlgoVPN-{{ IP_subject_alt_name }}"
|
||||
tmpfs_volume_name: AlgoVPN-{{ IP_subject_alt_name }}
|
||||
tmpfs_volume_path: /Volumes
|
||||
|
||||
- name: MacOS | mount a ram disk
|
||||
|
@ -9,4 +9,4 @@
|
|||
/usr/sbin/diskutil info "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/" ||
|
||||
/usr/sbin/diskutil erasevolume HFS+ "{{ tmpfs_volume_name }}" $(hdiutil attach -nomount ram://64000)
|
||||
args:
|
||||
creates: "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}"
|
||||
creates: /{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
|
||||
- name: Set config paths as facts
|
||||
set_fact:
|
||||
ipsec_pki_path: "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/IPsec/"
|
||||
ipsec_pki_path: /{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/IPsec/
|
||||
|
||||
- name: Update config paths
|
||||
add_host:
|
||||
|
|
|
@ -1,26 +1,26 @@
|
|||
---
|
||||
- name: Linux | Delete the PKI directory
|
||||
file:
|
||||
path: "/{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/"
|
||||
path: /{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/
|
||||
state: absent
|
||||
when: facts.ansible_system == "Linux"
|
||||
|
||||
- block:
|
||||
- name: MacOS | check fs the ramdisk exists
|
||||
command: /usr/sbin/diskutil info "{{ facts.tmpfs_volume_name }}"
|
||||
ignore_errors: true
|
||||
changed_when: false
|
||||
register: diskutil_info
|
||||
- name: MacOS | check fs the ramdisk exists
|
||||
command: /usr/sbin/diskutil info "{{ facts.tmpfs_volume_name }}"
|
||||
ignore_errors: true
|
||||
changed_when: false
|
||||
register: diskutil_info
|
||||
|
||||
- name: MacOS | unmount and eject the ram disk
|
||||
shell: >
|
||||
/usr/sbin/diskutil umount force "/{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/" &&
|
||||
/usr/sbin/diskutil eject "{{ facts.tmpfs_volume_name }}"
|
||||
changed_when: false
|
||||
when: diskutil_info.rc == 0
|
||||
register: result
|
||||
until: result.rc == 0
|
||||
retries: 5
|
||||
delay: 3
|
||||
- name: MacOS | unmount and eject the ram disk
|
||||
shell: >
|
||||
/usr/sbin/diskutil umount force "/{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/" &&
|
||||
/usr/sbin/diskutil eject "{{ facts.tmpfs_volume_name }}"
|
||||
changed_when: false
|
||||
when: diskutil_info.rc == 0
|
||||
register: result
|
||||
until: result.rc == 0
|
||||
retries: 5
|
||||
delay: 3
|
||||
when:
|
||||
- facts.ansible_system == "Darwin"
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
ansible==2.9.20
jinja2==2.8
ansible==6.1.0
jinja2~=3.0.3
netaddr

@ -1,3 +1,3 @@
|
|||
---
- name: restart strongswan
  service: name=strongswan state=restarted
  service: name={{ strongswan_service }} state=restarted

@ -1,6 +1,6 @@
|
|||
---
|
||||
- name: Gather Facts
|
||||
setup:
|
||||
|
||||
- name: Include system based facts and tasks
|
||||
import_tasks: systems/main.yml
|
||||
|
||||
|
@ -22,9 +22,9 @@
|
|||
|
||||
- name: Setup the ipsec config
|
||||
template:
|
||||
src: "roles/strongswan/templates/client_ipsec.conf.j2"
|
||||
src: roles/strongswan/templates/client_ipsec.conf.j2
|
||||
dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.conf"
|
||||
mode: '0644'
|
||||
mode: "0644"
|
||||
with_items:
|
||||
- "{{ vpn_user }}"
|
||||
notify:
|
||||
|
@ -32,9 +32,9 @@
|
|||
|
||||
- name: Setup the ipsec secrets
|
||||
template:
|
||||
src: "roles/strongswan/templates/client_ipsec.secrets.j2"
|
||||
src: roles/strongswan/templates/client_ipsec.secrets.j2
|
||||
dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.secrets"
|
||||
mode: '0600'
|
||||
mode: "0600"
|
||||
with_items:
|
||||
- "{{ vpn_user }}"
|
||||
notify:
|
||||
|
@ -44,12 +44,12 @@
|
|||
lineinfile:
|
||||
dest: "{{ item.dest }}"
|
||||
line: "{{ item.line }}"
|
||||
create: yes
|
||||
create: true
|
||||
with_items:
|
||||
- dest: "{{ configs_prefix }}/ipsec.conf"
|
||||
line: "include ipsec.{{ IP_subject_alt_name }}.conf"
|
||||
line: include ipsec.{{ IP_subject_alt_name }}.conf
|
||||
- dest: "{{ configs_prefix }}/ipsec.secrets"
|
||||
line: "include ipsec.{{ IP_subject_alt_name }}.secrets"
|
||||
line: include ipsec.{{ IP_subject_alt_name }}.secrets
|
||||
notify:
|
||||
- restart strongswan
|
||||
|
||||
|
@ -66,11 +66,11 @@
|
|||
src: "{{ item.src }}"
|
||||
dest: "{{ item.dest }}"
|
||||
with_items:
|
||||
- src: "configs/{{ IP_subject_alt_name }}/ipsec/.pki/certs/{{ vpn_user }}.crt"
|
||||
- src: configs/{{ IP_subject_alt_name }}/ipsec/.pki/certs/{{ vpn_user }}.crt
|
||||
dest: "{{ configs_prefix }}/ipsec.d/certs/{{ vpn_user }}.crt"
|
||||
- src: "configs/{{ IP_subject_alt_name }}/ipsec/.pki/cacert.pem"
|
||||
- src: configs/{{ IP_subject_alt_name }}/ipsec/.pki/cacert.pem
|
||||
dest: "{{ configs_prefix }}/ipsec.d/cacerts/{{ IP_subject_alt_name }}.pem"
|
||||
- src: "configs/{{ IP_subject_alt_name }}/ipsec/.pki/private/{{ vpn_user }}.key"
|
||||
- src: configs/{{ IP_subject_alt_name }}/ipsec/.pki/private/{{ vpn_user }}.key
|
||||
dest: "{{ configs_prefix }}/ipsec.d/private/{{ vpn_user }}.key"
|
||||
notify:
|
||||
- restart strongswan
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
---
|
||||
|
||||
- include_tasks: Debian.yml
|
||||
when: ansible_distribution == 'Debian'
|
||||
|
||||
|
|
|
@ -1,242 +1,210 @@
|
|||
---
|
||||
_azure_regions: >
|
||||
[
|
||||
{
|
||||
"displayName": "East Asia",
|
||||
"latitude": "22.267",
|
||||
"longitude": "114.188",
|
||||
"name": "eastasia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Southeast Asia",
|
||||
"latitude": "1.283",
|
||||
"longitude": "103.833",
|
||||
"name": "southeastasia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Central US",
|
||||
"latitude": "41.5908",
|
||||
"longitude": "-93.6208",
|
||||
"name": "centralus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "East US",
|
||||
"latitude": "37.3719",
|
||||
"longitude": "-79.8164",
|
||||
"name": "eastus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "East US 2",
|
||||
"latitude": "36.6681",
|
||||
"longitude": "-78.3889",
|
||||
"name": "eastus2",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West US",
|
||||
"latitude": "37.783",
|
||||
"longitude": "-122.417",
|
||||
"name": "westus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "North Central US",
|
||||
"latitude": "41.8819",
|
||||
"longitude": "-87.6278",
|
||||
"name": "northcentralus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "South Central US",
|
||||
"latitude": "29.4167",
|
||||
"longitude": "-98.5",
|
||||
"name": "southcentralus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "North Europe",
|
||||
"latitude": "53.3478",
|
||||
"longitude": "-6.2597",
|
||||
"name": "northeurope",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West Europe",
|
||||
"latitude": "52.3667",
|
||||
"longitude": "4.9",
|
||||
"name": "westeurope",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Japan West",
|
||||
"latitude": "34.6939",
|
||||
"longitude": "135.5022",
|
||||
"name": "japanwest",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Japan East",
|
||||
"latitude": "35.68",
|
||||
"longitude": "139.77",
|
||||
"name": "japaneast",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Brazil South",
|
||||
"latitude": "-23.55",
|
||||
"longitude": "-46.633",
|
||||
"name": "brazilsouth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Australia East",
|
||||
"latitude": "-33.86",
|
||||
"longitude": "151.2094",
|
||||
"name": "australiaeast",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Australia Southeast",
|
||||
"latitude": "-37.8136",
|
||||
"longitude": "144.9631",
|
||||
"name": "australiasoutheast",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "South India",
|
||||
"latitude": "12.9822",
|
||||
"longitude": "80.1636",
|
||||
"name": "southindia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Central India",
|
||||
"latitude": "18.5822",
|
||||
"longitude": "73.9197",
|
||||
"name": "centralindia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West India",
|
||||
"latitude": "19.088",
|
||||
"longitude": "72.868",
|
||||
"name": "westindia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Canada Central",
|
||||
"latitude": "43.653",
|
||||
"longitude": "-79.383",
|
||||
"name": "canadacentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Canada East",
|
||||
"latitude": "46.817",
|
||||
"longitude": "-71.217",
|
||||
"name": "canadaeast",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "UK South",
|
||||
"latitude": "50.941",
|
||||
"longitude": "-0.799",
|
||||
"name": "uksouth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "UK West",
|
||||
"latitude": "53.427",
|
||||
"longitude": "-3.084",
|
||||
"name": "ukwest",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West Central US",
|
||||
"latitude": "40.890",
|
||||
"longitude": "-110.234",
|
||||
"name": "westcentralus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West US 2",
|
||||
"latitude": "47.233",
|
||||
"longitude": "-119.852",
|
||||
"name": "westus2",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Korea Central",
|
||||
"latitude": "37.5665",
|
||||
"longitude": "126.9780",
|
||||
"name": "koreacentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Korea South",
|
||||
"latitude": "35.1796",
|
||||
"longitude": "129.0756",
|
||||
"name": "koreasouth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "France Central",
|
||||
"latitude": "46.3772",
|
||||
"longitude": "2.3730",
|
||||
"name": "francecentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "France South",
|
||||
"latitude": "43.8345",
|
||||
"longitude": "2.1972",
|
||||
"name": "francesouth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Australia Central",
|
||||
"latitude": "-35.3075",
|
||||
"longitude": "149.1244",
|
||||
"name": "australiacentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Australia Central 2",
|
||||
"latitude": "-35.3075",
|
||||
"longitude": "149.1244",
|
||||
"name": "australiacentral2",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "UAE Central",
|
||||
"latitude": "24.466667",
|
||||
"longitude": "54.366669",
|
||||
"name": "uaecentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "UAE North",
|
||||
"latitude": "25.266666",
|
||||
"longitude": "55.316666",
|
||||
"name": "uaenorth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "South Africa North",
|
||||
"latitude": "-25.731340",
|
||||
"longitude": "28.218370",
|
||||
"name": "southafricanorth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "South Africa West",
|
||||
"latitude": "-34.075691",
|
||||
"longitude": "18.843266",
|
||||
"name": "southafricawest",
|
||||
"subscriptionId": null
|
||||
}
|
||||
]
|
||||
# az account list-locations --query 'sort_by([].{name:name,displayName:displayName,regionalDisplayName:regionalDisplayName}, &name)' -o yaml
|
||||
azure_regions:
|
||||
- displayName: Asia
|
||||
name: asia
|
||||
regionalDisplayName: Asia
|
||||
- displayName: Asia Pacific
|
||||
name: asiapacific
|
||||
regionalDisplayName: Asia Pacific
|
||||
- displayName: Australia
|
||||
name: australia
|
||||
regionalDisplayName: Australia
|
||||
- displayName: Australia Central
|
||||
name: australiacentral
|
||||
regionalDisplayName: (Asia Pacific) Australia Central
|
||||
- displayName: Australia Central 2
|
||||
name: australiacentral2
|
||||
regionalDisplayName: (Asia Pacific) Australia Central 2
|
||||
- displayName: Australia East
|
||||
name: australiaeast
|
||||
regionalDisplayName: (Asia Pacific) Australia East
|
||||
- displayName: Australia Southeast
|
||||
name: australiasoutheast
|
||||
regionalDisplayName: (Asia Pacific) Australia Southeast
|
||||
- displayName: Brazil
|
||||
name: brazil
|
||||
regionalDisplayName: Brazil
|
||||
- displayName: Brazil South
|
||||
name: brazilsouth
|
||||
regionalDisplayName: (South America) Brazil South
|
||||
- displayName: Brazil Southeast
|
||||
name: brazilsoutheast
|
||||
regionalDisplayName: (South America) Brazil Southeast
|
||||
- displayName: Canada
|
||||
name: canada
|
||||
regionalDisplayName: Canada
|
||||
- displayName: Canada Central
|
||||
name: canadacentral
|
||||
regionalDisplayName: (Canada) Canada Central
|
||||
- displayName: Canada East
|
||||
name: canadaeast
|
||||
regionalDisplayName: (Canada) Canada East
|
||||
- displayName: Central India
|
||||
name: centralindia
|
||||
regionalDisplayName: (Asia Pacific) Central India
|
||||
- displayName: Central US
|
||||
name: centralus
|
||||
regionalDisplayName: (US) Central US
|
||||
- displayName: Central US EUAP
|
||||
name: centraluseuap
|
||||
regionalDisplayName: (US) Central US EUAP
|
||||
- displayName: Central US (Stage)
|
||||
name: centralusstage
|
||||
regionalDisplayName: (US) Central US (Stage)
|
||||
- displayName: East Asia
|
||||
name: eastasia
|
||||
regionalDisplayName: (Asia Pacific) East Asia
|
||||
- displayName: East Asia (Stage)
|
||||
name: eastasiastage
|
||||
regionalDisplayName: (Asia Pacific) East Asia (Stage)
|
||||
- displayName: East US
|
||||
name: eastus
|
||||
regionalDisplayName: (US) East US
|
||||
- displayName: East US 2
|
||||
name: eastus2
|
||||
regionalDisplayName: (US) East US 2
|
||||
- displayName: East US 2 EUAP
|
||||
name: eastus2euap
|
||||
regionalDisplayName: (US) East US 2 EUAP
|
||||
- displayName: East US 2 (Stage)
|
||||
name: eastus2stage
|
||||
regionalDisplayName: (US) East US 2 (Stage)
|
||||
- displayName: East US (Stage)
|
||||
name: eastusstage
|
||||
regionalDisplayName: (US) East US (Stage)
|
||||
- displayName: Europe
|
||||
name: europe
|
||||
regionalDisplayName: Europe
|
||||
- displayName: France Central
|
||||
name: francecentral
|
||||
regionalDisplayName: (Europe) France Central
|
||||
- displayName: France South
|
||||
name: francesouth
|
||||
regionalDisplayName: (Europe) France South
|
||||
- displayName: Germany North
|
||||
name: germanynorth
|
||||
regionalDisplayName: (Europe) Germany North
|
||||
- displayName: Germany West Central
|
||||
name: germanywestcentral
|
||||
regionalDisplayName: (Europe) Germany West Central
|
||||
- displayName: Global
|
||||
name: global
|
||||
regionalDisplayName: Global
|
||||
- displayName: India
|
||||
name: india
|
||||
regionalDisplayName: India
|
||||
- displayName: Japan
|
||||
name: japan
|
||||
regionalDisplayName: Japan
|
||||
- displayName: Japan East
|
||||
name: japaneast
|
||||
regionalDisplayName: (Asia Pacific) Japan East
|
||||
- displayName: Japan West
|
||||
name: japanwest
|
||||
regionalDisplayName: (Asia Pacific) Japan West
|
||||
- displayName: Jio India Central
|
||||
name: jioindiacentral
|
||||
regionalDisplayName: (Asia Pacific) Jio India Central
|
||||
- displayName: Jio India West
|
||||
name: jioindiawest
|
||||
regionalDisplayName: (Asia Pacific) Jio India West
|
||||
- displayName: Korea Central
|
||||
name: koreacentral
|
||||
regionalDisplayName: (Asia Pacific) Korea Central
|
||||
- displayName: Korea South
|
||||
name: koreasouth
|
||||
regionalDisplayName: (Asia Pacific) Korea South
|
||||
- displayName: North Central US
|
||||
name: northcentralus
|
||||
regionalDisplayName: (US) North Central US
|
||||
- displayName: North Central US (Stage)
|
||||
name: northcentralusstage
|
||||
regionalDisplayName: (US) North Central US (Stage)
|
||||
- displayName: North Europe
|
||||
name: northeurope
|
||||
regionalDisplayName: (Europe) North Europe
|
||||
- displayName: Norway East
|
||||
name: norwayeast
|
||||
regionalDisplayName: (Europe) Norway East
|
||||
- displayName: Norway West
|
||||
name: norwaywest
|
||||
regionalDisplayName: (Europe) Norway West
|
||||
- displayName: Qatar Central
|
||||
name: qatarcentral
|
||||
regionalDisplayName: (Europe) Qatar Central
|
||||
- displayName: South Africa North
|
||||
name: southafricanorth
|
||||
regionalDisplayName: (Africa) South Africa North
|
||||
- displayName: South Africa West
|
||||
name: southafricawest
|
||||
regionalDisplayName: (Africa) South Africa West
|
||||
- displayName: South Central US
|
||||
name: southcentralus
|
||||
regionalDisplayName: (US) South Central US
|
||||
- displayName: South Central US (Stage)
|
||||
name: southcentralusstage
|
||||
regionalDisplayName: (US) South Central US (Stage)
|
||||
- displayName: Southeast Asia
|
||||
name: southeastasia
|
||||
regionalDisplayName: (Asia Pacific) Southeast Asia
|
||||
- displayName: Southeast Asia (Stage)
|
||||
name: southeastasiastage
|
||||
regionalDisplayName: (Asia Pacific) Southeast Asia (Stage)
|
||||
- displayName: South India
|
||||
name: southindia
|
||||
regionalDisplayName: (Asia Pacific) South India
|
||||
- displayName: Sweden Central
|
||||
name: swedencentral
|
||||
regionalDisplayName: (Europe) Sweden Central
|
||||
- displayName: Sweden South
|
||||
name: swedensouth
|
||||
regionalDisplayName: (Europe) Sweden South
|
||||
- displayName: Switzerland North
|
||||
name: switzerlandnorth
|
||||
regionalDisplayName: (Europe) Switzerland North
|
||||
- displayName: Switzerland West
|
||||
name: switzerlandwest
|
||||
regionalDisplayName: (Europe) Switzerland West
|
||||
- displayName: UAE Central
|
||||
name: uaecentral
|
||||
regionalDisplayName: (Middle East) UAE Central
|
||||
- displayName: UAE North
|
||||
name: uaenorth
|
||||
regionalDisplayName: (Middle East) UAE North
|
||||
- displayName: United Kingdom
|
||||
name: uk
|
||||
regionalDisplayName: United Kingdom
|
||||
- displayName: UK South
|
||||
name: uksouth
|
||||
regionalDisplayName: (Europe) UK South
|
||||
- displayName: UK West
|
||||
name: ukwest
|
||||
regionalDisplayName: (Europe) UK West
|
||||
- displayName: United States
|
||||
name: unitedstates
|
||||
regionalDisplayName: United States
|
||||
- displayName: West Central US
|
||||
name: westcentralus
|
||||
regionalDisplayName: (US) West Central US
|
||||
- displayName: West Europe
|
||||
name: westeurope
|
||||
regionalDisplayName: (Europe) West Europe
|
||||
- displayName: West India
|
||||
name: westindia
|
||||
regionalDisplayName: (Asia Pacific) West India
|
||||
- displayName: West US
|
||||
name: westus
|
||||
regionalDisplayName: (US) West US
|
||||
- displayName: West US 2
|
||||
name: westus2
|
||||
regionalDisplayName: (US) West US 2
|
||||
- displayName: West US 2 (Stage)
|
||||
name: westus2stage
|
||||
regionalDisplayName: (US) West US 2 (Stage)
|
||||
- displayName: West US 3
|
||||
name: westus3
|
||||
regionalDisplayName: (US) West US 3
|
||||
- displayName: West US (Stage)
|
||||
name: westusstage
|
||||
regionalDisplayName: (US) West US (Stage)
|
||||
|
|
|
@ -6,25 +6,21 @@
|
|||
subscription_id: "{{ azure_subscription_id | default(lookup('env','AZURE_SUBSCRIPTION_ID'), true) }}"
|
||||
|
||||
- block:
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
azure_regions: "{{ _azure_regions|from_json | sort(attribute='name') }}"
|
||||
|
||||
- name: Set the default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for r in azure_regions %}
|
||||
{%- if r['name'] == "eastus" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
- name: Set the default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for r in azure_regions %}
|
||||
{{ loop.index }}. {{ r['displayName'] }}
|
||||
{% endfor %}
|
||||
{%- if r['name'] == "eastus" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
[{{ default_region }}]
|
||||
register: _algo_region
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
{% for r in azure_regions %}
|
||||
{{ loop.index }}. {{ r['regionalDisplayName'] }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
[{{ default_region }}]
|
||||
register: _algo_region
|
||||
when: region is undefined
|
||||
|
|
|
@ -1,45 +1,6 @@
|
|||
---
|
||||
- name: Install requirements
|
||||
pip:
|
||||
name:
|
||||
- packaging
|
||||
- requests[security]
|
||||
- xmltodict
|
||||
- azure-cli-core==2.0.35
|
||||
- azure-cli-nspkg==3.0.2
|
||||
- azure-common==1.1.11
|
||||
- azure-mgmt-authorization==0.51.1
|
||||
- azure-mgmt-batch==5.0.1
|
||||
- azure-mgmt-cdn==3.0.0
|
||||
- azure-mgmt-compute==4.4.0
|
||||
- azure-mgmt-containerinstance==1.4.0
|
||||
- azure-mgmt-containerregistry==2.0.0
|
||||
- azure-mgmt-containerservice==4.4.0
|
||||
- azure-mgmt-dns==2.1.0
|
||||
- azure-mgmt-keyvault==1.1.0
|
||||
- azure-mgmt-marketplaceordering==0.1.0
|
||||
- azure-mgmt-monitor==0.5.2
|
||||
- azure-mgmt-network==2.3.0
|
||||
- azure-mgmt-nspkg==2.0.0
|
||||
- azure-mgmt-redis==5.0.0
|
||||
- azure-mgmt-resource==2.1.0
|
||||
- azure-mgmt-rdbms==1.4.1
|
||||
- azure-mgmt-servicebus==0.5.3
|
||||
- azure-mgmt-sql==0.10.0
|
||||
- azure-mgmt-storage==3.1.0
|
||||
- azure-mgmt-trafficmanager==0.50.0
|
||||
- azure-mgmt-web==0.41.0
|
||||
- azure-nspkg==2.0.0
|
||||
- azure-storage==0.35.1
|
||||
- msrest==0.6.1
|
||||
- msrestazure==0.5.0
|
||||
- azure-keyvault==1.0.0a1
|
||||
- azure-graphrbac==0.40.0
|
||||
- azure-mgmt-cosmosdb==0.5.2
|
||||
- azure-mgmt-hdinsight==0.1.0
|
||||
- azure-mgmt-devtestlabs==3.0.0
|
||||
- azure-mgmt-loganalytics==0.2.0
|
||||
- azure-mgmt-automation==0.1.1
|
||||
- azure-mgmt-iothub==0.7.0
|
||||
requirements: https://raw.githubusercontent.com/ansible-collections/azure/v1.13.0/requirements-azure.txt
|
||||
state: latest
|
||||
virtualenv_python: python3
|
||||
|
|
|
@ -26,7 +26,7 @@
|
|||
end_port: "{{ item.end_port }}"
|
||||
cidr: "{{ item.range }}"
|
||||
with_items:
|
||||
- { proto: tcp, start_port: '{{ ssh_port }}', end_port: '{{ ssh_port }}', range: 0.0.0.0/0 }
|
||||
- { proto: tcp, start_port: "{{ ssh_port }}", end_port: "{{ ssh_port }}", range: 0.0.0.0/0 }
|
||||
- { proto: udp, start_port: 4500, end_port: 4500, range: 0.0.0.0/0 }
|
||||
- { proto: udp, start_port: 500, end_port: 500, range: 0.0.0.0/0 }
|
||||
- { proto: udp, start_port: "{{ wireguard_port }}", end_port: "{{ wireguard_port }}", range: 0.0.0.0/0 }
|
||||
|
@ -54,5 +54,6 @@
|
|||
ansible_ssh_port: "{{ ssh_port }}"
|
||||
cloudinit: true
|
||||
environment:
|
||||
CLOUDSTACK_CONFIG: "{{ algo_cs_config }}"
|
||||
CLOUDSTACK_REGION: "{{ algo_cs_region }}"
|
||||
CLOUDSTACK_KEY: "{{ algo_cs_key }}"
|
||||
CLOUDSTACK_SECRET: "{{ algo_cs_token }}"
|
||||
CLOUDSTACK_ENDPOINT: "{{ algo_cs_url }}"
|
||||
|
|
|
@ -1,54 +1,65 @@
|
|||
---
|
||||
- block:
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter path for cloudstack.ini file (https://trailofbits.github.io/algo/cloud-cloudstack.html)
|
||||
[~/.cloudstack.ini]
|
||||
register: _cs_config
|
||||
when:
|
||||
- cs_config is undefined
|
||||
- lookup('env', 'CLOUDSTACK_CONFIG') | length <= 0
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter the API key (https://trailofbits.github.io/algo/cloud-cloudstack.html):
|
||||
echo: false
|
||||
register: _cs_key
|
||||
when:
|
||||
- cs_key is undefined
|
||||
- lookup('env','CLOUDSTACK_KEY')|length <= 0
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Specify region to use in cloudstack.ini file
|
||||
[exoscale]
|
||||
register: _cs_region
|
||||
when:
|
||||
- cs_region is undefined
|
||||
- lookup('env', 'CLOUDSTACK_REGION') | length <= 0
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter the API secret (https://trailofbits.github.io/algo/cloud-cloudstack.html):
|
||||
echo: false
|
||||
register: _cs_secret
|
||||
when:
|
||||
- cs_secret is undefined
|
||||
- lookup('env','CLOUDSTACK_SECRET')|length <= 0
|
||||
|
||||
- set_fact:
|
||||
algo_cs_config: "{{ cs_config | default(_cs_config.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_CONFIG'), true) | default('~/.cloudstack.ini', true) }}"
|
||||
algo_cs_region: "{{ cs_region | default(_cs_region.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_REGION'), true) | default('exoscale', true) }}"
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter the API endpoint (https://trailofbits.github.io/algo/cloud-cloudstack.html)
|
||||
[https://api.exoscale.com/compute]
|
||||
register: _cs_url
|
||||
when:
|
||||
- cs_url is undefined
|
||||
- lookup('env', 'CLOUDSTACK_ENDPOINT') | length <= 0
|
||||
|
||||
- name: Get zones on cloud
|
||||
cloudstack_zones:
|
||||
register: _cs_zones
|
||||
environment:
|
||||
CLOUDSTACK_CONFIG: "{{ algo_cs_config }}"
|
||||
CLOUDSTACK_REGION: "{{ algo_cs_region }}"
|
||||
- set_fact:
|
||||
algo_cs_key: "{{ cs_key | default(_cs_key.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_KEY'), true) }}"
|
||||
algo_cs_token: "{{ cs_secret | default(_cs_secret.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_SECRET'), true) }}"
|
||||
algo_cs_url: "{{ cs_url | default(_cs_url.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_ENDPOINT'), true) | default('https://api.exoscale.com/compute',\
|
||||
\ true) }}"
|
||||
|
||||
- name: Extract zones from output
|
||||
set_fact:
|
||||
cs_zones: "{{ _cs_zones['zone'] | sort(attribute='name') }}"
|
||||
- name: Get zones on cloud
|
||||
cs_zone_info:
|
||||
register: _cs_zones
|
||||
environment:
|
||||
CLOUDSTACK_KEY: "{{ algo_cs_key }}"
|
||||
CLOUDSTACK_SECRET: "{{ algo_cs_token }}"
|
||||
CLOUDSTACK_ENDPOINT: "{{ algo_cs_url }}"
|
||||
|
||||
- name: Set the default zone
|
||||
set_fact:
|
||||
default_zone: >-
|
||||
{% for z in cs_zones %}
|
||||
{%- if z['name'] == "ch-gva-2" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
- name: Extract zones from output
|
||||
set_fact:
|
||||
cs_zones: "{{ _cs_zones['zones'] | sort(attribute='name') }}"
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What zone should the server be located in?
|
||||
- name: Set the default zone
|
||||
set_fact:
|
||||
default_zone: >-
|
||||
{% for z in cs_zones %}
|
||||
{{ loop.index }}. {{ z['name'] }}
|
||||
{% endfor %}
|
||||
{%- if z['name'] == "ch-gva-2" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
Enter the number of your desired zone
|
||||
[{{ default_zone }}]
|
||||
register: _algo_region
|
||||
when: region is undefined
|
||||
- pause:
|
||||
prompt: |
|
||||
What zone should the server be located in?
|
||||
{% for z in cs_zones %}
|
||||
{{ loop.index }}. {{ z['name'] }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired zone
|
||||
[{{ default_zone }}]
|
||||
register: _algo_region
|
||||
when: region is undefined
|
||||
|
|
|
@ -2,14 +2,14 @@
|
|||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- name: "Upload the SSH key"
|
||||
- name: Upload the SSH key
|
||||
digital_ocean_sshkey:
|
||||
oauth_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
ssh_pub_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
register: do_ssh_key
|
||||
|
||||
- name: "Creating a droplet..."
|
||||
- name: Creating a droplet...
|
||||
digital_ocean_droplet:
|
||||
state: present
|
||||
name: "{{ algo_server_name }}"
|
||||
|
@ -26,21 +26,25 @@
|
|||
- Environment:Algo
|
||||
register: digital_ocean_droplet
|
||||
|
||||
- block:
|
||||
- name: "Create a Floating IP"
|
||||
digital_ocean_floating_ip:
|
||||
state: present
|
||||
oauth_token: "{{ algo_do_token }}"
|
||||
droplet_id: "{{ digital_ocean_droplet.data.droplet.id }}"
|
||||
register: digital_ocean_floating_ip
|
||||
# Return data is not idempotent
|
||||
- set_fact:
|
||||
droplet: "{{ digital_ocean_droplet.data.droplet | default(digital_ocean_droplet.data) }}"
|
||||
|
||||
- name: Set the static ip as a fact
|
||||
set_fact:
|
||||
cloud_alternative_ingress_ip: "{{ digital_ocean_floating_ip.data.floating_ip.ip }}"
|
||||
- block:
|
||||
- name: Create a Floating IP
|
||||
digital_ocean_floating_ip:
|
||||
state: present
|
||||
oauth_token: "{{ algo_do_token }}"
|
||||
droplet_id: "{{ droplet.id }}"
|
||||
register: digital_ocean_floating_ip
|
||||
|
||||
- name: Set the static ip as a fact
|
||||
set_fact:
|
||||
cloud_alternative_ingress_ip: "{{ digital_ocean_floating_ip.data.floating_ip.ip }}"
|
||||
when: alternative_ingress_ip
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ digital_ocean_droplet.data.ip_address }}"
|
||||
cloud_instance_ip: "{{ (droplet.networks.v4 | selectattr('type', '==', 'public')).0.ip_address }}"
|
||||
ansible_ssh_user: algo
|
||||
ansible_ssh_port: "{{ ssh_port }}"
|
||||
cloudinit: true
|
||||
|
|
|
@ -18,13 +18,13 @@
|
|||
method: GET
|
||||
status_code: 200
|
||||
headers:
|
||||
Content-Type: "application/json"
|
||||
Authorization: "Bearer {{ algo_do_token }}"
|
||||
Content-Type: application/json
|
||||
Authorization: Bearer {{ algo_do_token }}
|
||||
register: _do_regions
|
||||
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
do_regions: "{{ _do_regions.json.regions | sort(attribute='slug') }}"
|
||||
do_regions: "{{ _do_regions.json.regions | selectattr('available', 'true') | sort(attribute='slug') }}"
|
||||
|
||||
- name: Set default region
|
||||
set_fact:
|
||||
|
|
|
@ -20,9 +20,17 @@ Parameters:
|
|||
Type: String
|
||||
SshPort:
|
||||
Type: String
|
||||
InstanceMarketTypeParameter:
|
||||
Description: Launch a Spot instance or standard on-demand instance
|
||||
Type: String
|
||||
Default: on-demand
|
||||
AllowedValues:
|
||||
- spot
|
||||
- on-demand
|
||||
Conditions:
|
||||
AllocateNewEIP: !Equals [!Ref UseThisElasticIP, '']
|
||||
AssociateExistingEIP: !Not [!Equals [!Ref UseThisElasticIP, '']]
|
||||
InstanceIsSpot: !Equals [spot, !Ref InstanceMarketTypeParameter]
|
||||
Resources:
|
||||
VPC:
|
||||
Type: AWS::EC2::VPC
|
||||
|
@ -146,6 +154,15 @@ Resources:
|
|||
- Key: Name
|
||||
Value: !Ref AWS::StackName
|
||||
|
||||
EC2LaunchTemplate:
|
||||
Type: AWS::EC2::LaunchTemplate
|
||||
Condition: InstanceIsSpot # Only create this template if requested
|
||||
Properties: # a spot instance_market_type in config.cfg
|
||||
LaunchTemplateName: !Ref AWS::StackName
|
||||
LaunchTemplateData:
|
||||
InstanceMarketOptions:
|
||||
MarketType: spot
|
||||
|
||||
EC2Instance:
|
||||
Type: AWS::EC2::Instance
|
||||
DependsOn:
|
||||
|
@ -169,6 +186,14 @@ Resources:
|
|||
SubnetId: !Ref Subnet
|
||||
Ipv6AddressCount: 1
|
||||
UserData: !Ref UserData
|
||||
LaunchTemplate:
|
||||
!If # Only if Conditions created "EC2LaunchTemplate"
|
||||
- InstanceIsSpot
|
||||
-
|
||||
LaunchTemplateId:
|
||||
!Ref EC2LaunchTemplate
|
||||
Version: 1
|
||||
- !Ref AWS::NoValue # Else this LaunchTemplate not set
|
||||
Tags:
|
||||
- Key: Name
|
||||
Value: !Ref AWS::StackName
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
stack_name: "{{ stack_name }}"
|
||||
state: "present"
|
||||
state: present
|
||||
region: "{{ algo_region }}"
|
||||
template: roles/cloud-ec2/files/stack.yaml
|
||||
template_parameters:
|
||||
|
@ -16,6 +16,7 @@
|
|||
EbsEncrypted: "{{ encrypted }}"
|
||||
UserData: "{{ lookup('template', 'files/cloud-init/base.yml') | b64encode }}"
|
||||
SshPort: "{{ ssh_port }}"
|
||||
InstanceMarketTypeParameter: "{{ cloud_providers.ec2.instance_market_type }}"
|
||||
tags:
|
||||
Environment: Algo
|
||||
register: stack
|
||||
|
|
|
@ -6,13 +6,14 @@
|
|||
import_tasks: prompts.yml
|
||||
|
||||
- name: Locate official AMI for region
|
||||
ec2_ami_facts:
|
||||
ec2_ami_info:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
owners: "{{ cloud_providers.ec2.image.owner }}"
|
||||
region: "{{ algo_region }}"
|
||||
filters:
|
||||
name: "ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-amd64-server-*"
|
||||
architecture: "{{ cloud_providers.ec2.image.arch }}"
|
||||
name: ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-*64-server-*
|
||||
register: ami_search
|
||||
|
||||
- name: Set the ami id as a fact
|
||||
|
|
|
@ -6,8 +6,8 @@
|
|||
echo: false
|
||||
register: _aws_access_key
|
||||
when:
|
||||
- aws_access_key is undefined
|
||||
- lookup('env','AWS_ACCESS_KEY_ID')|length <= 0
|
||||
- aws_access_key is undefined
|
||||
- lookup('env','AWS_ACCESS_KEY_ID')|length <= 0
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
|
@ -23,35 +23,35 @@
|
|||
secret_key: "{{ aws_secret_key | default(_aws_secret_key.user_input|default(None)) | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}"
|
||||
|
||||
- block:
|
||||
- name: Get regions
|
||||
aws_region_facts:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
region: us-east-1
|
||||
register: _aws_regions
|
||||
- name: Get regions
|
||||
aws_region_info:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
region: us-east-1
|
||||
register: _aws_regions
|
||||
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
aws_regions: "{{ _aws_regions.regions | sort(attribute='region_name') }}"
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
aws_regions: "{{ _aws_regions.regions | sort(attribute='region_name') }}"
|
||||
|
||||
- name: Set the default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for r in aws_regions %}
|
||||
{%- if r['region_name'] == "us-east-1" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
(https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
|
||||
- name: Set the default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for r in aws_regions %}
|
||||
{{ loop.index }}. {{ r['region_name'] }}
|
||||
{% endfor %}
|
||||
{%- if r['region_name'] == "us-east-1" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
[{{ default_region }}]
|
||||
register: _algo_region
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
(https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
|
||||
{% for r in aws_regions %}
|
||||
{{ loop.index }}. {{ r['region_name'] }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
[{{ default_region }}]
|
||||
register: _algo_region
|
||||
when: region is undefined
|
||||
|
||||
- name: Set algo_region and stack_name facts
|
||||
|
@ -63,26 +63,26 @@
|
|||
stack_name: "{{ algo_server_name | replace('.', '-') }}"
|
||||
|
||||
- block:
|
||||
- name: Get existing available Elastic IPs
|
||||
ec2_eip_facts:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
region: "{{ algo_region }}"
|
||||
register: raw_eip_addresses
|
||||
- name: Get existing available Elastic IPs
|
||||
ec2_eip_info:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
region: "{{ algo_region }}"
|
||||
register: raw_eip_addresses
|
||||
|
||||
- set_fact:
|
||||
available_eip_addresses: "{{ raw_eip_addresses.addresses | selectattr('association_id', 'undefined') | list }}"
|
||||
- set_fact:
|
||||
available_eip_addresses: "{{ raw_eip_addresses.addresses | selectattr('association_id', 'undefined') | list }}"
|
||||
|
||||
- pause:
|
||||
prompt: >-
|
||||
What Elastic IP would you like to use?
|
||||
{% for eip in available_eip_addresses %}
|
||||
{{ loop.index }}. {{ eip['public_ip'] }}
|
||||
{% endfor %}
|
||||
- pause:
|
||||
prompt: >-
|
||||
What Elastic IP would you like to use?
|
||||
{% for eip in available_eip_addresses %}
|
||||
{{ loop.index }}. {{ eip['public_ip'] }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired Elastic IP
|
||||
register: _use_existing_eip
|
||||
Enter the number of your desired Elastic IP
|
||||
register: _use_existing_eip
|
||||
|
||||
- set_fact:
|
||||
existing_eip: "{{ available_eip_addresses[_use_existing_eip.user_input | int -1 ]['allocation_id'] }}"
|
||||
- set_fact:
|
||||
existing_eip: "{{ available_eip_addresses[_use_existing_eip.user_input | int -1 ]['allocation_id'] }}"
|
||||
when: cloud_providers.ec2.use_existing_eip
|
||||
|
|
|
@ -27,27 +27,27 @@
|
|||
allowed:
|
||||
- ip_protocol: udp
|
||||
ports:
|
||||
- '500'
|
||||
- '4500'
|
||||
- '{{ wireguard_port|string }}'
|
||||
- "500"
|
||||
- "4500"
|
||||
- "{{ wireguard_port|string }}"
|
||||
- ip_protocol: tcp
|
||||
ports:
|
||||
- '{{ ssh_port }}'
|
||||
- "{{ ssh_port }}"
|
||||
- ip_protocol: icmp
|
||||
|
||||
- block:
|
||||
- name: External IP allocated
|
||||
gcp_compute_address:
|
||||
auth_kind: serviceaccount
|
||||
service_account_file: "{{ credentials_file_path }}"
|
||||
project: "{{ project_id }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
region: "{{ algo_region }}"
|
||||
register: gcp_compute_address
|
||||
- name: External IP allocated
|
||||
gcp_compute_address:
|
||||
auth_kind: serviceaccount
|
||||
service_account_file: "{{ credentials_file_path }}"
|
||||
project: "{{ project_id }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
region: "{{ algo_region }}"
|
||||
register: gcp_compute_address
|
||||
|
||||
- name: Set External IP as a fact
|
||||
set_fact:
|
||||
external_ip: "{{ gcp_compute_address.address }}"
|
||||
- name: Set External IP as a fact
|
||||
set_fact:
|
||||
external_ip: "{{ gcp_compute_address.address }}"
|
||||
when: cloud_providers.gce.external_static_ip
|
||||
|
||||
- name: Instance created
|
||||
|
@ -62,9 +62,9 @@
|
|||
- auto_delete: true
|
||||
boot: true
|
||||
initialize_params:
|
||||
source_image: "projects/ubuntu-os-cloud/global/images/family/{{ cloud_providers.gce.image }}"
|
||||
source_image: projects/ubuntu-os-cloud/global/images/family/{{ cloud_providers.gce.image }}
|
||||
metadata:
|
||||
ssh-keys: "algo:{{ ssh_public_key_lookup }}"
|
||||
ssh-keys: algo:{{ ssh_public_key_lookup }}
|
||||
user-data: "{{ lookup('template', 'files/cloud-init/base.yml') }}"
|
||||
network_interfaces:
|
||||
- network: "{{ gcp_compute_network }}"
|
||||
|
@ -74,7 +74,7 @@
|
|||
type: ONE_TO_ONE_NAT
|
||||
tags:
|
||||
items:
|
||||
- "environment-algo"
|
||||
- environment-algo
|
||||
register: gcp_compute_instance
|
||||
|
||||
- set_fact:
|
||||
|
|
|
@ -9,7 +9,8 @@
|
|||
- lookup('env','GCE_CREDENTIALS_FILE_PATH')|length <= 0
|
||||
|
||||
- set_fact:
|
||||
credentials_file_path: "{{ gce_credentials_file | default(_gce_credentials_file.user_input|default(None)) | default(lookup('env','GCE_CREDENTIALS_FILE_PATH'), true) }}"
|
||||
credentials_file_path: "{{ gce_credentials_file | default(_gce_credentials_file.user_input|default(None)) | default(lookup('env','GCE_CREDENTIALS_FILE_PATH'),\
|
||||
\ true) }}"
|
||||
ssh_public_key_lookup: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
|
||||
- set_fact:
|
||||
|
@ -20,40 +21,40 @@
|
|||
project_id: "{{ credentials_file_lookup.project_id | default(lookup('env','GCE_PROJECT')) }}"
|
||||
|
||||
- block:
|
||||
- name: Get regions
|
||||
gcp_compute_location_info:
|
||||
auth_kind: serviceaccount
|
||||
service_account_file: "{{ credentials_file_path }}"
|
||||
project: "{{ project_id }}"
|
||||
scope: regions
|
||||
filters: status=UP
|
||||
register: gcp_compute_regions_info
|
||||
- name: Get regions
|
||||
gcp_compute_location_info:
|
||||
auth_kind: serviceaccount
|
||||
service_account_file: "{{ credentials_file_path }}"
|
||||
project: "{{ project_id }}"
|
||||
scope: regions
|
||||
filters: status=UP
|
||||
register: gcp_compute_regions_info
|
||||
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
gce_regions: >-
|
||||
[{%- for region in gcp_compute_regions_info.resources | sort(attribute='name') -%}
|
||||
'{{ region.name }}'{% if not loop.last %},{% endif %}
|
||||
{%- endfor -%}]
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
gce_regions: >-
|
||||
[{%- for region in gcp_compute_regions_info.resources | sort(attribute='name') -%}
|
||||
'{{ region.name }}'{% if not loop.last %},{% endif %}
|
||||
{%- endfor -%}]
|
||||
|
||||
- name: Set facts about the default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for region in gce_regions %}
|
||||
{%- if region == "us-east1" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
- name: Set facts about the default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for region in gce_regions %}
|
||||
{%- if region == "us-east1" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
(https://cloud.google.com/compute/docs/regions-zones/#locations)
|
||||
{% for r in gce_regions %}
|
||||
{{ loop.index }}. {{ r }}
|
||||
{% endfor %}
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
(https://cloud.google.com/compute/docs/regions-zones/#locations)
|
||||
{% for r in gce_regions %}
|
||||
{{ loop.index }}. {{ r }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
[{{ default_region }}]
|
||||
register: _gce_region
|
||||
Enter the number of your desired region
|
||||
[{{ default_region }}]
|
||||
register: _gce_region
|
||||
when: region is undefined
|
||||
|
||||
- name: Set region as a fact
|
||||
|
@ -70,8 +71,8 @@
|
|||
project: "{{ project_id }}"
|
||||
scope: zones
|
||||
filters:
|
||||
- "name={{ algo_region }}-*"
|
||||
- "status=UP"
|
||||
- name={{ algo_region }}-*
|
||||
- status=UP
|
||||
register: gcp_compute_zone_info
|
||||
|
||||
- name: Set random available zone as a fact
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
|
||||
- name: Create an ssh key
|
||||
hcloud_ssh_key:
|
||||
name: "algo-{{ 999999 | random(seed=lookup('file', SSH_keys.public)) }}"
|
||||
name: algo-{{ 999999 | random(seed=lookup('file', SSH_keys.public)) }}
|
||||
public_key: "{{ lookup('file', SSH_keys.public) }}"
|
||||
state: present
|
||||
api_token: "{{ algo_hcloud_token }}"
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
api_token: "{{ algo_hcloud_token }}"
|
||||
register: _hcloud_regions
|
||||
|
||||
- name: Set facts about thre regions
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
hcloud_regions: "{{ hcloud_datacenter_facts | sort(attribute='location') }}"
73 roles/cloud-lightsail/files/stack.yaml Normal file
@ -0,0 +1,73 @@
AWSTemplateFormatVersion: '2010-09-09'
Description: 'Algo VPN stack (LightSail)'
Parameters:
  InstanceTypeParameter:
    Type: String
    Default: 'nano_2_0'
  ImageIdParameter:
    Type: String
    Default: 'ubuntu_20_04'
  WireGuardPort:
    Type: String
    Default: '51820'
  SshPort:
    Type: String
    Default: '4160'
  UserData:
    Type: String
    Default: 'true'
Resources:
  Instance:
    Type: AWS::Lightsail::Instance
    Properties:
      BlueprintId:
        Ref: ImageIdParameter
      BundleId:
        Ref: InstanceTypeParameter
      InstanceName: !Ref AWS::StackName
      Networking:
        Ports:
          - AccessDirection: inbound
            Cidrs: ['0.0.0.0/0']
            Ipv6Cidrs: ['::/0']
            CommonName: SSH
            FromPort: !Ref SshPort
            ToPort: !Ref SshPort
            Protocol: tcp
          - AccessDirection: inbound
            Cidrs: ['0.0.0.0/0']
            Ipv6Cidrs: ['::/0']
            CommonName: WireGuard
            FromPort: !Ref WireGuardPort
            ToPort: !Ref WireGuardPort
            Protocol: udp
          - AccessDirection: inbound
            Cidrs: ['0.0.0.0/0']
            Ipv6Cidrs: ['::/0']
            CommonName: IPSec-4500
            FromPort: 4500
            ToPort: 4500
            Protocol: udp
          - AccessDirection: inbound
            Cidrs: ['0.0.0.0/0']
            Ipv6Cidrs: ['::/0']
            CommonName: IPSec-500
            FromPort: 500
            ToPort: 500
            Protocol: udp
      Tags:
        - Key: Name
          Value: !Ref AWS::StackName
      UserData: !Ref UserData

  StaticIP:
    Type: AWS::Lightsail::StaticIp
    Properties:
      AttachedTo: !Ref Instance
      StaticIpName: !Join [ "-", [ !Ref AWS::StackName, "ip" ] ]
    DependsOn:
      - Instance

Outputs:
  IpAddress:
    Value: !GetAtt [StaticIP, IpAddress]
19 roles/cloud-lightsail/tasks/cloudformation.yml Normal file
@ -0,0 +1,19 @@
---
- name: Deploy the template
  cloudformation:
    aws_access_key: "{{ access_key }}"
    aws_secret_key: "{{ secret_key }}"
    stack_name: "{{ stack_name }}"
    state: present
    region: "{{ algo_region }}"
    template: roles/cloud-lightsail/files/stack.yaml
    template_parameters:
      InstanceTypeParameter: "{{ cloud_providers.lightsail.size }}"
      ImageIdParameter: "{{ cloud_providers.lightsail.image }}"
      WireGuardPort: "{{ wireguard_port }}"
      SshPort: "{{ ssh_port }}"
      UserData: "{{ lookup('template', 'files/cloud-init/base.sh') }}"
    tags:
      Environment: Algo
      Lightsail: true
  register: stack
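The registered `stack` result is consumed by the Lightsail main.yml below via `stack.stack_outputs.IpAddress`. As a hedged, illustrative sketch only (not part of this change), an extra task could assert that the output exists before it is used; it relies only on the `stack` and `stack_name` variables already defined above:

# Illustrative only: fail early if the CloudFormation stack did not publish
# the IpAddress output that later tasks rely on.
- name: Verify the Lightsail stack returned a public IP
  assert:
    that:
      - stack.stack_outputs.IpAddress is defined
      - stack.stack_outputs.IpAddress | length > 0
    fail_msg: Stack {{ stack_name }} did not return an IpAddress output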
@ -5,36 +5,11 @@
|
|||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- name: Create an instance
|
||||
lightsail:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
state: present
|
||||
region: "{{ algo_region }}"
|
||||
zone: "{{ algo_region }}a"
|
||||
blueprint_id: "{{ cloud_providers.lightsail.image }}"
|
||||
bundle_id: "{{ cloud_providers.lightsail.size }}"
|
||||
wait_timeout: "300"
|
||||
open_ports:
|
||||
- from_port: "{{ ssh_port }}"
|
||||
to_port: "{{ ssh_port }}"
|
||||
protocol: tcp
|
||||
- from_port: 4500
|
||||
to_port: 4500
|
||||
protocol: udp
|
||||
- from_port: 500
|
||||
to_port: 500
|
||||
protocol: udp
|
||||
- from_port: "{{ wireguard_port }}"
|
||||
to_port: "{{ wireguard_port }}"
|
||||
protocol: udp
|
||||
user_data: |
|
||||
{{ lookup('template', 'files/cloud-init/base.sh') }}
|
||||
register: algo_instance
|
||||
- name: Deploy the stack
|
||||
import_tasks: cloudformation.yml
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ algo_instance['instance']['public_ip_address'] }}"
|
||||
cloud_instance_ip: "{{ stack.stack_outputs.IpAddress }}"
|
||||
ansible_ssh_user: algo
|
||||
ansible_ssh_port: "{{ ssh_port }}"
|
||||
cloudinit: true
|
||||
|
|
|
@ -6,8 +6,8 @@
|
|||
echo: false
|
||||
register: _aws_access_key
|
||||
when:
|
||||
- aws_access_key is undefined
|
||||
- lookup('env','AWS_ACCESS_KEY_ID')|length <= 0
|
||||
- aws_access_key is undefined
|
||||
- lookup('env','AWS_ACCESS_KEY_ID')|length <= 0
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
|
@ -23,38 +23,39 @@
|
|||
secret_key: "{{ aws_secret_key | default(_aws_secret_key.user_input|default(None)) | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}"
|
||||
|
||||
- block:
|
||||
- name: Get regions
|
||||
lightsail_region_facts:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
region: us-east-1
|
||||
register: _lightsail_regions
|
||||
- name: Get regions
|
||||
lightsail_region_facts:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
region: us-east-1
|
||||
register: _lightsail_regions
|
||||
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
lightsail_regions: "{{ _lightsail_regions.data.regions | sort(attribute='name') }}"
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
lightsail_regions: "{{ _lightsail_regions.data.regions | sort(attribute='name') }}"
|
||||
|
||||
- name: Set the default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for r in lightsail_regions %}
|
||||
{%- if r['name'] == "us-east-1" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
(https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/)
|
||||
- name: Set the default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for r in lightsail_regions %}
|
||||
{{ (loop.index|string + '.').ljust(3) }} {{ r['name'].ljust(20) }} {{ r['displayName'] }}
|
||||
{% endfor %}
|
||||
{%- if r['name'] == "us-east-1" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
[{{ default_region }}]
|
||||
register: _algo_region
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
(https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/)
|
||||
{% for r in lightsail_regions %}
|
||||
{{ (loop.index|string + '.').ljust(3) }} {{ r['name'].ljust(20) }} {{ r['displayName'] }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
[{{ default_region }}]
|
||||
register: _algo_region
|
||||
when: region is undefined
|
||||
|
||||
- set_fact:
|
||||
stack_name: "{{ algo_server_name | replace('.', '-') }}"
|
||||
algo_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input %}{{ lightsail_regions[_algo_region.user_input | int -1 ]['name'] }}
|
||||
|
|
|
@ -26,7 +26,7 @@
|
|||
|
||||
- name: Update the stackscript
|
||||
uri:
|
||||
url: "https://api.linode.com/v4/linode/stackscripts/{{ _linode_stackscript.stackscript.id }}"
|
||||
url: https://api.linode.com/v4/linode/stackscripts/{{ _linode_stackscript.stackscript.id }}
|
||||
method: PUT
|
||||
body_format: json
|
||||
body:
|
||||
|
@ -34,10 +34,10 @@
|
|||
{{ stackscript }}
|
||||
headers:
|
||||
Content-Type: application/json
|
||||
Authorization: "Bearer {{ algo_linode_token }}"
|
||||
Authorization: Bearer {{ algo_linode_token }}
|
||||
when: (_linode_stackscript.stackscript.script | hash('md5')) != (stackscript | hash('md5'))
|
||||
|
||||
- name: "Creating an instance..."
|
||||
- name: Creating an instance...
|
||||
linode_v4:
|
||||
access_token: "{{ algo_linode_token }}"
|
||||
label: "{{ algo_server_name }}"
|
||||
|
|
|
@ -7,14 +7,14 @@
|
|||
import_tasks: venv.yml
|
||||
|
||||
- name: Security group created
|
||||
os_security_group:
|
||||
openstack.cloud.security_group:
|
||||
state: "{{ state|default('present') }}"
|
||||
name: "{{ algo_server_name }}-security_group"
|
||||
description: AlgoVPN security group
|
||||
register: os_security_group
|
||||
|
||||
- name: Security rules created
|
||||
os_security_group_rule:
|
||||
openstack.cloud.security_group_rule:
|
||||
state: "{{ state|default('present') }}"
|
||||
security_group: "{{ os_security_group.id }}"
|
||||
protocol: "{{ item.proto }}"
|
||||
|
@ -22,29 +22,32 @@
|
|||
port_range_max: "{{ item.port_max }}"
|
||||
remote_ip_prefix: "{{ item.range }}"
|
||||
with_items:
|
||||
- { proto: tcp, port_min: '{{ ssh_port }}', port_max: '{{ ssh_port }}', range: 0.0.0.0/0 }
|
||||
- { proto: tcp, port_min: "{{ ssh_port }}", port_max: "{{ ssh_port }}", range: 0.0.0.0/0 }
|
||||
- { proto: icmp, port_min: -1, port_max: -1, range: 0.0.0.0/0 }
|
||||
- { proto: udp, port_min: 4500, port_max: 4500, range: 0.0.0.0/0 }
|
||||
- { proto: udp, port_min: 500, port_max: 500, range: 0.0.0.0/0 }
|
||||
- { proto: udp, port_min: "{{ wireguard_port }}", port_max: "{{ wireguard_port }}", range: 0.0.0.0/0 }
|
||||
|
||||
- name: Gather facts about flavors
|
||||
os_flavor_facts:
|
||||
openstack.cloud.compute_flavor_info:
|
||||
ram: "{{ cloud_providers.openstack.flavor_ram }}"
|
||||
register: os_flavor
|
||||
|
||||
- name: Gather facts about images
|
||||
os_image_facts:
|
||||
openstack.cloud.image_info:
|
||||
register: os_image
|
||||
|
||||
- name: Set image as a fact
|
||||
set_fact:
|
||||
image_id: "{{ item.id }}"
|
||||
loop: "{{ openstack_image }}"
|
||||
loop: "{{ os_image.openstack_image }}"
|
||||
when:
|
||||
- item.name == cloud_providers.openstack.image
|
||||
- item.status == "active"
|
||||
|
||||
- name: Gather facts about public networks
|
||||
os_networks_facts:
|
||||
openstack.cloud.networks_info:
|
||||
register: os_network
|
||||
|
||||
- name: Set the network as a fact
|
||||
set_fact:
|
||||
|
@ -53,15 +56,15 @@
|
|||
- item['router:external']|default(omit)
|
||||
- item['admin_state_up']|default(omit)
|
||||
- item['status'] == 'ACTIVE'
|
||||
with_items: "{{ openstack_networks }}"
|
||||
with_items: "{{ os_network.openstack_networks }}"
|
||||
|
||||
- name: Set facts
|
||||
set_fact:
|
||||
flavor_id: "{{ (openstack_flavors | sort(attribute='ram'))[0]['id'] }}"
|
||||
flavor_id: "{{ (os_flavor.openstack_flavors | sort(attribute='ram'))[0]['id'] }}"
|
||||
security_group_name: "{{ os_security_group['secgroup']['name'] }}"
|
||||
|
||||
- name: Server created
|
||||
os_server:
|
||||
openstack.cloud.server:
|
||||
state: "{{ state|default('present') }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
image: "{{ image_id }}"
|
||||
|
|
|
@ -1,71 +1,74 @@
|
|||
---
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- block:
|
||||
- name: Gather Scaleway organizations facts
|
||||
scaleway_organization_facts:
|
||||
- name: Gather Scaleway organizations facts
|
||||
scaleway_organization_info:
|
||||
register: scaleway_org
|
||||
|
||||
- name: Get images
|
||||
scaleway_image_facts:
|
||||
region: "{{ algo_region }}"
|
||||
- name: Get images
|
||||
scaleway_image_info:
|
||||
region: "{{ algo_region }}"
|
||||
register: scaleway_image
|
||||
|
||||
- name: Set cloud specific facts
|
||||
set_fact:
|
||||
organization_id: "{{ scaleway_organization_facts[0]['id'] }}"
|
||||
images: >-
|
||||
[{% for i in scaleway_image_facts -%}
|
||||
{% if i.name == cloud_providers.scaleway.image and
|
||||
i.arch == cloud_providers.scaleway.arch -%}
|
||||
'{{ i.id }}'{% if not loop.last %},{% endif %}
|
||||
{%- endif -%}
|
||||
{%- endfor -%}]
|
||||
- name: Set cloud specific facts
|
||||
set_fact:
|
||||
organization_id: "{{ scaleway_org.scaleway_organization_info[0]['id'] }}"
|
||||
images: >-
|
||||
[{% for i in scaleway_image.scaleway_image_info -%}
|
||||
{% if i.name == cloud_providers.scaleway.image and
|
||||
i.arch == cloud_providers.scaleway.arch -%}
|
||||
'{{ i.id }}'{% if not loop.last %},{% endif %}
|
||||
{%- endif -%}
|
||||
{%- endfor -%}]
|
||||
|
||||
- name: Create a server
|
||||
scaleway_compute:
|
||||
name: "{{ algo_server_name }}"
|
||||
enable_ipv6: true
|
||||
public_ip: dynamic
|
||||
boot_type: local
|
||||
state: present
|
||||
image: "{{ images[0] }}"
|
||||
organization: "{{ organization_id }}"
|
||||
region: "{{ algo_region }}"
|
||||
commercial_type: "{{ cloud_providers.scaleway.size }}"
|
||||
wait: true
|
||||
tags:
|
||||
- Environment:Algo
|
||||
- AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }}
|
||||
register: scaleway_compute
|
||||
- name: Create a server
|
||||
scaleway_compute:
|
||||
name: "{{ algo_server_name }}"
|
||||
enable_ipv6: true
|
||||
public_ip: dynamic
|
||||
boot_type: local
|
||||
state: present
|
||||
image: "{{ images[0] }}"
|
||||
organization: "{{ organization_id }}"
|
||||
region: "{{ algo_region }}"
|
||||
commercial_type: "{{ cloud_providers.scaleway.size }}"
|
||||
wait: true
|
||||
tags:
|
||||
- Environment:Algo
|
||||
- AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }}
|
||||
register: scaleway_compute
|
||||
|
||||
- name: Patch the cloud-init
|
||||
uri:
|
||||
url: "https://cp-{{ algo_region }}.scaleway.com/servers/{{ scaleway_compute.msg.id }}/user_data/cloud-init"
|
||||
method: PATCH
|
||||
body: "{{ lookup('template', 'files/cloud-init/base.yml') }}"
|
||||
status_code: 204
|
||||
headers:
|
||||
Content-Type: "text/plain"
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
- name: Patch the cloud-init
|
||||
uri:
|
||||
url: https://cp-{{ algo_region }}.scaleway.com/servers/{{ scaleway_compute.msg.id }}/user_data/cloud-init
|
||||
method: PATCH
|
||||
body: "{{ lookup('template', 'files/cloud-init/base.yml') }}"
|
||||
status_code: 204
|
||||
headers:
|
||||
Content-Type: text/plain
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
|
||||
- name: Start the server
|
||||
scaleway_compute:
|
||||
name: "{{ algo_server_name }}"
|
||||
enable_ipv6: true
|
||||
public_ip: dynamic
|
||||
boot_type: local
|
||||
state: running
|
||||
image: "{{ images[0] }}"
|
||||
organization: "{{ organization_id }}"
|
||||
region: "{{ algo_region }}"
|
||||
commercial_type: "{{ cloud_providers.scaleway.size }}"
|
||||
wait: true
|
||||
tags:
|
||||
- Environment:Algo
|
||||
- AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }}
|
||||
register: algo_instance
|
||||
until: algo_instance.msg.public_ip
|
||||
retries: 3
|
||||
delay: 3
|
||||
- name: Start the server
|
||||
scaleway_compute:
|
||||
name: "{{ algo_server_name }}"
|
||||
enable_ipv6: true
|
||||
public_ip: dynamic
|
||||
boot_type: local
|
||||
state: running
|
||||
image: "{{ images[0] }}"
|
||||
organization: "{{ organization_id }}"
|
||||
region: "{{ algo_region }}"
|
||||
commercial_type: "{{ cloud_providers.scaleway.size }}"
|
||||
wait: true
|
||||
tags:
|
||||
- Environment:Algo
|
||||
- AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }}
|
||||
register: algo_instance
|
||||
until: algo_instance.msg.public_ip
|
||||
retries: 3
|
||||
delay: 3
|
||||
environment:
|
||||
SCW_TOKEN: "{{ algo_scaleway_token }}"
|
||||
|
||||
|
|
|
@ -3,56 +3,54 @@
|
|||
import_tasks: prompts.yml
|
||||
|
||||
- block:
|
||||
- name: Creating a firewall group
|
||||
vultr_firewall_group:
|
||||
name: "{{ algo_server_name }}"
|
||||
- name: Creating a firewall group
|
||||
vultr_firewall_group:
|
||||
name: "{{ algo_server_name }}"
|
||||
|
||||
- name: Creating firewall rules
|
||||
vultr_firewall_rule:
|
||||
group: "{{ algo_server_name }}"
|
||||
protocol: "{{ item.protocol }}"
|
||||
port: "{{ item.port }}"
|
||||
ip_version: "{{ item.ip }}"
|
||||
cidr: "{{ item.cidr }}"
|
||||
with_items:
|
||||
- { protocol: tcp, port: "{{ ssh_port }}", ip: v4, cidr: "0.0.0.0/0" }
|
||||
- { protocol: tcp, port: "{{ ssh_port }}", ip: v6, cidr: "::/0" }
|
||||
- { protocol: udp, port: 500, ip: v4, cidr: "0.0.0.0/0" }
|
||||
- { protocol: udp, port: 500, ip: v6, cidr: "::/0" }
|
||||
- { protocol: udp, port: 4500, ip: v4, cidr: "0.0.0.0/0" }
|
||||
- { protocol: udp, port: 4500, ip: v6, cidr: "::/0" }
|
||||
- { protocol: udp, port: "{{ wireguard_port }}", ip: v4, cidr: "0.0.0.0/0" }
|
||||
- { protocol: udp, port: "{{ wireguard_port }}", ip: v6, cidr: "::/0" }
|
||||
- name: Creating firewall rules
|
||||
vultr_firewall_rule:
|
||||
group: "{{ algo_server_name }}"
|
||||
protocol: "{{ item.protocol }}"
|
||||
port: "{{ item.port }}"
|
||||
ip_version: "{{ item.ip }}"
|
||||
cidr: "{{ item.cidr }}"
|
||||
with_items:
|
||||
- { protocol: tcp, port: "{{ ssh_port }}", ip: v4, cidr: 0.0.0.0/0 }
|
||||
- { protocol: tcp, port: "{{ ssh_port }}", ip: v6, cidr: "::/0" }
|
||||
- { protocol: udp, port: 500, ip: v4, cidr: 0.0.0.0/0 }
|
||||
- { protocol: udp, port: 500, ip: v6, cidr: "::/0" }
|
||||
- { protocol: udp, port: 4500, ip: v4, cidr: 0.0.0.0/0 }
|
||||
- { protocol: udp, port: 4500, ip: v6, cidr: "::/0" }
|
||||
- { protocol: udp, port: "{{ wireguard_port }}", ip: v4, cidr: 0.0.0.0/0 }
|
||||
- { protocol: udp, port: "{{ wireguard_port }}", ip: v6, cidr: "::/0" }
|
||||
|
||||
- name: Upload the startup script
|
||||
vultr_startup_script:
|
||||
name: algo-startup
|
||||
script: |
|
||||
{{ lookup('template', 'files/cloud-init/base.sh') }}
|
||||
mkdir -p /var/lib/cloud/data/ || true
|
||||
touch /var/lib/cloud/data/result.json
|
||||
- name: Upload the startup script
|
||||
vultr_startup_script:
|
||||
name: algo-startup
|
||||
script: |
|
||||
{{ lookup('template', 'files/cloud-init/base.yml') }}
|
||||
|
||||
- name: Creating a server
|
||||
vultr_server:
|
||||
name: "{{ algo_server_name }}"
|
||||
startup_script: algo-startup
|
||||
hostname: "{{ algo_server_name }}"
|
||||
os: "{{ cloud_providers.vultr.os }}"
|
||||
plan: "{{ cloud_providers.vultr.size }}"
|
||||
region: "{{ algo_vultr_region }}"
|
||||
firewall_group: "{{ algo_server_name }}"
|
||||
state: started
|
||||
tag: Environment:Algo
|
||||
ipv6_enabled: true
|
||||
auto_backup_enabled: false
|
||||
notify_activate: false
|
||||
register: vultr_server
|
||||
- name: Creating a server
|
||||
vultr_server:
|
||||
name: "{{ algo_server_name }}"
|
||||
startup_script: algo-startup
|
||||
hostname: "{{ algo_server_name }}"
|
||||
os: "{{ cloud_providers.vultr.os }}"
|
||||
plan: "{{ cloud_providers.vultr.size }}"
|
||||
region: "{{ algo_vultr_region }}"
|
||||
firewall_group: "{{ algo_server_name }}"
|
||||
state: started
|
||||
tag: Environment:Algo
|
||||
ipv6_enabled: true
|
||||
auto_backup_enabled: false
|
||||
notify_activate: false
|
||||
register: vultr_server
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ vultr_server.vultr_server.v4_main_ip }}"
|
||||
ansible_ssh_user: algo
|
||||
ansible_ssh_port: "{{ ssh_port }}"
|
||||
cloudinit: true
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ vultr_server.vultr_server.v4_main_ip }}"
|
||||
ansible_ssh_user: algo
|
||||
ansible_ssh_port: "{{ ssh_port }}"
|
||||
cloudinit: true
|
||||
|
||||
environment:
|
||||
VULTR_API_CONFIG: "{{ algo_vultr_config }}"
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
- name: restart rsyslog
|
||||
service: name=rsyslog state=restarted
|
||||
|
||||
|
|
|
@ -13,13 +13,12 @@
|
|||
|
||||
- name: Gather facts
|
||||
setup:
|
||||
|
||||
- name: Gather additional facts
|
||||
import_tasks: facts.yml
|
||||
|
||||
- name: Set OS specific facts
|
||||
set_fact:
|
||||
config_prefix: "/usr/local/"
|
||||
config_prefix: /usr/local/
|
||||
strongswan_shell: /usr/sbin/nologin
|
||||
strongswan_home: /var/empty
|
||||
root_group: wheel
|
||||
|
@ -50,7 +49,7 @@
|
|||
- name: Loopback included into the rc config
|
||||
blockinfile:
|
||||
dest: /etc/rc.conf
|
||||
create: yes
|
||||
create: true
|
||||
block: |
|
||||
cloned_interfaces="lo100"
|
||||
ifconfig_lo100="inet {{ local_service_ip }} netmask 255.255.255.255"
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
---
|
||||
|
||||
- name: Iptables configured
|
||||
template:
|
||||
src: "{{ item.src }}"
|
||||
|
|
|
@ -6,6 +6,9 @@
|
|||
tags:
|
||||
- update-users
|
||||
|
||||
- fail:
|
||||
when: cloud_test|default(false)|bool
|
||||
|
||||
- include_tasks: ubuntu.yml
|
||||
when: '"Ubuntu" in OS.stdout or "Linux" in OS.stdout'
|
||||
tags:
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
---
|
||||
- name: Gather facts
|
||||
setup:
|
||||
|
||||
- name: Cloud only tasks
|
||||
block:
|
||||
- name: Install software updates
|
||||
|
@ -36,14 +35,14 @@
|
|||
become: false
|
||||
when: algo_provider != "local"
|
||||
|
||||
- name: Include unatteded upgrades configuration
|
||||
- name: Include unattended upgrades configuration
|
||||
import_tasks: unattended-upgrades.yml
|
||||
|
||||
- name: Disable MOTD on login and SSHD
|
||||
replace: dest="{{ item.file }}" regexp="{{ item.regexp }}" replace="{{ item.line }}"
|
||||
with_items:
|
||||
- { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/login' }
|
||||
- { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/sshd' }
|
||||
- { regexp: ^session.*optional.*pam_motd.so.*, line: "# MOTD DISABLED", file: /etc/pam.d/login }
|
||||
- { regexp: ^session.*optional.*pam_motd.so.*, line: "# MOTD DISABLED", file: /etc/pam.d/sshd }
|
||||
|
||||
- name: Ensure fallback resolvers are set
|
||||
ini_file:
|
||||
|
@ -75,7 +74,7 @@
|
|||
|
||||
- name: Check apparmor support
|
||||
command: apparmor_status
|
||||
ignore_errors: yes
|
||||
ignore_errors: true
|
||||
changed_when: false
|
||||
register: apparmor_status
|
||||
|
||||
|
@ -117,9 +116,9 @@
|
|||
apt:
|
||||
name:
|
||||
- linux-headers-generic
|
||||
- "linux-headers-{{ ansible_kernel }}"
|
||||
- linux-headers-{{ ansible_kernel }}
|
||||
state: present
|
||||
when: install_headers
|
||||
when: install_headers | bool
|
||||
|
||||
- name: Configure the alternative ingress ip
|
||||
include_tasks: aip/main.yml
|
||||
|
|
|
@ -95,7 +95,7 @@ COMMIT
|
|||
-A FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
||||
# Drop SMB/CIFS traffic that requests to be forwarded
|
||||
-A FORWARD -p tcp --dport 445 -j {{ "DROP" if block_smb else "ACCEPT" }}
|
||||
# Drop NETBIOS trafic that requests to be forwarded
|
||||
# Drop NETBIOS traffic that requests to be forwarded
|
||||
-A FORWARD -p udp -m multiport --ports 137,138 -j {{ "DROP" if block_netbios else "ACCEPT" }}
|
||||
-A FORWARD -p tcp -m multiport --ports 137,139 -j {{ "DROP" if block_netbios else "ACCEPT" }}
|
||||
|
||||
|
|
|
@ -14,7 +14,7 @@
|
|||
|
||||
/etc/dnscrypt-proxy/** r,
|
||||
/usr/bin/dnscrypt-proxy mr,
|
||||
/tmp/public-resolvers.md* rw,
|
||||
/var/cache/{private/,}dnscrypt-proxy/** rw,
|
||||
|
||||
/tmp/*.tmp w,
|
||||
owner /tmp/*.tmp r,
|
||||
|
|
|
@ -6,4 +6,4 @@
|
|||
- name: Enable mac_portacl
|
||||
lineinfile:
|
||||
path: /etc/rc.conf
|
||||
line: 'dnscrypt_proxy_mac_portacl_enable="YES"'
|
||||
line: dnscrypt_proxy_mac_portacl_enable="YES"
|
||||
|
|
|
@ -1,22 +1,22 @@
|
|||
---
|
||||
- block:
|
||||
- name: Add the repository
|
||||
apt_repository:
|
||||
state: present
|
||||
codename: "{{ ansible_distribution_release }}"
|
||||
repo: ppa:shevchuk/dnscrypt-proxy
|
||||
register: result
|
||||
until: result is succeeded
|
||||
retries: 10
|
||||
delay: 3
|
||||
- name: Add the repository
|
||||
apt_repository:
|
||||
state: present
|
||||
codename: "{{ ansible_distribution_release }}"
|
||||
repo: ppa:shevchuk/dnscrypt-proxy
|
||||
register: result
|
||||
until: result is succeeded
|
||||
retries: 10
|
||||
delay: 3
|
||||
|
||||
- name: Configure unattended-upgrades
|
||||
copy:
|
||||
src: 50-dnscrypt-proxy-unattended-upgrades
|
||||
dest: /etc/apt/apt.conf.d/50-dnscrypt-proxy-unattended-upgrades
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0644
|
||||
- name: Configure unattended-upgrades
|
||||
copy:
|
||||
src: 50-dnscrypt-proxy-unattended-upgrades
|
||||
dest: /etc/apt/apt.conf.d/50-dnscrypt-proxy-unattended-upgrades
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0644
|
||||
when: ansible_facts['distribution_version'] is version('20.04', '<')
|
||||
|
||||
- name: Install dnscrypt-proxy
|
||||
|
@ -26,18 +26,18 @@
|
|||
update_cache: true
|
||||
|
||||
- block:
|
||||
- name: Ubuntu | Configure AppArmor policy for dnscrypt-proxy
|
||||
copy:
|
||||
src: apparmor.profile.dnscrypt-proxy
|
||||
dest: /etc/apparmor.d/usr.bin.dnscrypt-proxy
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0600
|
||||
notify: restart dnscrypt-proxy
|
||||
- name: Ubuntu | Configure AppArmor policy for dnscrypt-proxy
|
||||
copy:
|
||||
src: apparmor.profile.dnscrypt-proxy
|
||||
dest: /etc/apparmor.d/usr.bin.dnscrypt-proxy
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0600
|
||||
notify: restart dnscrypt-proxy
|
||||
|
||||
- name: Ubuntu | Enforce the dnscrypt-proxy AppArmor policy
|
||||
command: aa-enforce usr.bin.dnscrypt-proxy
|
||||
changed_when: false
|
||||
- name: Ubuntu | Enforce the dnscrypt-proxy AppArmor policy
|
||||
command: aa-enforce usr.bin.dnscrypt-proxy
|
||||
changed_when: false
|
||||
tags: apparmor
|
||||
when: apparmor_enabled|default(false)|bool
|
||||
|
||||
|
@ -60,4 +60,4 @@
|
|||
[Service]
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
notify:
|
||||
- restart dnscrypt-proxy
|
||||
- restart dnscrypt-proxy
|
||||
|
|
|
@ -118,11 +118,12 @@ timeout = 2500
|
|||
keepalive = 30
|
||||
|
||||
|
||||
## Use the REFUSED return code for blocked responses
|
||||
## Setting this to `false` means that some responses will be lies.
|
||||
## Unfortunately, `false` appears to be required for Android 8+
|
||||
## Response for blocked queries. Options are `refused`, `hinfo` (default) or
|
||||
## an IP response. To give an IP response, use the format `a:<IPv4>,aaaa:<IPv6>`.
|
||||
## Using the `hinfo` option means that some responses will be lies.
|
||||
## Unfortunately, the `hinfo` option appears to be required for Android 8+
|
||||
|
||||
refused_code_in_responses = false
|
||||
# blocked_query_response = 'refused'
|
||||
|
||||
|
||||
## Load-balancing strategy: 'p2' (default), 'ph', 'first' or 'random'
|
||||
|
@ -523,7 +524,7 @@ cache_neg_max_ttl = 600
|
|||
|
||||
[sources.'public-resolvers']
|
||||
urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v2/public-resolvers.md', 'https://download.dnscrypt.info/resolvers-list/v2/public-resolvers.md']
|
||||
cache_file = '/tmp/public-resolvers.md'
|
||||
cache_file = '/var/cache/dnscrypt-proxy/public-resolvers.md'
|
||||
minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
|
||||
prefix = ''
|
||||
|
||||
|
@ -550,5 +551,10 @@ cache_neg_max_ttl = 600
|
|||
|
||||
[static]
|
||||
|
||||
{% if custom_server_stamps %}{% for name, stamp in custom_server_stamps.items() %}
|
||||
[static.'{{ name }}']
|
||||
stamp = '{{ stamp }}'
|
||||
{%- endfor %}{% endif %}
|
||||
|
||||
# [static.'myserver']
|
||||
# stamp = 'sdns:AQcAAAAAAAAAAAAQMi5kbnNjcnlwdC1jZXJ0Lg'
|
||||
|
|
|
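The `[static]` section above is now templated from a `custom_server_stamps` dictionary. A minimal sketch of how such an entry could look in the Algo configuration, reusing the name and sdns stamp from the commented placeholder in the template (illustrative only):

# Illustrative config entry; the name and sdns stamp mirror the commented
# placeholder shown in the dnscrypt-proxy template above.
custom_server_stamps:
  myserver: sdns:AQcAAAAAAAAAAAAQMi5kbnNjcnlwdC1jZXJ0Lg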
@ -1,4 +1,16 @@
|
|||
---
|
||||
- pause:
|
||||
prompt: "{{ item }}"
|
||||
when: not tests|default(false)|bool
|
||||
tags:
|
||||
- skip_ansible_lint
|
||||
with_items: |
|
||||
https://trailofbits.github.io/algo/deploy-to-ubuntu.html
|
||||
|
||||
Local installation might break your server. Use at your own risk.
|
||||
|
||||
Proceed? Press ENTER to continue or CTRL+C and A to abort...
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter the IP address of your server: (or use localhost for local installation):
|
||||
|
@ -8,25 +20,25 @@
|
|||
|
||||
- name: Set the facts
|
||||
set_fact:
|
||||
cloud_instance_ip: >-
|
||||
cloud_instance_ip: >-
|
||||
{% if server is defined %}{{ server }}
|
||||
{%- elif _algo_server.user_input %}{{ _algo_server.user_input }}
|
||||
{%- else %}localhost{% endif %}
|
||||
|
||||
- block:
|
||||
- pause:
|
||||
prompt: |
|
||||
What user should we use to login on the server? (note: passwordless login required, or ignore if you're deploying to localhost)
|
||||
[root]
|
||||
register: _algo_ssh_user
|
||||
when: ssh_user is undefined
|
||||
- pause:
|
||||
prompt: |
|
||||
What user should we use to login on the server? (note: passwordless login required, or ignore if you're deploying to localhost)
|
||||
[root]
|
||||
register: _algo_ssh_user
|
||||
when: ssh_user is undefined
|
||||
|
||||
- name: Set the facts
|
||||
set_fact:
|
||||
ansible_ssh_user: >-
|
||||
{% if ssh_user is defined %}{{ ssh_user }}
|
||||
{%- elif _algo_ssh_user.user_input %}{{ _algo_ssh_user.user_input }}
|
||||
{%- else %}root{% endif %}
|
||||
- name: Set the facts
|
||||
set_fact:
|
||||
ansible_ssh_user: >-
|
||||
{% if ssh_user is defined %}{{ ssh_user }}
|
||||
{%- elif _algo_ssh_user.user_input %}{{ _algo_ssh_user.user_input }}
|
||||
{%- else %}root{% endif %}
|
||||
when: cloud_instance_ip != "localhost"
|
||||
|
||||
- pause:
|
||||
|
|
|
@ -1,2 +1,2 @@
|
|||
---
|
||||
ssh_tunnels_config_path: "configs/{{ IP_subject_alt_name }}/ssh-tunnel/"
|
||||
ssh_tunnels_config_path: configs/{{ IP_subject_alt_name }}/ssh-tunnel/
|
||||
|
|
|
@ -1,2 +1,3 @@
|
|||
---
|
||||
- name: restart ssh
|
||||
service: name="{{ ssh_service_name|default('ssh') }}" state=restarted
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
- name: Ensure that the sshd_config file has desired options
|
||||
blockinfile:
|
||||
dest: /etc/ssh/sshd_config
|
||||
marker: '# {mark} ANSIBLE MANAGED BLOCK ssh_tunneling_role'
|
||||
marker: "# {mark} ANSIBLE MANAGED BLOCK ssh_tunneling_role"
|
||||
block: |
|
||||
Match Group algo
|
||||
AllowTcpForwarding local
|
||||
|
@ -28,90 +28,90 @@
|
|||
group: "{{ root_group|default('root') }}"
|
||||
|
||||
- block:
|
||||
- name: Ensure that the SSH users exist
|
||||
user:
|
||||
name: "{{ item }}"
|
||||
group: algo
|
||||
home: '/var/jail/{{ item }}'
|
||||
createhome: yes
|
||||
generate_ssh_key: false
|
||||
shell: /bin/false
|
||||
state: present
|
||||
append: yes
|
||||
with_items: "{{ users }}"
|
||||
- name: Ensure that the SSH users exist
|
||||
user:
|
||||
name: "{{ item }}"
|
||||
group: algo
|
||||
home: /var/jail/{{ item }}
|
||||
createhome: true
|
||||
generate_ssh_key: false
|
||||
shell: /bin/false
|
||||
state: present
|
||||
append: true
|
||||
with_items: "{{ users }}"
|
||||
|
||||
- block:
|
||||
- name: Clean up the ssh-tunnel directory
|
||||
file:
|
||||
dest: "{{ ssh_tunnels_config_path }}"
|
||||
- block:
|
||||
- name: Clean up the ssh-tunnel directory
|
||||
file:
|
||||
dest: "{{ ssh_tunnels_config_path }}"
|
||||
state: absent
|
||||
when: keys_clean_all|bool
|
||||
|
||||
- name: Ensure the config directories exist
|
||||
file:
|
||||
dest: "{{ ssh_tunnels_config_path }}"
|
||||
state: directory
|
||||
recurse: true
|
||||
mode: "0700"
|
||||
|
||||
- name: Check if the private keys exist
|
||||
stat:
|
||||
path: "{{ ssh_tunnels_config_path }}/{{ item }}.pem"
|
||||
register: privatekey
|
||||
with_items: "{{ users }}"
|
||||
|
||||
- name: Build ssh private keys
|
||||
openssl_privatekey:
|
||||
path: "{{ ssh_tunnels_config_path }}/{{ item.item }}.pem"
|
||||
passphrase: "{{ p12_export_password }}"
|
||||
cipher: auto
|
||||
force: false
|
||||
no_log: "{{ no_log|bool }}"
|
||||
when: not item.stat.exists
|
||||
with_items: "{{ privatekey.results }}"
|
||||
register: openssl_privatekey
|
||||
|
||||
- name: Build ssh public keys
|
||||
openssl_publickey:
|
||||
path: "{{ ssh_tunnels_config_path }}/{{ item.item.item }}.pub"
|
||||
privatekey_path: "{{ ssh_tunnels_config_path }}/{{ item.item.item }}.pem"
|
||||
privatekey_passphrase: "{{ p12_export_password }}"
|
||||
format: OpenSSH
|
||||
force: true
|
||||
no_log: "{{ no_log|bool }}"
|
||||
when: item.changed
|
||||
with_items: "{{ openssl_privatekey.results }}"
|
||||
|
||||
- name: Build the client ssh config
|
||||
template:
|
||||
src: ssh_config.j2
|
||||
dest: "{{ ssh_tunnels_config_path }}/{{ item }}.ssh_config"
|
||||
mode: 0700
|
||||
with_items: "{{ users }}"
|
||||
delegate_to: localhost
|
||||
become: false
|
||||
|
||||
- name: The authorized keys file created
|
||||
authorized_key:
|
||||
user: "{{ item }}"
|
||||
key: "{{ lookup('file', ssh_tunnels_config_path + '/' + item + '.pub') }}"
|
||||
state: present
|
||||
manage_dir: true
|
||||
exclusive: true
|
||||
with_items: "{{ users }}"
|
||||
|
||||
- name: Get active users
|
||||
getent:
|
||||
database: group
|
||||
key: algo
|
||||
split: ":"
|
||||
|
||||
- name: Delete non-existing users
|
||||
user:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
when: keys_clean_all|bool
|
||||
|
||||
- name: Ensure the config directories exist
|
||||
file:
|
||||
dest: "{{ ssh_tunnels_config_path }}"
|
||||
state: directory
|
||||
recurse: yes
|
||||
mode: '0700'
|
||||
|
||||
- name: Check if the private keys exist
|
||||
stat:
|
||||
path: "{{ ssh_tunnels_config_path }}/{{ item }}.pem"
|
||||
register: privatekey
|
||||
with_items: "{{ users }}"
|
||||
|
||||
- name: Build ssh private keys
|
||||
openssl_privatekey:
|
||||
path: "{{ ssh_tunnels_config_path }}/{{ item.item }}.pem"
|
||||
passphrase: "{{ p12_export_password }}"
|
||||
cipher: aes256
|
||||
force: false
|
||||
no_log: "{{ no_log|bool }}"
|
||||
when: not item.stat.exists
|
||||
with_items: "{{ privatekey.results }}"
|
||||
register: openssl_privatekey
|
||||
|
||||
- name: Build ssh public keys
|
||||
openssl_publickey:
|
||||
path: "{{ ssh_tunnels_config_path }}/{{ item.item.item }}.pub"
|
||||
privatekey_path: "{{ ssh_tunnels_config_path }}/{{ item.item.item }}.pem"
|
||||
privatekey_passphrase: "{{ p12_export_password }}"
|
||||
format: OpenSSH
|
||||
remove: true
|
||||
force: true
|
||||
no_log: "{{ no_log|bool }}"
|
||||
when: item.changed
|
||||
with_items: "{{ openssl_privatekey.results }}"
|
||||
|
||||
- name: Build the client ssh config
|
||||
template:
|
||||
src: ssh_config.j2
|
||||
dest: "{{ ssh_tunnels_config_path }}/{{ item }}.ssh_config"
|
||||
mode: 0700
|
||||
with_items: "{{ users }}"
|
||||
delegate_to: localhost
|
||||
become: false
|
||||
|
||||
- name: The authorized keys file created
|
||||
authorized_key:
|
||||
user: "{{ item }}"
|
||||
key: "{{ lookup('file', ssh_tunnels_config_path + '/' + item + '.pub') }}"
|
||||
state: present
|
||||
manage_dir: true
|
||||
exclusive: true
|
||||
with_items: "{{ users }}"
|
||||
|
||||
- name: Get active users
|
||||
getent:
|
||||
database: group
|
||||
key: algo
|
||||
split: ':'
|
||||
|
||||
- name: Delete non-existing users
|
||||
user:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
remove: yes
|
||||
force: yes
|
||||
when: item not in users
|
||||
with_items: "{{ getent_group['algo'][2].split(',') }}"
|
||||
when: item not in users
|
||||
with_items: "{{ getent_group['algo'][2].split(',') }}"
|
||||
tags: update-users
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
---
|
||||
ipsec_config_path: "configs/{{ IP_subject_alt_name }}/ipsec/"
|
||||
ipsec_config_path: configs/{{ IP_subject_alt_name }}/ipsec/
|
||||
ipsec_pki_path: "{{ ipsec_config_path }}/.pki/"
|
||||
strongswan_shell: /usr/sbin/nologin
|
||||
strongswan_home: /var/lib/strongswan
|
||||
|
@ -7,7 +7,7 @@ strongswan_service: "{{ 'strongswan-starter' if ansible_facts['distribution_vers
|
|||
BetweenClients_DROP: true
|
||||
algo_ondemand_cellular: false
|
||||
algo_ondemand_wifi: false
|
||||
algo_ondemand_wifi_exclude: '_null'
|
||||
algo_ondemand_wifi_exclude: _null
|
||||
algo_dns_adblocking: false
|
||||
ipv6_support: false
|
||||
dns_encryption: true
|
||||
|
@ -16,7 +16,7 @@ subjectAltName_type: "{{ 'DNS' if IP_subject_alt_name|regex_search('[a-z]') else
|
|||
subjectAltName: >-
|
||||
{{ subjectAltName_type }}:{{ IP_subject_alt_name }}
|
||||
{%- if ipv6_support -%},IP:{{ ansible_default_ipv6['address'] }}{%- endif -%}
|
||||
subjectAltName_USER: "email:{{ item }}@{{ openssl_constraint_random_id }}"
|
||||
subjectAltName_USER: email:{{ item }}@{{ openssl_constraint_random_id }}
|
||||
nameConstraints: >-
|
||||
critical,permitted;{{ subjectAltName_type }}:{{ IP_subject_alt_name }}{{- '/255.255.255.255' if subjectAltName_type == 'IP' else '' -}}
|
||||
{%- if subjectAltName_type == 'IP' -%}
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
- name: restart strongswan
|
||||
service: name={{ strongswan_service }} state=restarted
|
||||
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
set -o pipefail
|
||||
cat private/{{ item }}.p12 |
|
||||
base64
|
||||
register: PayloadContent
|
||||
register: PayloadContent
|
||||
changed_when: false
|
||||
args:
|
||||
executable: bash
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
---
|
||||
|
||||
- name: Copy the keys to the strongswan directory
|
||||
copy:
|
||||
src: "{{ ipsec_pki_path }}/{{ item.src }}"
|
||||
|
@ -8,18 +7,18 @@
|
|||
group: "{{ item.group }}"
|
||||
mode: "{{ item.mode }}"
|
||||
with_items:
|
||||
- src: "cacert.pem"
|
||||
dest: "cacerts/ca.crt"
|
||||
- src: cacert.pem
|
||||
dest: cacerts/ca.crt
|
||||
owner: strongswan
|
||||
group: "{{ root_group|default('root') }}"
|
||||
mode: "0600"
|
||||
- src: "certs/{{ IP_subject_alt_name }}.crt"
|
||||
dest: "certs/{{ IP_subject_alt_name }}.crt"
|
||||
- src: certs/{{ IP_subject_alt_name }}.crt
|
||||
dest: certs/{{ IP_subject_alt_name }}.crt
|
||||
owner: strongswan
|
||||
group: "{{ root_group|default('root') }}"
|
||||
mode: "0600"
|
||||
- src: "private/{{ IP_subject_alt_name }}.key"
|
||||
dest: "private/{{ IP_subject_alt_name }}.key"
|
||||
- src: private/{{ IP_subject_alt_name }}.key
|
||||
dest: private/{{ IP_subject_alt_name }}.key
|
||||
owner: strongswan
|
||||
group: "{{ root_group|default('root') }}"
|
||||
mode: "0600"
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
---
|
||||
|
||||
- name: Setup the config files from our templates
|
||||
template:
|
||||
src: "{{ item.src }}"
|
||||
|
@ -9,22 +8,22 @@
|
|||
mode: "{{ item.mode }}"
|
||||
with_items:
|
||||
- src: strongswan.conf.j2
|
||||
dest: "strongswan.conf"
|
||||
dest: strongswan.conf
|
||||
owner: root
|
||||
group: "{{ root_group|default('root') }}"
|
||||
mode: "0644"
|
||||
- src: ipsec.conf.j2
|
||||
dest: "ipsec.conf"
|
||||
dest: ipsec.conf
|
||||
owner: root
|
||||
group: "{{ root_group|default('root') }}"
|
||||
mode: "0644"
|
||||
- src: ipsec.secrets.j2
|
||||
dest: "ipsec.secrets"
|
||||
dest: ipsec.secrets
|
||||
owner: strongswan
|
||||
group: "{{ root_group|default('root') }}"
|
||||
mode: "0600"
|
||||
- src: charon.conf.j2
|
||||
dest: "strongswan.d/charon.conf"
|
||||
dest: strongswan.d/charon.conf
|
||||
owner: root
|
||||
group: "{{ root_group|default('root') }}"
|
||||
mode: "0644"
|
||||
|
@ -44,8 +43,8 @@
|
|||
- name: Disable unneeded plugins
|
||||
lineinfile:
|
||||
dest: "{{ config_prefix|default('/') }}etc/strongswan.d/charon/{{ item }}.conf"
|
||||
regexp: '.*load.*'
|
||||
line: 'load = no'
|
||||
regexp: .*load.*
|
||||
line: load = no
|
||||
state: present
|
||||
notify:
|
||||
- restart strongswan
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
- import_tasks: distribute_keys.yml
|
||||
- import_tasks: client_configs.yml
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
become: false
|
||||
tags: update-users
|
||||
|
||||
- name: strongSwan started
|
||||
|
|
|
@ -1,239 +1,239 @@
|
|||
---
|
||||
- block:
|
||||
- debug: var=subjectAltName
|
||||
- debug: var=subjectAltName
|
||||
|
||||
- name: Ensure the pki directory does not exist
|
||||
file:
|
||||
dest: "{{ ipsec_pki_path }}"
|
||||
state: absent
|
||||
when: keys_clean_all|bool
|
||||
- name: Ensure the pki directory does not exist
|
||||
file:
|
||||
dest: "{{ ipsec_pki_path }}"
|
||||
state: absent
|
||||
when: keys_clean_all|bool
|
||||
|
||||
- name: Ensure the pki directories exist
|
||||
file:
|
||||
dest: "{{ ipsec_pki_path }}/{{ item }}"
|
||||
state: directory
|
||||
recurse: yes
|
||||
mode: '0700'
|
||||
with_items:
|
||||
- ecparams
|
||||
- certs
|
||||
- crl
|
||||
- newcerts
|
||||
- private
|
||||
- public
|
||||
- reqs
|
||||
- name: Ensure the pki directories exist
|
||||
file:
|
||||
dest: "{{ ipsec_pki_path }}/{{ item }}"
|
||||
state: directory
|
||||
recurse: true
|
||||
mode: "0700"
|
||||
with_items:
|
||||
- ecparams
|
||||
- certs
|
||||
- crl
|
||||
- newcerts
|
||||
- private
|
||||
- public
|
||||
- reqs
|
||||
|
||||
- name: Ensure the config directories exist
|
||||
file:
|
||||
dest: "{{ ipsec_config_path }}/{{ item }}"
|
||||
state: directory
|
||||
recurse: yes
|
||||
mode: '0700'
|
||||
with_items:
|
||||
- apple
|
||||
- manual
|
||||
- name: Ensure the config directories exist
|
||||
file:
|
||||
dest: "{{ ipsec_config_path }}/{{ item }}"
|
||||
state: directory
|
||||
recurse: true
|
||||
mode: "0700"
|
||||
with_items:
|
||||
- apple
|
||||
- manual
|
||||
|
||||
- name: Ensure the files exist
|
||||
file:
|
||||
dest: "{{ ipsec_pki_path }}/{{ item }}"
|
||||
state: touch
|
||||
with_items:
|
||||
- ".rnd"
|
||||
- "private/.rnd"
|
||||
- "index.txt"
|
||||
- "index.txt.attr"
|
||||
- "serial"
|
||||
- name: Ensure the files exist
|
||||
file:
|
||||
dest: "{{ ipsec_pki_path }}/{{ item }}"
|
||||
state: touch
|
||||
with_items:
|
||||
- .rnd
|
||||
- private/.rnd
|
||||
- index.txt
|
||||
- index.txt.attr
|
||||
- serial
|
||||
|
||||
- name: Generate the openssl server configs
|
||||
template:
|
||||
src: openssl.cnf.j2
|
||||
dest: "{{ ipsec_pki_path }}/openssl.cnf"
|
||||
- name: Generate the openssl server configs
|
||||
template:
|
||||
src: openssl.cnf.j2
|
||||
dest: "{{ ipsec_pki_path }}/openssl.cnf"
|
||||
|
||||
- name: Build the CA pair
|
||||
shell: >
|
||||
umask 077;
|
||||
{{ openssl_bin }} ecparam -name secp384r1 -out ecparams/secp384r1.pem &&
|
||||
{{ openssl_bin }} req -utf8 -new
|
||||
-newkey ec:ecparams/secp384r1.pem
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName }}"))
|
||||
-keyout private/cakey.pem
|
||||
-out cacert.pem -x509 -days 3650
|
||||
-batch
|
||||
-passout pass:"{{ CA_password }}" &&
|
||||
touch {{ IP_subject_alt_name }}_ca_generated
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
creates: "{{ IP_subject_alt_name }}_ca_generated"
|
||||
executable: bash
|
||||
- name: Build the CA pair
|
||||
shell: >
|
||||
umask 077;
|
||||
{{ openssl_bin }} ecparam -name secp384r1 -out ecparams/secp384r1.pem &&
|
||||
{{ openssl_bin }} req -utf8 -new
|
||||
-newkey ec:ecparams/secp384r1.pem
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName }}"))
|
||||
-keyout private/cakey.pem
|
||||
-out cacert.pem -x509 -days 3650
|
||||
-batch
|
||||
-passout pass:"{{ CA_password }}" &&
|
||||
touch {{ IP_subject_alt_name }}_ca_generated
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
creates: "{{ IP_subject_alt_name }}_ca_generated"
|
||||
executable: bash
|
||||
|
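As a hedged sketch (not part of this change), the generated CA could be inspected right after this step; the task below reuses only `openssl_bin` and `ipsec_pki_path` from the surrounding play:

# Illustrative only: print the subject of the freshly generated CA certificate.
- name: Inspect the generated CA certificate
  command: "{{ openssl_bin }} x509 -noout -subject -in cacert.pem"
  args:
    chdir: "{{ ipsec_pki_path }}"
  changed_when: false
  register: ca_subject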
||||
- name: Copy the CA certificate
|
||||
copy:
|
||||
src: "{{ ipsec_pki_path }}/cacert.pem"
|
||||
dest: "{{ ipsec_config_path }}/manual/cacert.pem"
|
||||
- name: Copy the CA certificate
|
||||
copy:
|
||||
src: "{{ ipsec_pki_path }}/cacert.pem"
|
||||
dest: "{{ ipsec_config_path }}/manual/cacert.pem"
|
||||
|
||||
- name: Generate the serial number
|
||||
shell: echo 01 > serial && touch serial_generated
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
creates: serial_generated
|
||||
- name: Generate the serial number
|
||||
shell: echo 01 > serial && touch serial_generated
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
creates: serial_generated
|
||||
|
||||
- name: Build the server pair
|
||||
shell: >
|
||||
umask 077;
|
||||
{{ openssl_bin }} req -utf8 -new
|
||||
-newkey ec:ecparams/secp384r1.pem
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName }}"))
|
||||
-keyout private/{{ IP_subject_alt_name }}.key
|
||||
-out reqs/{{ IP_subject_alt_name }}.req -nodes
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-subj "/CN={{ IP_subject_alt_name }}" -batch &&
|
||||
{{ openssl_bin }} ca -utf8
|
||||
-in reqs/{{ IP_subject_alt_name }}.req
|
||||
-out certs/{{ IP_subject_alt_name }}.crt
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName }}"))
|
||||
-days 3650 -batch
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-subj "/CN={{ IP_subject_alt_name }}" &&
|
||||
touch certs/{{ IP_subject_alt_name }}_crt_generated
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
creates: certs/{{ IP_subject_alt_name }}_crt_generated
|
||||
executable: bash
|
||||
- name: Build the server pair
|
||||
shell: >
|
||||
umask 077;
|
||||
{{ openssl_bin }} req -utf8 -new
|
||||
-newkey ec:ecparams/secp384r1.pem
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName }}"))
|
||||
-keyout private/{{ IP_subject_alt_name }}.key
|
||||
-out reqs/{{ IP_subject_alt_name }}.req -nodes
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-subj "/CN={{ IP_subject_alt_name }}" -batch &&
|
||||
{{ openssl_bin }} ca -utf8
|
||||
-in reqs/{{ IP_subject_alt_name }}.req
|
||||
-out certs/{{ IP_subject_alt_name }}.crt
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName }}"))
|
||||
-days 3650 -batch
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-subj "/CN={{ IP_subject_alt_name }}" &&
|
||||
touch certs/{{ IP_subject_alt_name }}_crt_generated
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
creates: certs/{{ IP_subject_alt_name }}_crt_generated
|
||||
executable: bash
|
||||
|
||||
- name: Build the client's pair
|
||||
shell: >
|
||||
umask 077;
|
||||
{{ openssl_bin }} req -utf8 -new
|
||||
-newkey ec:ecparams/secp384r1.pem
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName_USER }}"))
|
||||
-keyout private/{{ item }}.key
|
||||
-out reqs/{{ item }}.req -nodes
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-subj "/CN={{ item }}" -batch &&
{{ openssl_bin }} ca -utf8
-in reqs/{{ item }}.req
-out certs/{{ item }}.crt
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName_USER }}"))
-days 3650 -batch
-passin pass:"{{ CA_password }}"
-subj "/CN={{ item }}" &&
touch certs/{{ item }}_crt_generated
args:
chdir: "{{ ipsec_pki_path }}"
creates: certs/{{ item }}_crt_generated
executable: bash
with_items: "{{ users }}"

- name: Build the tests pair
shell: >
umask 077;
{{ openssl_bin }} req -utf8 -new
-newkey ec:ecparams/secp384r1.pem
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName=DNS:google-algo-test-pair.com"))
-keyout private/google-algo-test-pair.com.key
-out reqs/google-algo-test-pair.com.req -nodes
-passin pass:"{{ CA_password }}"
-subj "/CN=google-algo-test-pair.com" -batch &&
{{ openssl_bin }} ca -utf8
-in reqs/google-algo-test-pair.com.req
-out certs/google-algo-test-pair.com.crt
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName=DNS:google-algo-test-pair.com"))
-days 3650 -batch
-passin pass:"{{ CA_password }}"
-subj "/CN=google-algo-test-pair.com" &&
touch certs/google-algo-test-pair.com_crt_generated
args:
chdir: "{{ ipsec_pki_path }}"
creates: certs/google-algo-test-pair.com_crt_generated
executable: bash
when: tests|default(false)|bool

- name: Build openssh public keys
openssl_publickey:
path: "{{ ipsec_pki_path }}/public/{{ item }}.pub"
privatekey_path: "{{ ipsec_pki_path }}/private/{{ item }}.key"
format: OpenSSH
with_items: "{{ users }}"

- name: Build the client's p12
shell: >
umask 077;
{{ openssl_bin }} pkcs12
-in certs/{{ item }}.crt
-inkey private/{{ item }}.key
-export
-name {{ item }}
-out private/{{ item }}.p12
-passout pass:"{{ p12_export_password }}"
args:
chdir: "{{ ipsec_pki_path }}"
executable: bash
with_items: "{{ users }}"
register: p12

- name: Build the client's p12 with the CA cert included
shell: >
umask 077;
{{ openssl_bin }} pkcs12
-in certs/{{ item }}.crt
-inkey private/{{ item }}.key
-export
-name {{ item }}
-out private/{{ item }}_ca.p12
-certfile cacert.pem
-passout pass:"{{ p12_export_password }}"
args:
chdir: "{{ ipsec_pki_path }}"
executable: bash
with_items: "{{ users }}"
register: p12

- name: Copy the p12 certificates
copy:
src: "{{ ipsec_pki_path }}/private/{{ item }}.p12"
dest: "{{ ipsec_config_path }}/manual/{{ item }}.p12"
with_items:
- "{{ users }}"

- name: Get active users
shell: >
grep ^V index.txt |
grep -v "{{ IP_subject_alt_name }}" |
awk '{print $5}' |
sed 's/\/CN=//g'
args:
chdir: "{{ ipsec_pki_path }}"
register: valid_certs

- name: Revoke non-existing users
shell: >
{{ openssl_bin }} ca -gencrl
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName_USER }}"))
-passin pass:"{{ CA_password }}"
-revoke certs/{{ item }}.crt
-out crl/{{ item }}.crt
register: gencrl
args:
chdir: "{{ ipsec_pki_path }}"
creates: crl/{{ item }}.crt
executable: bash
when: item.split('@')[0] not in users
with_items: "{{ valid_certs.stdout_lines }}"

- name: Genereate new CRL file
shell: >
{{ openssl_bin }} ca -gencrl
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName=DNS:{{ IP_subject_alt_name }}"))
-passin pass:"{{ CA_password }}"
-out crl/algo.root.pem
when:
- gencrl is defined
- gencrl.changed
args:
chdir: "{{ ipsec_pki_path }}"
executable: bash
- name: Generate new CRL file
shell: >
{{ openssl_bin }} ca -gencrl
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName=DNS:{{ IP_subject_alt_name }}"))
-passin pass:"{{ CA_password }}"
-out crl/algo.root.pem
when:
- gencrl is defined
- gencrl.changed
args:
chdir: "{{ ipsec_pki_path }}"
executable: bash
delegate_to: localhost
become: no
become: false
vars:
ansible_python_interpreter: "{{ ansible_playbook_python }}"

@ -2,32 +2,31 @@
- name: Set OS specific facts
set_fact:
strongswan_additional_plugins: []

- name: Ubuntu | Install strongSwan
apt:
name: strongswan
state: present
update_cache: yes
install_recommends: yes
update_cache: true
install_recommends: true

- block:
# https://bugs.launchpad.net/ubuntu/+source/strongswan/+bug/1826238
- name: Ubuntu | Charon profile for apparmor configured
copy:
dest: /etc/apparmor.d/local/usr.lib.ipsec.charon
content: ' capability setpcap,'
owner: root
group: root
mode: 0644
notify: restart strongswan
# https://bugs.launchpad.net/ubuntu/+source/strongswan/+bug/1826238
- name: Ubuntu | Charon profile for apparmor configured
copy:
dest: /etc/apparmor.d/local/usr.lib.ipsec.charon
content: " capability setpcap,"
owner: root
group: root
mode: 0644
notify: restart strongswan

- name: Ubuntu | Enforcing ipsec with apparmor
command: aa-enforce "{{ item }}"
changed_when: false
with_items:
- /usr/lib/ipsec/charon
- /usr/lib/ipsec/lookip
- /usr/lib/ipsec/stroke
tags: apparmor
when: apparmor_enabled|default(false)|bool

@ -21,7 +21,7 @@ crl = $dir/crl.pem # The current CRL
private_key = $dir/private/cakey.pem # The private key
RANDFILE = $dir/private/.rand # private random number file

x509_extensions = basic_exts # The extentions to add to the cert
x509_extensions = basic_exts # The extensions to add to the cert

# This allows a V2 CRL. Ancient browsers don't like it, but anything Easy-RSA
# is designed for will. In return, we get the Issuer attached to CRLs.

@ -56,7 +56,7 @@ default_bits = 2048
default_keyfile = privkey.pem
default_md = sha256
distinguished_name = cn_only
x509_extensions = easyrsa_ca # The extentions to add to the self signed cert
x509_extensions = easyrsa_ca # The extensions to add to the self signed cert

# A placeholder to handle the $EXTRA_EXTS feature:
#%EXTRA_EXTS% # Do NOT remove or change this line as $EXTRA_EXTS support requires it

@ -1,6 +1,6 @@
---
wireguard_PersistentKeepalive: 0
wireguard_config_path: "configs/{{ IP_subject_alt_name }}/wireguard/"
wireguard_config_path: configs/{{ IP_subject_alt_name }}/wireguard/
wireguard_pki_path: "{{ wireguard_config_path }}/.pki/"
wireguard_interface: wg0
wireguard_port_avoid: 53

@ -10,7 +10,8 @@ wireguard_dns_servers: >-
{% if algo_dns_adblocking|default(false)|bool or dns_encryption|default(false)|bool %}
{{ local_service_ip }}{{ ', ' + local_service_ipv6 if ipv6_support else '' }}
{% else %}
{% for host in dns_servers.ipv4 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% if ipv6_support %},{% for host in dns_servers.ipv6 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
{% for host in dns_servers.ipv4 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% if ipv6_support %},{% for host in dns_servers.ipv6 %}{{ host }}{% if
not loop.last %},{% endif %}{% endfor %}{% endif %}
{% endif %}
wireguard_client_ip: >-
{{ wireguard_network_ipv4 | ipmath(index|int+2) }}

@ -18,24 +18,24 @@
- "{{ IP_subject_alt_name }}"

- block:
- name: Save private keys
copy:
dest: "{{ wireguard_pki_path }}/private/{{ item['item'] }}"
content: "{{ item['stdout'] }}"
mode: "0600"
no_log: "{{ no_log|bool }}"
when: item.changed
with_items: "{{ wg_genkey['results'] }}"
delegate_to: localhost
become: false

- name: Touch the lock file
file:
dest: "{{ config_prefix|default('/') }}etc/wireguard/private_{{ item }}.lock"
state: touch
with_items:
- "{{ users }}"
- "{{ IP_subject_alt_name }}"
when: wg_genkey.changed

- name: Delete the preshared lock files

@ -57,24 +57,24 @@
- "{{ IP_subject_alt_name }}"

- block:
- name: Save preshared keys
copy:
dest: "{{ wireguard_pki_path }}/preshared/{{ item['item'] }}"
content: "{{ item['stdout'] }}"
mode: "0600"
no_log: "{{ no_log|bool }}"
when: item.changed
with_items: "{{ wg_genpsk['results'] }}"
delegate_to: localhost
become: false

- name: Touch the preshared lock file
file:
dest: "{{ config_prefix|default('/') }}etc/wireguard/preshared_{{ item }}.lock"
state: touch
with_items:
- "{{ users }}"
- "{{ IP_subject_alt_name }}"
when: wg_genpsk.changed

- name: Generate public keys

@ -28,61 +28,61 @@
tags: update-users

- block:
- block:
- name: WireGuard user list updated
lineinfile:
dest: "{{ wireguard_pki_path }}/index.txt"
create: true
mode: "0600"
insertafter: EOF
line: "{{ item }}"
register: lineinfile
with_items: "{{ users }}"

- set_fact:
wireguard_users: "{{ (lookup('file', wireguard_pki_path + 'index.txt')).split('\n') }}"

- name: WireGuard users config generated
template:
src: client.conf.j2
dest: "{{ wireguard_config_path }}/{{ item.1 }}.conf"
mode: "0600"
with_indexed_items: "{{ wireguard_users }}"
when: item.1 in users
vars:
index: "{{ item.0 }}"

- include_tasks: mobileconfig.yml
loop:
- ios
- macos
loop_control:
loop_var: system

- name: Generate QR codes
shell: >
umask 077;
which segno &&
segno --scale=5 --output={{ item.1 }}.png \
"{{ lookup('template', 'client.conf.j2') }}" || true
changed_when: false
with_indexed_items: "{{ wireguard_users }}"
when: item.1 in users
vars:
index: "{{ item.0 }}"
ansible_python_interpreter: "{{ ansible_playbook_python }}"
args:
chdir: "{{ wireguard_config_path }}"
executable: bash
become: false
delegate_to: localhost

- name: WireGuard configured
template:
src: client.conf.j2
dest: "{{ wireguard_config_path }}/{{ item.1 }}.conf"
src: server.conf.j2
dest: "{{ config_prefix|default('/') }}etc/wireguard/{{ wireguard_interface }}.conf"
mode: "0600"
with_indexed_items: "{{ wireguard_users }}"
when: item.1 in users
vars:
index: "{{ item.0 }}"

- name: WireGuard configured
template:
src: server.conf.j2
dest: "{{ config_prefix|default('/') }}etc/wireguard/{{ wireguard_interface }}.conf"
mode: "0600"
notify: restart wireguard
tags: update-users

- name: WireGuard enabled and started

@ -4,7 +4,7 @@
src: mobileconfig.j2
dest: "{{ wireguard_config_path }}/apple/{{ system }}/{{ item.1 }}.mobileconfig"
mode: "0600"
with_indexed_items: "{{ wireguard_users }}"
when: item.1 in users
vars:
index: "{{ item.0 }}"

@ -7,5 +7,5 @@

- name: Set OS specific facts
set_fact:
service_name: "wg-quick@{{ wireguard_interface }}"
service_name: wg-quick@{{ wireguard_interface }}
tags: always

207
server.yml
@ -7,117 +7,116 @@
- config.cfg
tasks:
- block:
- name: Wait until the cloud-init completed
wait_for:
path: /var/lib/cloud/data/result.json
delay: 10
timeout: 600
state: present
become: false
when: cloudinit

- block:
- name: Ensure the config directory exists
file:
dest: "configs/{{ IP_subject_alt_name }}"
state: directory
mode: "0700"
- block:
- name: Ensure the config directory exists
file:
dest: configs/{{ IP_subject_alt_name }}
state: directory
mode: "0700"

- name: Dump the ssh config
copy:
dest: "configs/{{ IP_subject_alt_name }}/ssh_config"
mode: "0600"
content: |
Host {{ IP_subject_alt_name }} {{ algo_server_name }}
HostName {{ IP_subject_alt_name }}
User {{ ansible_ssh_user }}
Port {{ ansible_ssh_port }}
IdentityFile {{ SSH_keys.private | realpath }}
KeepAlive yes
ServerAliveInterval 30
when: inventory_hostname != 'localhost'
become: false
delegate_to: localhost

- import_role:
name: common
tags: common

- import_role:
name: dns
when:
- algo_dns_adblocking or
dns_encryption
tags: dns

- import_role:
name: wireguard
when: wireguard_enabled
tags: wireguard

- import_role:
name: strongswan
when: ipsec_enabled
tags: ipsec

- import_role:
name: ssh_tunneling
when: algo_ssh_tunneling
tags: ssh_tunneling

- block:
- name: Dump the configuration
copy:
dest: "configs/{{ IP_subject_alt_name }}/.config.yml"
content: |
server: {{ 'localhost' if inventory_hostname == 'localhost' else inventory_hostname }}
server_user: {{ ansible_ssh_user }}
ansible_ssh_port: "{{ ansible_ssh_port|default(22) }}"
{% if algo_provider != "local" %}
ansible_ssh_private_key_file: {{ SSH_keys.private }}
{% endif %}
algo_provider: {{ algo_provider }}
algo_server_name: {{ algo_server_name }}
algo_ondemand_cellular: {{ algo_ondemand_cellular }}
algo_ondemand_wifi: {{ algo_ondemand_wifi }}
algo_ondemand_wifi_exclude: {{ algo_ondemand_wifi_exclude }}
algo_dns_adblocking: {{ algo_dns_adblocking }}
algo_ssh_tunneling: {{ algo_ssh_tunneling }}
algo_store_pki: {{ algo_store_pki }}
IP_subject_alt_name: {{ IP_subject_alt_name }}
ipsec_enabled: {{ ipsec_enabled }}
wireguard_enabled: {{ wireguard_enabled }}
{% if tests|default(false)|bool %}
ca_password: '{{ CA_password }}'
p12_password: '{{ p12_export_password }}'
{% endif %}
- name: Dump the ssh config
copy:
dest: configs/{{ IP_subject_alt_name }}/ssh_config
mode: "0600"
content: |
Host {{ IP_subject_alt_name }} {{ algo_server_name }}
HostName {{ IP_subject_alt_name }}
User {{ ansible_ssh_user }}
Port {{ ansible_ssh_port }}
IdentityFile {{ SSH_keys.private | realpath }}
KeepAlive yes
ServerAliveInterval 30
when: inventory_hostname != 'localhost'
become: false
delegate_to: localhost

- name: Create a symlink if deploying to localhost
file:
src: "{{ IP_subject_alt_name }}"
dest: configs/localhost
state: link
force: true
when: inventory_hostname == 'localhost'
- import_role:
name: common
tags: common

- name: Import tmpfs tasks
import_tasks: playbooks/tmpfs/umount.yml
become: false
delegate_to: localhost
vars:
facts: "{{ hostvars['localhost'] }}"
- import_role:
name: dns
when:
- pki_in_tmpfs
- not algo_store_pki
- algo_dns_adblocking or dns_encryption
tags: dns

- debug:
msg:
- "{{ congrats.common.split('\n') }}"
- " {{ congrats.p12_pass if algo_ssh_tunneling or ipsec_enabled else '' }}"
- " {{ congrats.ca_key_pass if algo_store_pki and ipsec_enabled else '' }}"
- " {{ congrats.ssh_access if algo_provider != 'local' else ''}}"
tags: always
- import_role:
name: wireguard
when: wireguard_enabled
tags: wireguard

- import_role:
name: strongswan
when: ipsec_enabled
tags: ipsec

- import_role:
name: ssh_tunneling
when: algo_ssh_tunneling
tags: ssh_tunneling

- block:
- name: Dump the configuration
copy:
dest: configs/{{ IP_subject_alt_name }}/.config.yml
content: |
server: {{ 'localhost' if inventory_hostname == 'localhost' else inventory_hostname }}
server_user: {{ ansible_ssh_user }}
ansible_ssh_port: "{{ ansible_ssh_port|default(22) }}"
{% if algo_provider != "local" %}
ansible_ssh_private_key_file: {{ SSH_keys.private }}
{% endif %}
algo_provider: {{ algo_provider }}
algo_server_name: {{ algo_server_name }}
algo_ondemand_cellular: {{ algo_ondemand_cellular }}
algo_ondemand_wifi: {{ algo_ondemand_wifi }}
algo_ondemand_wifi_exclude: {{ algo_ondemand_wifi_exclude }}
algo_dns_adblocking: {{ algo_dns_adblocking }}
algo_ssh_tunneling: {{ algo_ssh_tunneling }}
algo_store_pki: {{ algo_store_pki }}
IP_subject_alt_name: {{ IP_subject_alt_name }}
ipsec_enabled: {{ ipsec_enabled }}
wireguard_enabled: {{ wireguard_enabled }}
{% if tests|default(false)|bool %}
ca_password: '{{ CA_password }}'
p12_password: '{{ p12_export_password }}'
{% endif %}
become: false
delegate_to: localhost

- name: Create a symlink if deploying to localhost
file:
src: "{{ IP_subject_alt_name }}"
dest: configs/localhost
state: link
force: true
when: inventory_hostname == 'localhost'

- name: Import tmpfs tasks
import_tasks: playbooks/tmpfs/umount.yml
become: false
delegate_to: localhost
vars:
facts: "{{ hostvars['localhost'] }}"
when:
- pki_in_tmpfs
- not algo_store_pki

- debug:
msg:
- "{{ congrats.common.split('\n') }}"
- " {{ congrats.p12_pass if algo_ssh_tunneling or ipsec_enabled else '' }}"
- " {{ congrats.ca_key_pass if algo_store_pki and ipsec_enabled else '' }}"
- " {{ congrats.ssh_access if algo_provider != 'local' else ''}}"
tags: always
rescue:
- include_tasks: playbooks/rescue.yml

@ -1 +0,0 @@
dhcp-host=algo,10.0.8.100

@ -1,16 +0,0 @@
USE_LXD_BRIDGE="true"
LXD_BRIDGE="lxdbr0"
UPDATE_PROFILE="true"
LXD_CONFILE="/etc/default/algo.conf"
LXD_DOMAIN="lxd"
LXD_IPV4_ADDR="10.0.8.1"
LXD_IPV4_NETMASK="255.255.255.0"
LXD_IPV4_NETWORK="10.0.8.0/24"
LXD_IPV4_DHCP_RANGE="10.0.8.2,10.0.8.254"
LXD_IPV4_DHCP_MAX="250"
LXD_IPV4_NAT="true"
LXD_IPV6_ADDR=""
LXD_IPV6_MASK=""
LXD_IPV6_NETWORK=""
LXD_IPV6_NAT="false"
LXD_IPV6_PROXY="false"

@ -4,10 +4,6 @@ set -euxo pipefail

sysctl net.ipv6.conf.all.disable_ipv6=0

tar xf $HOME/lxc/cache.tar -C / || echo "Didn't extract cache."
cp -f tests/lxd-bridge /etc/default/lxd-bridge
cp -f tests/algo.conf /etc/default/algo.conf

export REPOSITORY=${REPOSITORY:-${GITHUB_REPOSITORY}}
export _BRANCH=${BRANCH#refs/heads/}
export BRANCH=${_BRANCH:-${GITHUB_REF#refs/heads/}}

@ -18,16 +14,16 @@ else
echo -e "#cloud-config\nssh_authorized_keys:\n - $(cat ~/.ssh/id_rsa.pub)" | lxc profile set default user.user-data -
fi

systemctl restart lxd-bridge.service lxd-containers.service lxd.service
lxc network set lxdbr0 ipv4.address 10.0.8.1/24

lxc profile set default raw.lxc lxc.aa_profile=unconfined
lxc profile set default raw.lxc 'lxc.apparmor.profile = unconfined'
lxc profile set default security.privileged true
lxc profile show default
lxc launch ubuntu:${UBUNTU_VERSION} algo

if [[ ${UBUNTU_VERSION} == "20.04" ]]; then
lxc exec algo -- apt remove snapd --purge -y || true
fi
lxc init ubuntu:${UBUNTU_VERSION} algo
lxc network attach lxdbr0 algo eth0 eth0
lxc config device set algo eth0 ipv4.address 10.0.8.100
lxc start algo

ip addr

@ -35,4 +31,13 @@ until dig A +short algo.lxd @10.0.8.1 | grep -vE '^$' > /dev/null; do
sleep 3
done

case ${UBUNTU_VERSION} in
20.04)
lxc exec algo -- apt remove snapd --purge -y || true
;;
18.04)
lxc exec algo -- apt install python3.8 -y
;;
esac

lxc list

72
users.yml
@ -1,6 +1,6 @@
---
- hosts: localhost
gather_facts: False
gather_facts: false
tags: always
vars_files:
- config.cfg

@ -13,7 +13,7 @@
depth: 2
recurse: true
hidden: true
patterns: ".config.yml"
patterns: .config.yml
register: _configs_list

- name: Verify servers

@ -50,23 +50,23 @@

- name: Import host specific variables
include_vars:
file: "configs/{{ algo_server }}/.config.yml"
file: configs/{{ algo_server }}/.config.yml

- when: ipsec_enabled
block:
- name: CA password prompt
pause:
prompt: Enter the password for the private CA key
echo: false
register: _ca_password
when: ca_password is undefined

- name: Set facts based on the input
set_fact:
CA_password: >-
{% if ca_password is defined %}{{ ca_password }}
{%- elif _ca_password.user_input %}{{ _ca_password.user_input }}
{%- else %}omit{% endif %}

- name: Local pre-tasks
import_tasks: playbooks/cloud-pre.yml

@ -78,7 +78,7 @@
groups: vpn-host
ansible_ssh_user: "{{ server_user|default('root') }}"
ansible_connection: "{% if algo_server == 'localhost' %}local{% else %}ssh{% endif %}"
ansible_python_interpreter: "/usr/bin/python3"
ansible_python_interpreter: /usr/bin/python3
CA_password: "{{ CA_password|default(omit) }}"
rescue:
- include_tasks: playbooks/rescue.yml

@ -89,32 +89,32 @@
become: true
vars_files:
- config.cfg
- "configs/{{ inventory_hostname }}/.config.yml"
- configs/{{ inventory_hostname }}/.config.yml

tasks:
- block:
- import_role:
name: common

- import_role:
name: wireguard
when: wireguard_enabled

- import_role:
name: strongswan
when: ipsec_enabled
tags: ipsec

- import_role:
name: ssh_tunneling
when: algo_ssh_tunneling

- debug:
msg:
- "{{ congrats.common.split('\n') }}"
- " {{ congrats.p12_pass if algo_ssh_tunneling or ipsec_enabled else '' }}"
- " {{ congrats.ca_key_pass if algo_store_pki and ipsec_enabled else '' }}"
- " {{ congrats.ssh_access if algo_provider != 'local' else ''}}"
tags: always
rescue:
- include_tasks: playbooks/rescue.yml