.github/ISSUE_TEMPLATE.md  (vendored, 36 lines changed)
@@ -1,36 +0,0 @@
### OS / Environment (where do you run Algo on)
<!---
Run the command `uname -a` and put the output here
-->

```
PUT THE OUTPUT HERE
```

### Cloud Provider (where do you deploy Algo to)
<!---
If you deploy to a cloud provider specify the provider here.
--->

```
PUT THE OUTPUT HERE
```

### Summary of the problem
<!--- Describe the problem -->



### Steps to reproduce the behavior
<!--- Describe the steps how to reproduce the problem. -->

1. Do this..
2. Do that..
3.

### Full log
<!--- Put here the FULL LOG after you run the ./algo script -->

```
PUT THE OUTPUT HERE
```
.github/ISSUE_TEMPLATE/bug_report.md  (vendored, new file, 32 lines)
@@ -0,0 +1,32 @@
---
name: Bug report
about: Create a report to help us improve

---

**Describe the bug**

A clear and concise description of what the bug is.

**To Reproduce**

Steps to reproduce the behavior:
1. Do this..
2. Do that..
3. ..

**Expected behavior**

A clear and concise description of what you expected to happen.

**Additional context**

Add any other context about the problem here.

**Full log**

<!--- Put here the FULL LOG after you run the ./algo script below -->

```
PUT THE OUTPUT HERE
```
.github/ISSUE_TEMPLATE/feature_request.md  (vendored, new file, 17 lines)
@@ -0,0 +1,17 @@
---
name: Feature request
about: Suggest an idea for this project

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
.travis.yml  (54 lines changed)
@@ -13,15 +13,23 @@ matrix:
addons:
  apt:
    sources:
      - sourceline: 'ppa:ubuntu-lxc/stable'
      - sourceline: 'ppa:ubuntu-lxc/stable'
      - sourceline: 'ppa:wireguard/wireguard'
    packages:
      - python-pip
      - lxc
      - lxc-templates
      - expect-dev
      - debootstrap
      - shellcheck
      - tree
      - python-pip
      - lxd
      - expect-dev
      - debootstrap
      - shellcheck
      - tree
      - bridge-utils
      - dnsutils
      - build-essential
      - libssl-dev
      - libffi-dev
      - python-dev
      - linux-headers-$(uname -r)
      - wireguard-dkms

cache:
  directories:

@@ -30,29 +38,25 @@ cache:

before_cache:
  - mkdir $HOME/lxc
  - sudo tar cf $HOME/lxc/cache.tar /var/cache/lxc/
  - sudo tar cf $HOME/lxc/cache.tar /var/lib/lxd/images/
  - sudo chown $USER. $HOME/lxc/cache.tar

env:
  - LXC_NAME=ubuntu1604 LXC_DISTRO=ubuntu LXC_RELEASE=xenial
  - LXC_NAME=ubuntu1710 LXC_DISTRO=ubuntu LXC_RELEASE=artful
  - LXC_NAME=docker LXC_DISTRO=ubuntu LXC_RELEASE=artful
  - LXC_NAME=docker LXC_DISTRO=ubuntu LXC_RELEASE=18.04

before_install:
  - test "${LXC_NAME}" != "docker" || docker build -t travis/algo .
  - test "${LXC_NAME}" != "docker" && sudo modprobe wireguard || docker build -t travis/algo .

install:
  - sudo tar xf $HOME/lxc/cache.tar -C / || echo "Didn't extract cache."
  - export LXC_ROOTFS=/var/lib/lxc/$LXC_NAME/rootfs
  - 'sudo lxc-create -n $LXC_NAME -t ubuntu -- -r $LXC_RELEASE --mirror http://mirrors.us.kernel.org/ubuntu --packages python || true'
  - 'sudo lxc-start -n $LXC_NAME && until (sudo lxc-info -n $LXC_NAME | grep -q ^IP:); do printf . && sleep 1; done && sleep 2'
  - export LXC_IP="$(sudo lxc-info -Hin $LXC_NAME)"
  - sudo /bin/bash -c "printf '\n$LXC_IP test.lxc\n' >> /etc/hosts"
  - ssh-keygen -f ~/.ssh/id_rsa -t rsa -N ''
  - chmod 0644 ~/.ssh/config
  - sudo mkdir -vm 0700 $LXC_ROOTFS/root/.ssh/
  - sudo cp -v ~/.ssh/id_rsa.pub $LXC_ROOTFS/root/.ssh/authorized_keys
  - sudo apt-get install build-essential libssl-dev libffi-dev python-dev
  - echo -e "#cloud-config\nssh_authorized_keys:\n - $(cat ~/.ssh/id_rsa.pub)" | sudo lxc profile set default user.user-data -
  - sudo cp -f tests/lxd-bridge /etc/default/lxd-bridge
  - sudo service lxd restart
  - sudo lxc launch ${LXC_DISTRO}:${LXC_RELEASE} ${LXC_NAME}
  - until host ${LXC_NAME}.lxd 10.0.8.1 -t A; do sleep 3; done
  - export LXC_IP="$(dig ${LXC_NAME}.lxd @10.0.8.1 +short)"
  - pip install -r requirements.txt
  - pip install ansible-lint
  - gem install awesome_bot

@@ -61,12 +65,10 @@ install:

script:
  # - awesome_bot --allow-dupe --skip-save-results *.md docs/*.md --white-list paypal.com,do.co,microsoft.com,https://github.com/trailofbits/algo/archive/master.zip,https://github.com/trailofbits/algo/issues/new
  # - shellcheck algo
  # - ansible-lint deploy.yml users.yml deploy_client.yml
  - ansible-playbook deploy.yml --syntax-check
  # - shellcheck algo
  # - ansible-lint main.yml users.yml deploy_client.yml
  - ansible-playbook main.yml --syntax-check
  - ./tests/local-deploy.sh

after_script:
  - ./tests/update-users.sh

notifications:
CHANGELOG.md  (42 lines changed)
@@ -1,3 +1,45 @@
## 20 Oct 2018
### Added
- AWS Lightsail

## 7 Sep 2018
### Changed
- Azure: Deployment via Azure Resource Manager

## 27 Aug 2018
### Changed
- Large refactor to support Ansible 2.5. [Details](https://github.com/trailofbits/algo/pull/976)
- Add a new cloud provider - Vultr

### Upgrade notes
- If any problems are encountered, follow the [instructions](https://github.com/trailofbits/algo#deploy-the-algo-server) from scratch
- You can't update users on your old servers with the new code. Use the old code before this release or rebuild the server from scratch
- Update AWS IAM permissions for your user as per [issue](https://github.com/trailofbits/algo/issues/1079#issuecomment-416577599)

## 04 Jun 2018
### Changed
- Switched to [new cipher suite](https://github.com/trailofbits/algo/issues/981)

## 24 May 2018
### Changed
- Switched to Ubuntu 18.04

### Removed
- Lightsail support until they have Ubuntu 18.04

### Fixed
- Scaleway API pagination

## 30 Apr 2018
### Added
- WireGuard support

### Removed
- Android StrongSwan profiles

### Release notes
- StrongSwan profiles for Android are deprecated now. Use WireGuard

## 25 Apr 2018
### Added
- DNScrypt-proxy added
PULL_REQUEST_TEMPLATE.md  (new file, 29 lines)
@@ -0,0 +1,29 @@
<!--- Provide a general summary of your changes in the Title above -->

## Description
<!--- Describe your changes in detail -->

## Motivation and Context
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. -->

## How Has This Been Tested?
<!--- Please describe in detail how you tested your changes. -->
<!--- Include details of your testing environment, tests ran to see how -->
<!--- your change affects other areas of the code, etc. -->

## Types of changes
<!--- What types of changes does your code introduce? Put an `x` in all the boxes that apply: -->
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)

## Checklist:
<!--- Go over all the following points, and put an `x` in all the boxes that apply. -->
<!--- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
- [ ] I have read the **CONTRIBUTING** document.
- [ ] My code follows the code style of this project.
- [ ] My change requires a change to the documentation.
- [ ] I have updated the documentation accordingly.
- [ ] I have added tests to cover my changes.
- [ ] All new and existing tests passed.
README.md  (19 lines changed)
@@ -8,13 +8,13 @@ Algo VPN is a set of Ansible scripts that simplify the setup of a personal IPSEC

## Features

* Supports only IKEv2 with strong crypto: AES-GCM, SHA2, and P-256
* Supports only IKEv2 with strong crypto (AES-GCM, SHA2, and P-256) and [WireGuard](https://www.wireguard.com/)
* Generates Apple profiles to auto-configure iOS and macOS devices
* Includes a helper script to add and remove users
* Blocks ads with a local DNS resolver (optional)
* Sets up limited SSH users for tunneling traffic (optional)
* Based on current versions of Ubuntu and strongSwan
* Installs to DigitalOcean, Amazon Lightsail, Amazon EC2, Microsoft Azure, Google Compute Engine, Scaleway, OpenStack or your own Ubuntu 16.04 LTS server
* Installs to DigitalOcean, Amazon Lightsail, Amazon EC2, Vultr, Microsoft Azure, Google Compute Engine, Scaleway, OpenStack or your own Ubuntu 18.04 LTS server

## Anti-features

@@ -29,7 +29,7 @@ Algo VPN is a set of Ansible scripts that simplify the setup of a personal IPSEC

The easiest way to get an Algo server running is to let it set up a _new_ virtual machine in the cloud for you.

1. **Setup an account on a cloud hosting provider.** Algo supports [DigitalOcean](https://m.do.co/c/4d7f4ff9cfe4) (most user friendly), [Amazon Lightsail](https://aws.amazon.com/lightsail/), [Amazon EC2](https://aws.amazon.com/), [Microsoft Azure](https://azure.microsoft.com/), [Google Compute Engine](https://cloud.google.com/compute/), [Scaleway](https://www.scaleway.com/) and [OpenStack](https://www.openstack.org/).
1. **Setup an account on a cloud hosting provider.** Algo supports [DigitalOcean](https://m.do.co/c/4d7f4ff9cfe4) (most user friendly), [Amazon EC2](https://aws.amazon.com/), [Vultr](https://www.vultr.com/), [Microsoft Azure](https://azure.microsoft.com/), [Google Compute Engine](https://cloud.google.com/compute/), [Scaleway](https://www.scaleway.com/) and [DreamCompute](https://www.dreamhost.com/cloud/computing/) or other OpenStack-based cloud hosting.

2. **[Download Algo](https://github.com/trailofbits/algo/archive/master.zip).** Unzip it in a convenient location on your local machine.

@@ -97,7 +97,7 @@ Certificates and configuration files that users will need are placed in the `con

### Android Devices

No version of Android supports IKEv2. Install the [strongSwan VPN Client for Android 4 and newer](https://play.google.com/store/apps/details?id=org.strongswan.android). Import the corresponding user.p12 certificate to your device. See the [Android setup instructions](/docs/client-android.md) for more a more detailed walkthrough.
WireGuard is used to provide VPN services on Android. Install the [WireGuard VPN Client](https://play.google.com/store/apps/details?id=com.wireguard.android). Import the corresponding `wireguard/<name>.conf` file to your device, then setup a new connection with it. See the [Android setup instructions](/docs/client-android.md) for a more detailed walkthrough.

### Windows 10

@@ -116,7 +116,7 @@ Network Manager does not support AES-GCM. In order to support Linux Desktop clie

Install strongSwan, then copy the included ipsec_user.conf, ipsec_user.secrets, user.crt (user certificate), and user.key (private key) files to your client device. These will require customization based on your exact use case. These files were originally generated with a point-to-point OpenWRT-based VPN in mind.

#### Ubuntu Server 16.04 example
#### Ubuntu Server 18.04 example

1. `sudo apt-get install strongswan strongswan-plugin-openssl`: install strongSwan
2. `/etc/ipsec.d/certs`: copy `<name>.crt` from `algo-master/configs/<server_ip>/pki/certs/<name>.crt`

@@ -132,11 +132,13 @@ One common use case is to let your server access your local LAN without going th

    conn lan-passthrough
    leftsubnet=192.168.1.1/24 # Replace with your LAN subnet
    rightsubnet=192.168.1.1/24 # Replac with your LAND subnet
    rightsubnet=192.168.1.1/24 # Replace with your LAN subnet
    authby=never # No authentication necessary
    type=pass # passthrough
    auto=route # no need to ipsec up lan-passthrough

To configure the connection to come up at boot time replace `auto=add` with `auto=start`.
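If you want to confirm that the passthrough policy is actually in place, strongSwan can report it directly on the server; a minimal check, assuming the connection is named `lan-passthrough` as above and `192.168.1.1` is a host on your LAN:

```shell
# On the Algo server: confirm the lan-passthrough policy has been loaded/routed
sudo ipsec statusall | grep -A 1 lan-passthrough

# Traffic to the LAN should now bypass the tunnel; verify from the server
ping -c 3 192.168.1.1   # replace with a host on your LAN subnet
```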
### Other Devices

Depending on the platform, you may need one or multiple of the following files.

@@ -190,12 +192,15 @@ After this process completes, the Algo VPN server will contains only the users l
* Client setup
  - Setup [Android](docs/client-android.md) clients
  - Setup [Generic/Linux](docs/client-linux.md) clients with Ansible
  - Setup Ubuntu clients to use [WireGuard](docs/client-linux-wireguard.md)
* Cloud setup
  - Configure [Amazon EC2](docs/cloud-amazon-ec2.md)
  - Configure [Azure](docs/cloud-azure.md)
  - Configure [DigitalOcean](docs/cloud-do.md)
  - Configure [Google Cloud Platform](docs/cloud-gce.md)
* Advanced Deployment
  - Deploy to your own [FreeBSD](docs/deploy-to-freebsd.md) server
  - Deploy to your own [Ubuntu 16.04](docs/deploy-to-ubuntu.md) server
  - Deploy to your own [Ubuntu 18.04](docs/deploy-to-ubuntu.md) server
  - Deploy to an [unsupported cloud provider](docs/deploy-to-unsupported-cloud.md)
* [FAQ](docs/faq.md)
* [Troubleshooting](docs/troubleshooting.md)
algo  (617 lines changed)
@@ -14,618 +14,9 @@ then
fi
fi

SKIP_TAGS="_null encrypted"
ADDITIONAL_PROMPT="[pasted values will not be displayed]"

additional_roles () {

  read -p "
Do you want macOS/iOS clients to enable \"VPN On Demand\" when connected to cellular networks?
[y/N]: " -r OnDemandEnabled_Cellular
  OnDemandEnabled_Cellular=${OnDemandEnabled_Cellular:-n}
  if [[ "$OnDemandEnabled_Cellular" =~ ^(y|Y)$ ]]; then EXTRA_VARS+=" OnDemandEnabled_Cellular=Y"; fi

  read -p "
Do you want macOS/iOS clients to enable \"VPN On Demand\" when connected to Wi-Fi?
[y/N]: " -r OnDemandEnabled_WIFI
  OnDemandEnabled_WIFI=${OnDemandEnabled_WIFI:-n}
  if [[ "$OnDemandEnabled_WIFI" =~ ^(y|Y)$ ]]; then EXTRA_VARS+=" OnDemandEnabled_WIFI=Y"; fi

  if [[ "$OnDemandEnabled_WIFI" =~ ^(y|Y)$ ]]; then
    read -p "
List the names of trusted Wi-Fi networks (if any) that macOS/iOS clients exclude from using the VPN (e.g., your home network. Comma-separated value, e.g., HomeNet,OfficeWifi,AlgoWiFi)
: " -r OnDemandEnabled_WIFI_EXCLUDE
    OnDemandEnabled_WIFI_EXCLUDE=${OnDemandEnabled_WIFI_EXCLUDE:-_null}
    EXTRA_VARS+=" OnDemandEnabled_WIFI_EXCLUDE=\"$OnDemandEnabled_WIFI_EXCLUDE\""
  fi

  read -p "
Do you want to install a DNS resolver on this VPN server, to block ads while surfing?
[y/N]: " -r dns_enabled
  dns_enabled=${dns_enabled:-n}
  if [[ "$dns_enabled" =~ ^(y|Y)$ ]]; then ROLES+=" dns"; EXTRA_VARS+=" local_dns=true"; fi

  read -p "
Do you want each user to have their own account for SSH tunneling?
[y/N]: " -r ssh_tunneling_enabled
  ssh_tunneling_enabled=${ssh_tunneling_enabled:-n}
  if [[ "$ssh_tunneling_enabled" =~ ^(y|Y)$ ]]; then ROLES+=" ssh_tunneling"; fi

  read -p "
Do you want the VPN to support Windows 10 or Linux Desktop clients? (enables compatible ciphers and key exchange, less secure)
[y/N]: " -r Win10_Enabled
  Win10_Enabled=${Win10_Enabled:-n}
  if [[ "$Win10_Enabled" =~ ^(y|Y)$ ]]; then EXTRA_VARS+=" Win10_Enabled=Y"; fi

  read -p "
Do you want to retain the CA key? (required to add users in the future, but less secure)
[y/N]: " -r Store_CAKEY
  Store_CAKEY=${Store_CAKEY:-N}
  if [[ "$Store_CAKEY" =~ ^(n|N)$ ]]; then EXTRA_VARS+=" Store_CAKEY=N"; fi

}

deploy () {

  ansible-playbook deploy.yml -t "${ROLES// /,}" -e "${EXTRA_VARS}" --skip-tags "${SKIP_TAGS// /,}"

}

azure () {
  read -p "
Enter your azure secret id (https://github.com/trailofbits/algo/blob/master/docs/cloud-azure.md)
You can skip this step if you want to use your defaults credentials from ~/.azure/credentials
$ADDITIONAL_PROMPT
[...]: " -rs azure_secret

  read -p "

Enter your azure tenant id (https://github.com/trailofbits/algo/blob/master/docs/cloud-azure.md)
You can skip this step if you want to use your defaults credentials from ~/.azure/credentials
$ADDITIONAL_PROMPT
[...]: " -rs azure_tenant

  read -p "

Enter your azure client id (application id) (https://github.com/trailofbits/algo/blob/master/docs/cloud-azure.md)
You can skip this step if you want to use your defaults credentials from ~/.azure/credentials
$ADDITIONAL_PROMPT
[...]: " -rs azure_client_id

  read -p "

Enter your azure subscription id (https://github.com/trailofbits/algo/blob/master/docs/cloud-azure.md)
You can skip this step if you want to use your defaults credentials from ~/.azure/credentials
$ADDITIONAL_PROMPT
[...]: " -rs azure_subscription_id

  read -p "

Name the vpn server:
[algo]: " -r azure_server_name
  azure_server_name=${azure_server_name:-algo}

  read -p "

What region should the server be located in? (https://azure.microsoft.com/en-us/regions/)
1. East US (Virginia)
2. East US 2 (Virginia)
3. Central US (Iowa)
4. North Central US (Illinois)
5. South Central US (Texas)
6. West Central US (Wyoming)
7. West US (California)
8. West US 2 (Washington)
9. Canada East (Quebec City)
10. Canada Central (Toronto)
11. Brazil South (Sao Paulo State)
12. North Europe (Ireland)
13. West Europe (Netherlands)
14. France Central (Paris)
15. France South (Marseille)
16. UK West (Cardiff)
17. UK South (London)
18. Germany Central (Frankfurt)
19. Germany Northeast (Magdeburg)
20. Southeast Asia (Singapore)
21. East Asia (Hong Kong)
22. Australia East (New South Wales)
23. Australia Southeast (Victoria)
24. Australia Central (Canberra)
25. Australia Central 2 (Canberra)
26. Central India (Pune)
27. West India (Mumbai)
28. South India (Chennai)
29. Japan East (Tokyo, Saitama)
30. Japan West (Osaka)
31. Korea Central (Seoul)
32. Korea South (Busan)

Enter the number of your desired region:
[1]: " -r azure_region
  azure_region=${azure_region:-1}

  case "$azure_region" in
    1) region="eastus" ;;
    2) region="eastus2" ;;
    3) region="centralus" ;;
    4) region="northcentralus" ;;
    5) region="southcentralus" ;;
    6) region="westcentralus" ;;
    7) region="westus" ;;
    8) region="westus2" ;;
    9) region="canadaeast" ;;
    10) region="canadacentral" ;;
    11) region="brazilsouth" ;;
    12) region="northeurope" ;;
    13) region="westeurope" ;;
    14) region="francecentral" ;;
    15) region="francesouth" ;;
    16) region="ukwest" ;;
    17) region="uksouth" ;;
    18) region="germanycentral" ;;
    19) region="germanynortheast" ;;
    20) region="southeastasia" ;;
    21) region="eastasia" ;;
    22) region="australiaeast" ;;
    23) region="australiasoutheast" ;;
    24) region="australiacentral" ;;
    25) region="australiacentral2" ;;
    26) region="centralindia" ;;
    27) region="westindia" ;;
    28) region="southindia" ;;
    29) region="japaneast" ;;
    30) region="japanwest" ;;
    31) region="koreacentral" ;;
    32) region="koreasouth" ;;
  esac

  ROLES="azure vpn cloud"
  EXTRA_VARS="azure_secret=$azure_secret azure_tenant=$azure_tenant azure_client_id=$azure_client_id azure_subscription_id=$azure_subscription_id azure_server_name=$azure_server_name ssh_public_key=$ssh_public_key region=$region"
}

digitalocean () {
  read -p "
Enter your API token. The token must have read and write permissions (https://cloud.digitalocean.com/settings/api/tokens):
$ADDITIONAL_PROMPT
: " -rs do_access_token

  read -p "

Name the vpn server:
[algo.local]: " -r do_server_name
  do_server_name=${do_server_name:-algo.local}

  read -p "

What region should the server be located in?
1. Amsterdam (Datacenter 2)
2. Amsterdam (Datacenter 3)
3. Frankfurt
4. London
5. New York (Datacenter 1)
6. New York (Datacenter 2)
7. New York (Datacenter 3)
8. San Francisco (Datacenter 1)
9. San Francisco (Datacenter 2)
10. Singapore
11. Toronto
12. Bangalore

Enter the number of your desired region:
[7]: " -r region
  region=${region:-7}

  case "$region" in
    1) do_region="ams2" ;;
    2) do_region="ams3" ;;
    3) do_region="fra1" ;;
    4) do_region="lon1" ;;
    5) do_region="nyc1" ;;
    6) do_region="nyc2" ;;
    7) do_region="nyc3" ;;
    8) do_region="sfo1" ;;
    9) do_region="sfo2" ;;
    10) do_region="sgp1" ;;
    11) do_region="tor1" ;;
    12) do_region="blr1" ;;
  esac

  ROLES="digitalocean vpn cloud"
  EXTRA_VARS="do_access_token=$do_access_token do_server_name=$do_server_name do_region=$do_region"
}

ec2 () {
  read -p "
Enter your aws_access_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
Note: Make sure to use an IAM user with an acceptable policy attached (see https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md).
$ADDITIONAL_PROMPT
[AKIA...]: " -rs aws_access_key

  read -p "

Enter your aws_secret_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
$ADDITIONAL_PROMPT
[ABCD...]: " -rs aws_secret_key

  read -p "

Name the vpn server:
[algo]: " -r aws_server_name
  aws_server_name=${aws_server_name:-algo}

  read -p "

What region should the server be located in?
1. us-east-1 US East (N. Virginia)
2. us-east-2 US East (Ohio)
3. us-west-1 US West (N. California)
4. us-west-2 US West (Oregon)
5. ca-central-1 Canada (Central)
6. eu-central-1 EU (Frankfurt)
7. eu-west-1 EU (Ireland)
8. eu-west-2 EU (London)
9. eu-west-3 EU (Paris)
10. ap-northeast-1 Asia Pacific (Tokyo)
11. ap-northeast-2 Asia Pacific (Seoul)
12. ap-northeast-3 Asia Pacific (Osaka-Local)
13. ap-southeast-1 Asia Pacific (Singapore)
14. ap-southeast-2 Asia Pacific (Sydney)
15. ap-south-1 Asia Pacific (Mumbai)
16. sa-east-1 South America (São Paulo)

Enter the number of your desired region:
[1]: " -r aws_region
  aws_region=${aws_region:-1}

  case "$aws_region" in
    1) region="us-east-1" ;;
    2) region="us-east-2" ;;
    3) region="us-west-1" ;;
    4) region="us-west-2" ;;
    5) region="ca-central-1" ;;
    6) region="eu-central-1" ;;
    7) region="eu-west-1" ;;
    8) region="eu-west-2" ;;
    9) region="eu-west-3" ;;
    10) region="ap-northeast-1" ;;
    11) region="ap-northeast-2" ;;
    12) region="ap-northeast-3";;
    13) region="ap-southeast-1" ;;
    14) region="ap-southeast-2" ;;
    15) region="ap-south-1" ;;
    16) region="sa-east-1" ;;
  esac

  ROLES="ec2 vpn cloud"
  EXTRA_VARS="aws_access_key=$aws_access_key aws_secret_key=$aws_secret_key aws_server_name=$aws_server_name region=$region"
}

lightsail () {
  read -p "
Enter your aws_access_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
Note: Make sure to use an IAM user with an acceptable policy attached (see https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md).
$ADDITIONAL_PROMPT
[AKIA...]: " -rs aws_access_key

  read -p "

Enter your aws_secret_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
$ADDITIONAL_PROMPT
[ABCD...]: " -rs aws_secret_key

  read -p "

Name the vpn server:
[algo.local]: " -r algo_server_name
  algo_server_name=${algo_server_name:-algo.local}

  read -p "

What region should the server be located in?
1. us-east-1 US East (N. Virginia)
2. us-east-2 US East (Ohio)
3. us-west-1 US West (N. California)
4. us-west-2 US West (Oregon)
5. ap-south-1 Asia Pacific (Mumbai)
6. ap-northeast-2 Asia Pacific (Seoul)
7. ap-southeast-1 Asia Pacific (Singapore)
8. ap-southeast-2 Asia Pacific (Sydney)
9. ap-northeast-1 Asia Pacific (Tokyo)
10. eu-central-1 EU (Frankfurt)
11. eu-west-1 EU (Ireland)
12. eu-west-2 EU (London)

Enter the number of your desired region:
[1]: " -r algo_region
  algo_region=${algo_region:-1}

  case "$algo_region" in
    1) region="us-east-1" ;;
    2) region="us-east-2" ;;
    3) region="us-west-1" ;;
    4) region="us-west-2" ;;
    5) region="ap-south-1" ;;
    6) region="ap-northeast-2" ;;
    7) region="ap-southeast-1" ;;
    8) region="ap-southeast-2" ;;
    9) region="ap-northeast-1" ;;
    10) region="eu-central-1" ;;
    11) region="eu-west-1" ;;
    12) region="eu-west-2";;
  esac

  ROLES="lightsail vpn cloud"
  EXTRA_VARS="aws_access_key=$aws_access_key aws_secret_key=$aws_secret_key algo_server_name=$algo_server_name region=$region"
}

scaleway () {
  read -p "
Enter your auth token (https://www.scaleway.com/docs/generate-an-api-token/)
$ADDITIONAL_PROMPT
[...]: " -rs scaleway_auth_token

  read -p "

Enter your organization name (https://cloud.scaleway.com/#/billing)
$ADDITIONAL_PROMPT
[...]: " -rs scaleway_organization

  read -p "

Name the vpn server:
[algo.local]: " -r algo_server_name
  algo_server_name=${algo_server_name:-algo.local}

  read -p "

What region should the server be located in?
1. par1 Paris
2. ams1 Amsterdam
Enter the number of your desired region:
[1]: " -r algo_region
  algo_region=${algo_region:-1}

  case "$algo_region" in
    1) region="par1" ;;
    2) region="ams1" ;;
  esac

  ROLES="scaleway vpn cloud"
  EXTRA_VARS="scaleway_auth_token=$scaleway_auth_token scaleway_organization=\"$scaleway_organization\" algo_server_name=$algo_server_name algo_region=$region"
}

openstack () {
  read -p "
Enter the local path to your credentials OpenStack RC file (Can be downloaded from the OpenStack dashboard->Compute->API Access)
[...]: " -r os_rc

  read -p "

Name the vpn server:
[algo.local]: " -r algo_server_name
  algo_server_name=${algo_server_name:-algo.local}

  ROLES="openstack vpn cloud"
  EXTRA_VARS="algo_server_name=$algo_server_name"
  source $os_rc
}

gce () {
  read -p "
Enter the local path to your credentials JSON file (https://support.google.com/cloud/answer/6158849?hl=en&ref_topic=6262490#serviceaccounts):
: " -r credentials_file

  read -p "

Name the vpn server:
[algo]: " -r server_name
  server_name=${server_name:-algo}

  read -p "

What zone should the server be located in?
1. Western US (Oregon A)
2. Western US (Oregon B)
3. Western US (Oregon C)
4. Central US (Iowa A)
5. Central US (Iowa B)
6. Central US (Iowa C)
7. Central US (Iowa F)
8. Eastern US (Northern Virginia A)
9. Eastern US (Northern Virginia B)
10. Eastern US (Northern Virginia C)
11. Eastern US (South Carolina B)
12. Eastern US (South Carolina C)
13. Eastern US (South Carolina D)
14. Western Europe (Belgium B)
15. Western Europe (Belgium C)
16. Western Europe (Belgium D)
17. Western Europe (London A)
18. Western Europe (London B)
19. Western Europe (London C)
20. Western Europe (Frankfurt A)
21. Western Europe (Frankfurt B)
22. Western Europe (Frankfurt C)
23. Southeast Asia (Singapore A)
24. Southeast Asia (Singapore B)
25. East Asia (Taiwan A)
26. East Asia (Taiwan B)
27. East Asia (Taiwan C)
28. Northeast Asia (Tokyo A)
29. Northeast Asia (Tokyo B)
30. Northeast Asia (Tokyo C)
31. Australia (Sydney A)
32. Australia (Sydney B)
33. Australia (Sydney C)
34. South America (São Paulo A)
35. South America (São Paulo B)
36. South America (São Paulo C)

Please choose the number of your zone. Press enter for default (#14) zone.
[14]: " -r region
  region=${region:-14}

  case "$region" in
    1) zone="us-west1-a" ;;
    2) zone="us-west1-b" ;;
    3) zone="us-west1-c" ;;
    4) zone="us-central1-a" ;;
    5) zone="us-central1-b" ;;
    6) zone="us-central1-c" ;;
    7) zone="us-central1-f" ;;
    8) zone="us-east4-a" ;;
    9) zone="us-east4-b" ;;
    10) zone="us-east4-c" ;;
    11) zone="us-east1-b" ;;
    12) zone="us-east1-c" ;;
    13) zone="us-east1-d" ;;
    14) zone="europe-west1-b" ;;
    15) zone="europe-west1-c" ;;
    16) zone="europe-west1-d" ;;
    17) zone="europe-west2-a" ;;
    18) zone="europe-west2-b" ;;
    19) zone="europe-west2-c" ;;
    20) zone="europe-west3-a" ;;
    21) zone="europe-west3-b" ;;
    22) zone="europe-west3-c" ;;
    23) zone="asia-southeast1-a" ;;
    24) zone="asia-southeast1-b" ;;
    25) zone="asia-east1-a" ;;
    26) zone="asia-east1-b" ;;
    27) zone="asia-east1-c" ;;
    28) zone="asia-northeast1-a" ;;
    29) zone="asia-northeast1-b" ;;
    30) zone="asia-northeast1-c" ;;
    31) zone="australia-southeast1-a" ;;
    32) zone="australia-southeast1-b" ;;
    33) zone="australia-southeast1-c" ;;
    34) zone="southamerica-east1-a" ;;
    35) zone="southamerica-east1-b" ;;
    36) zone="southamerica-east1-c" ;;
  esac

  ROLES="gce vpn cloud"
  EXTRA_VARS="credentials_file=$credentials_file gce_server_name=$server_name ssh_public_key=$ssh_public_key zone=$zone max_mss=1316"
}

non_cloud () {
  read -p "
Enter the IP address of your server: (or use localhost for local installation)
[localhost]: " -r server_ip
  server_ip=${server_ip:-localhost}

  read -p "

What user should we use to login on the server? (note: passwordless login required, or ignore if you're deploying to localhost)
[root]: " -r server_user
  server_user=${server_user:-root}

  if [ "x${server_ip}" = "xlocalhost" ]; then
    myip=""
  else
    myip=${server_ip}
  fi

  read -p "

Enter the public IP address of your server: (IMPORTANT! This IP is used to verify the certificate)
[$myip]: " -r IP_subject
  IP_subject=${IP_subject:-$myip}

  if [ "x${IP_subject}" = "x" ]; then
    echo "no server IP given. exiting."
    exit 1
  fi

  ROLES="local vpn"
  EXTRA_VARS="server_ip=$server_ip server_user=$server_user IP_subject_alt_name=$IP_subject"
  SKIP_TAGS+=" cloud update-alternatives"

  read -p "

Was this server deployed by Algo previously?
[y/N]: " -r Deployed_By_Algo
  Deployed_By_Algo=${Deployed_By_Algo:-n}
  if [[ "$Deployed_By_Algo" =~ ^(y|Y)$ ]]; then EXTRA_VARS+=" Deployed_By_Algo=Y"; fi

}

algo_provisioning () {
  echo -n "
What provider would you like to use?
1. DigitalOcean
2. Amazon Lightsail
3. Amazon EC2
4. Microsoft Azure
5. Google Compute Engine
6. Scaleway
7. OpenStack (DreamCompute optimised)
8. Install to existing Ubuntu 16.04 server (Advanced)

Enter the number of your desired provider
: "

  read -r N

  case "$N" in
    1) digitalocean; ;;
    2) lightsail; ;;
    3) ec2; ;;
    4) azure; ;;
    5) gce; ;;
    6) scaleway; ;;
    7) openstack; ;;
    8) non_cloud; ;;
    *) exit 1 ;;
  esac

  additional_roles
  deploy
}

user_management () {

  read -p "
Enter the IP address of your server: (or use localhost for local installation)
: " -r server_ip

  read -p "
What user should we use to login on the server? (note: passwordless login required, or ignore if you're deploying to localhost)
[root]: " -r server_user
  server_user=${server_user:-root}

  read -p "
Do you want each user to have their own account for SSH tunneling?
[y/N]: " -r ssh_tunneling_enabled
  ssh_tunneling_enabled=${ssh_tunneling_enabled:-n}

  if [ "x${server_ip}" = "xlocalhost" ]; then
    myip=""
  else
    myip=${server_ip}
  fi

  read -p "

Enter the public IP address of your server: (IMPORTANT! This IP is used to verify the certificate)
[$myip]: " -r IP_subject
  IP_subject=${IP_subject:-$myip}

  if [ "x${IP_subject}" = "x" ]; then
    echo "no server IP given. exiting."
    exit 1
  fi

  read -p "
Enter the password for the private CA key:
$ADDITIONAL_PROMPT
: " -rs easyrsa_CA_password

  ansible-playbook users.yml -e "server_ip=$server_ip server_user=$server_user ssh_tunneling_enabled=$ssh_tunneling_enabled IP_subject_alt_name=$IP_subject easyrsa_CA_password=$easyrsa_CA_password" -t update-users --skip-tags common
}

case "$1" in
  update-users) user_management ;;
  *) algo_provisioning ;;
  update-users) PLAYBOOK=users.yml; ARGS="${@:2} -t update-users";;
  *) PLAYBOOK=main.yml; ARGS=${@} ;;
esac

ansible-playbook ${PLAYBOOK} ${ARGS}
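With the interactive prompts removed, the rewritten wrapper above simply picks a playbook and forwards the remaining arguments to `ansible-playbook`. A hedged sketch of what invocations look like after this change (the extra variable is illustrative only, not a documented option):

```shell
# Default invocation: provision and configure via main.yml
./algo                                    # runs: ansible-playbook main.yml

# User management: remaining arguments are forwarded, plus the update-users tag
./algo update-users -e "server=10.0.0.1"  # runs: ansible-playbook users.yml -e "server=10.0.0.1" -t update-users
```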
@@ -11,7 +7,7 @@ usage() {
  retcode="${1:-0}"
  echo "To run algo from Docker:"
  echo ""
  echo "docker run --cap-drop ALL -it -v <path to configurations>:"${DATA_DIR}" trailofbits/algo:latest"
  echo "docker run --cap-drop=all -it -v <path to configurations>:"${DATA_DIR}" trailofbits/algo:latest"
  echo ""
  exit ${retcode}
}
algo-showenv.sh  (new executable file, 86 lines)
@@ -0,0 +1,86 @@
#!/usr/bin/env bash
#
# Print information about Algo's invocation environment to aid in debugging.
# This is normally called from Ansible right before a deployment gets underway.

# Skip printing this header if we're just testing with no arguments.
if [[ $# -gt 0 ]]; then
  echo ""
  echo "--> Please include the following block of text when reporting issues:"
  echo ""
fi

if [[ ! -f ./algo ]]; then
  echo "This should be run from the top level Algo directory"
fi

# Determine the operating system.
case "$(uname -s)" in
  Linux)
    OS="Linux ($(uname -r) $(uname -v))"
    if [[ -f /etc/os-release ]]; then
      # shellcheck disable=SC1091
      # I hope this isn't dangerous.
      . /etc/os-release
      if [[ ${PRETTY_NAME} ]]; then
        OS="${PRETTY_NAME}"
      elif [[ ${NAME} ]]; then
        OS="${NAME} ${VERSION}"
      fi
    fi
    STAT="stat -c %y"
    ;;
  Darwin)
    OS="$(sw_vers -productName) $(sw_vers -productVersion)"
    STAT="stat -f %Sm"
    ;;
  *)
    OS="Unknown"
    ;;
esac

# Determine if virtualization is being used with Linux.
VIRTUALIZED=""
if [[ -x $(command -v systemd-detect-virt) ]]; then
  DETECT_VIRT="$(systemd-detect-virt)"
  if [[ ${DETECT_VIRT} != "none" ]]; then
    VIRTUALIZED=" (Virtualized: ${DETECT_VIRT})"
  fi
elif [[ -f /.dockerenv ]]; then
  VIRTUALIZED=" (Virtualized: docker)"
fi

echo "Algo running on: ${OS}${VIRTUALIZED}"

# Determine the currentness of the Algo software.
if [[ -d .git && -x $(command -v git) ]]; then
  ORIGIN="$(git remote get-url origin)"
  COMMIT="$(git log --max-count=1 --oneline --no-decorate --no-color)"
  if [[ ${ORIGIN} == "https://github.com/trailofbits/algo.git" ]]; then
    SOURCE="clone"
  else
    SOURCE="fork"
  fi
  echo "Created from git ${SOURCE}. Last commit: ${COMMIT}"
elif [[ -f LICENSE && ${STAT} ]]; then
  CREATED="$(${STAT} LICENSE)"
  echo "ZIP file created: ${CREATED}"
fi

# The Python version might be useful to know.
if [[ -x ./env/bin/python ]]; then
  ./env/bin/python --version 2>&1
elif [[ -f ./algo ]]; then
  echo "env/bin/python not found: has 'python -m virtualenv ...' been run?"
fi

# Just print out all command line arguments, which are expected
# to be Ansible variables.
if [[ $# -gt 0 ]]; then
  echo "Runtime variables:"
  for VALUE in "$@"; do
    echo "  ${VALUE}"
  done
fi

exit 0
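Although Ansible normally invokes this script, it can also be run by hand from the repository root; a small usage sketch based on the logic above (the example argument is illustrative):

```shell
# No arguments: print OS, virtualization, source (git clone/fork or ZIP) and Python version
./algo-showenv.sh

# With arguments (as Ansible passes them): also prints the issue-report header and
# echoes each argument back under "Runtime variables:"
./algo-showenv.sh provider=local
```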
@@ -4,6 +4,7 @@ pipelining = True
retry_files_enabled = False
host_key_checking = False
timeout = 60
stdout_callback = full_skip

[paramiko_connection]
record_host_keys = False
cloud.yml  (new file, 49 lines)
@@ -0,0 +1,49 @@
---
- name: Provision the server
  hosts: localhost
  tags: always
  vars_files:
    - config.cfg

  pre_tasks:
    - block:
        - name: Local pre-tasks
          import_tasks: playbooks/cloud-pre.yml
          tags: always
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

  roles:
    - role: cloud-digitalocean
      when: algo_provider == "digitalocean"
    - role: cloud-ec2
      when: algo_provider == "ec2"
    - role: cloud-vultr
      when: algo_provider == "vultr"
    - role: cloud-gce
      when: algo_provider == "gce"
    - role: cloud-azure
      when: algo_provider == "azure"
    - role: cloud-lightsail
      when: algo_provider == "lightsail"
    - role: cloud-scaleway
      when: algo_provider == "scaleway"
    - role: cloud-openstack
      when: algo_provider == "openstack"
    - role: local
      when: algo_provider == "local"

  post_tasks:
    - block:
        - name: Local post-tasks
          import_tasks: playbooks/cloud-post.yml
          become: false
          tags: cloud
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always
config.cfg  (89 lines changed)
@@ -10,14 +10,23 @@ users:

### Advanced users only below this line ###

# If True re-init all existing certificates. (True or False)
easyrsa_reinit_existent: False
# If True re-init all existing certificates. Boolean
keys_clean_all: False

vpn_network: 10.19.48.0/24
vpn_network_ipv6: 'fd9d:bc11:4020::/48'
wireguard_enabled: true
wireguard_port: 51820

server_name: "{{ ansible_ssh_host }}"
IP_subject_alt_name: "{{ ansible_ssh_host }}"
# MSS is the TCP Max Segment Size
# Setting the 'max_mss' Ansible variable can solve some issues related to packet fragmentation
# This appears to be necessary on (at least) Google Cloud,
# however, some routers also require a change to this parameter
# See also:
# - https://github.com/trailofbits/algo/issues/216
# - https://github.com/trailofbits/algo/issues?utf8=%E2%9C%93&q=is%3Aissue%20mtu
# - https://serverfault.com/questions/601143/ssh-not-working-over-ipsec-tunnel-strongswan
#max_mss: 1316

# StrongSwan log level
# https://wiki.strongswan.org/projects/strongswan/wiki/LoggerConfiguration

@@ -29,13 +38,25 @@ adblock_lists:
  - "https://www.malwaredomainlist.com/hostslist/hosts.txt"
  - "https://hosts-file.net/ad_servers.txt"

# Enalbe DNS encryption. Use dns_encrypted_provider to specify the provider. If false dns_servers should be specified
# Enable DNS encryption.
# If 'false', 'dns_servers' should be specified below.
dns_encryption: true

# Possible values: google, cloudflare
dns_encryption_provider: cloudflare
# DNS servers which will be used if 'dns_encryption' is 'true'. Multiple
# providers may be specified, but avoid mixing providers that filter results
# (like Cisco) with those that don't (like Cloudflare) or you could get
# inconsistent results. The list of available public providers can be found
# here:
# https://github.com/DNSCrypt/dnscrypt-resolvers/blob/master/v2/public-resolvers.md
dnscrypt_servers:
  ipv4:
    - cloudflare
    # - google
  ipv6:
    - cloudflare-ipv6

# DNS servers which will be used if dns_encryption disabled
# DNS servers which will be used if 'dns_encryption' is 'false'.
# The default is to use Cloudflare.
dns_servers:
  ipv4:
    - 1.1.1.1

@@ -47,12 +68,21 @@ dns_servers:
# IP address for the local dns resolver
local_service_ip: 172.16.0.1

# Your Algo server will automatically install security updates. Some updates
# require a reboot to take effect but your Algo server will not reboot itself
# automatically unless you change 'enabled' below from 'false' to 'true', in
# which case a reboot will take place if necessary at the time specified (as
# HH:MM) in the time zone of your Algo server. The default time zone is UTC.
unattended_reboot:
  enabled: false
  time: 06:00

pkcs12_PayloadCertificateUUID: "{{ 900000 | random | to_uuid | upper }}"
VPN_PayloadIdentifier: "{{ 800000 | random | to_uuid | upper }}"
CA_PayloadIdentifier: "{{ 700000 | random | to_uuid | upper }}"

# Block traffic between connected clients
BetweenClients_DROP: Y
BetweenClients_DROP: true

congrats:
  common: |

@@ -61,11 +91,11 @@ congrats:
    "# Config files and certificates are in the ./configs/ directory. #"
    "# Go to https://whoer.net/ after connecting #"
    "# and ensure that all your traffic passes through the VPN. #"
    "# Local DNS resolver {{ local_service_ip }} #"
    "# Local DNS resolver {{ local_service_ip }} #"
  p12_pass: |
    "# The p12 and SSH keys password for new users is {{ easyrsa_p12_export_password }} #"
    "# The p12 and SSH keys password for new users is {{ p12_export_password }} #"
  ca_key_pass: |
    "# The CA key password is {{ easyrsa_CA_password }} #"
    "# The CA key password is {{ CA_password }} #"
  ssh_access: |
    "# Shell access: ssh -i {{ ansible_ssh_private_key_file|default(omit) }} {{ ansible_ssh_user|default(omit) }}@{{ ansible_ssh_host|default(omit) }} #"

@@ -77,35 +107,46 @@ SSH_keys:
cloud_providers:
  azure:
    size: Basic_A0
    image:
      offer: UbuntuServer
      publisher: Canonical
      sku: '16.04-LTS' # 16.04-LTS / 17.04
      version: latest
    image: 18.04-LTS
  digitalocean:
    size: s-1vcpu-1gb
    image: "ubuntu-16-04-x64" # ubuntu-16-04-x64 / ubuntu-17-10-x64
    image: "ubuntu-18-04-x64"
  # Change the encrypted flag to "true" to enable AWS volume encryption, for encryption of data at rest.
  # Warning: the Algo script will take approximately 6 minutes longer to complete.
  # Also note that the documented AWS minimum permissions aren't sufficient.
  # You will have to edit the AWS user policy documented at
  # https://github.com/trailofbits/algo/blob/master/docs/cloud-amazon-ec2.md to also allow "ec2:CopyImage".
  # See https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-edit.html
  ec2:
    encrypted: false
    size: t2.micro
    image:
      name: "ubuntu-xenial-16.04" # ubuntu-xenial-16.04 / ubuntu-zesty-17.04
      name: "ubuntu-bionic-18.04"
      owner: "099720109477"
  gce:
    size: f1-micro
    image: ubuntu-1604 # ubuntu-1604 / ubuntu-1704
    image: ubuntu-1804
    external_static_ip: false
  lightsail:
    size: nano_1_0
    image: ubuntu_16_04
    image: ubuntu_18_04
  scaleway:
    size: VC1S
    image: Ubuntu Xenial
    size: START1-S
    image: Ubuntu Bionic Beaver
    arch: x86_64
  openstack:
    flavor_ram: ">=512"
    image: Ubuntu-16.04
    image: Ubuntu-18.04
  vultr:
    os: Ubuntu 18.04 x64
    size: 1024 MB RAM,25 GB SSD,1.00 TB BW
  local:

fail_hint:
  - Sorry, but something went wrong!
  - Please check the troubleshooting guide.
  - https://trailofbits.github.io/algo/troubleshooting.html

booleans_map:
  Y: true
  y: true
deploy.yml  (98 lines changed)
@@ -1,98 +0,0 @@
- name: Configure the server
  hosts: localhost
  tags: algo
  vars_files:
    - config.cfg

  pre_tasks:
    - block:
        - name: Local pre-tasks
          include_tasks: playbooks/local.yml
          tags: [ 'always' ]

        - name: Local pre-tasks
          include_tasks: playbooks/local_ssh.yml
          become: false
          when: Deployed_By_Algo is defined and Deployed_By_Algo == "Y"
          tags: [ 'local' ]
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

  roles:
    - { role: cloud-digitalocean, tags: ['digitalocean'] }
    - { role: cloud-ec2, tags: ['ec2'] }
    - { role: cloud-gce, tags: ['gce'] }
    - { role: cloud-azure, tags: ['azure'] }
    - { role: cloud-lightsail, tags: ['lightsail'] }
    - { role: cloud-scaleway, tags: ['scaleway'] }
    - { role: cloud-openstack, tags: ['openstack'] }
    - { role: local, tags: ['local'] }

  post_tasks:
    - block:
        - name: Local post-tasks
          include_tasks: playbooks/post.yml
          become: false
          tags: [ 'cloud' ]
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

- name: Configure the server and install required software
  hosts: vpn-host
  gather_facts: false
  tags: algo
  become: true
  vars_files:
    - config.cfg

  pre_tasks:
    - block:
        - name: Common pre-tasks
          include_tasks: playbooks/common.yml
          tags: [ 'digitalocean', 'ec2', 'gce', 'azure', 'lightsail', 'scaleway', 'openstack', 'local', 'pre' ]
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

  roles:
    - { role: dns_adblocking, tags: [ 'dns', 'adblock' ] }
    - { role: ssh_tunneling, tags: [ 'ssh_tunneling' ] }
    - { role: vpn, tags: [ 'vpn' ] }

  post_tasks:
    - block:
        - debug:
            msg:
              - "{{ congrats.common.split('\n') }}"
              - " {{ congrats.p12_pass }}"
              - " {% if Store_CAKEY is defined and Store_CAKEY == 'N' %}{% else %}{{ congrats.ca_key_pass }}{% endif %}"
              - " {% if cloud_deployment is defined %}{{ congrats.ssh_access }}{% endif %}"
          tags: always

        - name: Save the CA key password
          local_action: >
            shell echo "{{ easyrsa_CA_password }}" > /tmp/ca_password
          become: no
          tags: tests

        - name: Delete the CA key
          local_action:
            module: file
            path: "configs/{{ IP_subject_alt_name }}/pki/private/cakey.pem"
            state: absent
          become: no
          tags: always
          when: Store_CAKEY is defined and Store_CAKEY == "N"
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always
@@ -2,48 +2,5 @@

## Installation via profiles

1. [Install the strongSwan VPN Client](https://play.google.com/store/apps/details?id=org.strongswan.android).
2. Copy `android_{username}.sswan` and `android_{username}_helper.html` to your phone's internal storage.
3. Open the StrongSwan app and go to 'Import VPN profile'.
4. Select the `android_{username}.sswan` file to configure the VPN with your profile.

## Manual installation

**NOTE:** If you are a Project Fi user, you must disable WiFi Assistant before continuing. See the [strongSwan documentation](https://wiki.strongswan.org/projects/strongswan/wiki/AndroidVPNClient) for details.

| Instruction | Screenshot(s) |
| ----------- | ---------- |
| 1. Copy your `{username}.p12` certificate to your phone's internal storage. | |
| 2. [Install the strongSwan VPN Client](https://play.google.com/store/apps/details?id=org.strongswan.android) (Android 4+) | |
| 3. Open the app and tap "ADD VPN PROFILE" in the top right. | [![step3-thumb]][step3-screen] |
| 4. Enter the IP address or hostname of your Algo server and set the "VPN Type" to "IKEv2 Certificate". | [![step4-thumb]][step4-screen] |
| 5. Tap "Select user certificate". You will be shown a prompt, tap "INSTALL". | [![step5-thumb]][step5-screen] |
| 6. Use the "Open from" menu to select your certificate. If you downloaded your certificate to your phone, you may find that using the "Downloads" shortcut results in your `{username}.p12` certificate being grayed out. If this happens go back to the "Open from" menu and tap on the name of your phone. This will bring up the filesystem. From here, navigate to the folder where you saved your cert (such as "Downloads"), and try again. | [![step6-thumb]][step6-screen] |
| 7. Enter the password for your certificate. This password was printed to your console at the end of running the `algo` deployment script. Please note that in some cases, extracting the certificate can take several minutes. | [![step7-thumb]][step7-screen] |
| 8. Give your certificate a name (it will default to your Algo username), and ensure that "Credential use" is set to "VPN and apps". Tap "OK". | [![step8-thumb]][step8-screen] |
| 9. You'll then be brought to another prompt. Ensure your newly imported certificate is selected, and tap "ALLOW". Then, tap "SAVE" in the top right. | [![step9-thumb]][step9-screen] |
| 10. You will be returned to the main menu, and your newly-configured VPN profile should be listed. Tap the profile to connect. | [![step10-thumb]][step10-screen] |

## Troubleshooting
### Tapping the VPN profile in strongSwan has no effect.
Ensure that "WiFi Assistant" and any other always-on VPNs are disabled before attempting to enable a strongSwan VPN. If any other VPN is active, strongSwan may silently fail to initialize a VPN connection. On Android 7, your can manage your VPNs by going to: Settings > Tap "More" under "Wireless & networks" > VPN > tap the gear icon next to any non-strongSwan VPNs listed and ensure they are disabled.


[step3-thumb]: https://i.imgur.com/LPwIGJE.png
[step4-thumb]: https://i.imgur.com/sFkDILg.png
[step5-thumb]: https://i.imgur.com/IliT5oD.png
[step6-thumb]: https://i.imgur.com/oghdCVp.png
[step7-thumb]: https://i.imgur.com/nDzJ7KS.png
[step8-thumb]: https://i.imgur.com/RPXSpCo.png
[step9-thumb]: https://i.imgur.com/uMinDPe.png
[step10-thumb]: https://i.imgur.com/hUEDjdo.png


[step3-screen]: https://i.imgur.com/xNMihCd.png
[step4-screen]: https://i.imgur.com/xYjoNNO.png
[step5-screen]: https://i.imgur.com/4qhKT1Z.png
[step6-screen]: https://i.imgur.com/MAaQuxH.png
[step7-screen]: https://i.imgur.com/aT2MPih.png
[step8-screen]: https://i.imgur.com/gvaKzkh.png
[step9-screen]: https://i.imgur.com/eZp8DNb.png
[step10-screen]: https://i.imgur.com/Nd8rYMJ.png
1. [Install the WireGuard VPN Client](https://play.google.com/store/apps/details?id=com.wireguard.android).
2. Open QR code `configs/<ip_address>/wireguard/<username>.png` and scan it in the WireGuard app
docs/client-linux-wireguard.md  (new file, 55 lines)
@@ -0,0 +1,55 @@
# Using Ubuntu Server as a Client with WireGuard

## Install WireGuard

To connect to your Algo VPN using [WireGuard](https://www.wireguard.com) from an Ubuntu Server 16.04 (Xenial) or 18.04 (Bionic) client, first install WireGuard on the client:

```shell
# Add the WireGuard repository:
sudo add-apt-repository ppa:wireguard/wireguard

# Update the list of available packages (not necessary on Bionic):
sudo apt update

# Install the tools and kernel module:
sudo apt install wireguard
```

(For installation on other Linux distributions, see the [Installation](https://www.wireguard.com/install/) page on the WireGuard site.)

## Locate the Config File

The Algo-generated config files for WireGuard are named `configs/<ip_address>/wireguard/<username>.conf` on the system where you ran `./algo`. One file was generated for each of the users you added to `config.cfg` before you ran `./algo`. Each Linux and Android client you connect to your Algo VPN must use a different WireGuard config file. Choose one of these files and copy it to your Linux client.

If your client is running Bionic (or another Linux that uses `systemd-resolved` for DNS) you should first edit the config file. Comment out the line that begins with `DNS =` and replace it with:
```
PostUp = systemd-resolve -i %i --set-dns=172.16.0.1 --set-domain=~.
```
Use the IP address shown on the `DNS =` line (for most, this will be `172.16.0.1`). If the `DNS =` line contains multiple IP addresses, use multiple `--set-dns=` options.
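If you prefer to script that edit, something like the following would comment out the `DNS =` line and add the `PostUp` command; this is only a sketch, assuming GNU sed, a single-resolver config, and the default `172.16.0.1` address:

```shell
# Comment out the DNS line and append the systemd-resolved hook to the [Interface] section
sed -i -e 's/^DNS =/#DNS =/' \
       -e '/^\[Interface\]/a PostUp = systemd-resolve -i %i --set-dns=172.16.0.1 --set-domain=~.' \
       <username>.conf
```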
## Configure WireGuard

Finally, install the config file on your client as `/etc/wireguard/wg0.conf` and start WireGuard:

```shell
# Install the config file to the WireGuard configuration directory on your
# Bionic or Xenial client:
sudo install -o root -g root -m 600 <username>.conf /etc/wireguard/wg0.conf

# Start the WireGuard VPN:
sudo systemctl start wg-quick@wg0

# Check that it started properly:
sudo systemctl status wg-quick@wg0

# Verify the connection to the Algo VPN:
sudo wg

# See that your client is using the IP address of your Algo VPN:
curl ipv4.icanhazip.com

# Optionally configure the connection to come up at boot time:
sudo systemctl enable wg-quick@wg0
```

(If your Linux distribution does not use `systemd`, you can bring up WireGuard with `sudo wg-quick up wg0`).
@ -73,6 +73,6 @@ In this example we'll assume the IP of our Algo VPN server is `1.2.3.4` and the
|
|||
* For the later 2 options, hover to option in the settings to see a description
|
||||
* Cipher proposal:
|
||||
* Check *Enable custom proposals*
|
||||
* IKE: `aes128gcm16-prfsha512-ecp256,aes128-sha2_512-prfsha512-ecp256,aes128-sha2_384-prfsha384-ecp256`
|
||||
* ESP: `aes128gcm16-ecp256,aes128-sha2_512-prfsha512-ecp256`
|
||||
* IKE: `aes256gcm16-prfsha512-ecp384,aes256-sha2_512-prfsha512-ecp384,aes256-sha2_384-prfsha384-ecp384`
|
||||
* ESP: `aes256gcm16-ecp384,aes256-sha2_512-prfsha512-ecp384`
|
||||
* Apply and turn the connection on, you should now be connected
|
||||
|
|
33
docs/client-macos-wireguard.md
Normal file
|
@ -0,0 +1,33 @@
|
|||
# Using MacOS as a Client with WireGuard
|
||||
|
||||
## Install WireGuard
|
||||
|
||||
To connect to your Algo VPN using [WireGuard](https://www.wireguard.com) from MacOS:
|
||||
|
||||
```
|
||||
# Install the wireguard-go userspace driver
|
||||
brew install wireguard-tools
|
||||
```
|
||||
|
||||
## Locate the Config File
|
||||
|
||||
The Algo-generated config files for WireGuard are named `configs/<ip_address>/wireguard/<username>.conf` on the system where you ran `./algo`. One file was generated for each of the users you added to `config.cfg` before you ran `./algo`. Each Linux and Android client you connect to your Algo VPN must use a different WireGuard config file. Choose one of these files and copy it to your device.
|
||||
|
||||
## Configure WireGuard
|
||||
|
||||
Finally, install the config file on your client as `/usr/local/etc/wireguard/wg0.conf` and start WireGuard:
|
||||
|
||||
```
|
||||
# Install the config file to the WireGuard configuration directory on your MacOS device
|
||||
mkdir /usr/local/etc/wireguard/
|
||||
cp <username>.conf /usr/local/etc/wireguard/wg0.conf
|
||||
|
||||
# Start the WireGuard VPN:
|
||||
sudo wg-quick up wg0
|
||||
|
||||
# Verify the connection to the Algo VPN:
|
||||
wg
|
||||
|
||||
# See that your client is using the IP address of your Algo VPN:
|
||||
curl ipv4.icanhazip.com
|
||||
```
|
|
@ -1,6 +1,6 @@
|
|||
# Windows client manual setup
|
||||
|
||||
## Automatic installtion
|
||||
## Automatic installation
|
||||
|
||||
To install automatically, use the generated user Powershell script.
|
||||
|
||||
|
@ -8,7 +8,7 @@ To install automatically, use the generated user Powershell script.
|
|||
2. Open Powershell as Administrator.
|
||||
3. Run the following command:
|
||||
```powershell
|
||||
powershell -ExecutionPolicy ByPass -File C:\path\to\windows_USER.ps1 -Add
|
||||
powershell -ExecutionPolicy ByPass -File C:\path\to\windows_USER.ps1 Add
|
||||
```
|
||||
4. The command has help information available. To view its full help, run this from Powershell:
|
||||
```powershell
|
||||
|
@ -27,6 +27,8 @@ Set-ExecutionPolicy Unrestricted -Scope CurrentUser
|
|||
|
||||
4. In the same window, run the necessary commands to install the certificates and create the VPN configuration. Note the lines at the top defining the VPN address, the USER.p12 file location, and the CA certificate location; change those lines to reflect the IP address of your Algo server and the locations where you saved those two files. Also note that it will prompt for the "User p12 password", which is printed at the end of a successful Algo deployment.
|
||||
|
||||
If you have more than one account on your Windows 10 machine (e.g. one with administrator privileges and one without) and would like to have the VPN connection available to all users, then insert the line `AllUserConnection = $true` after `$EncryptionLevel = "Required"`.
|
||||
|
||||
```powershell
|
||||
$VpnServerAddress = "1.2.3.4"
|
||||
$UserP12Path = "$Home\Downloads\USER.p12"
|
||||
|
@ -48,12 +50,12 @@ Add-VpnConnection @addVpnParams
|
|||
|
||||
$setVpnParams = @{
|
||||
ConnectionName = $VpnName
|
||||
AuthenticationTransformConstants = "GCMAES128"
|
||||
CipherTransformConstants = "GCMAES128"
|
||||
EncryptionMethod = "AES128"
|
||||
AuthenticationTransformConstants = "GCMAES256"
|
||||
CipherTransformConstants = "GCMAES256"
|
||||
EncryptionMethod = "AES256"
|
||||
IntegrityCheckMethod = "SHA384"
|
||||
DHGroup = "ECP256"
|
||||
PfsGroup = "ECP256"
|
||||
DHGroup = "ECP384"
|
||||
PfsGroup = "ECP384"
|
||||
Force = $true
|
||||
}
|
||||
Set-VpnConnectionIPsecConfiguration @setVpnParams
|
||||
|
|
117
docs/cloud-amazon-ec2.md
Normal file
|
@ -0,0 +1,117 @@
|
|||
# Amazon EC2 cloud setup
|
||||
|
||||
## AWS account creation
|
||||
|
||||
Creating an Amazon AWS account requires giving Amazon a phone number that can receive a call and has a number pad to enter a PIN challenge displayed in the browser. This phone system prompt occasionally fails to correctly validate input, but try again (request a new PIN in the browser) until you succeed.
|
||||
|
||||
### Select an EC2 plan
|
||||
|
||||
The cheapest EC2 plan you can choose is the "Free Plan" a.k.a. the "AWS Free Tier." It is only available to new AWS customers, it has limits on usage, and it converts to standard pricing after 12 months (the "introductory period"). After you exceed the usage limits, after the 12 month period, or if you are an existing AWS customer, then you will pay standard pay-as-you-go service prices.
|
||||
|
||||
*Note*: Your Algo instance will not stop working when you hit the bandwidth limit, you will just start accumulating service charges on your AWS account.
|
||||
|
||||
As of the time of this writing (July 2018), the Free Tier limits include "750 hours of Amazon EC2 Linux t2.micro instance usage" per month, 15 GB of bandwidth (outbound) per month, and 30 GB of cloud storage. Algo will not even use 1% of the storage limit, but you may have to monitor your bandwidth usage or keep an eye out for the email from Amazon when you are about to exceed the Free Tier limits.
|
||||
|
||||
### Create an AWS permissions policy
|
||||
|
||||
In the AWS console, find the policies menu: click Services > IAM > Policies. Click Create Policy.
|
||||
|
||||
Here, you have the policy editor. Switch to the JSON tab and copy-paste over the existing empty policy with [the minimum required AWS policy needed for Algo deployment](https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md#minimum-required-iam-permissions-for-deployment).
|
||||
|
||||

|
||||
|
||||
### Set up an AWS user
|
||||
|
||||
In the AWS console, find the users (“Identity and Access Management”, a.k.a. IAM users) menu: click Services > IAM.
|
||||
|
||||
Activate multi-factor authentication (MFA) on your root account. The simplest choice is the mobile app "Google Authenticator." A hardware U2F token is ideal (less prone to a phishing attack), but a TOTP authenticator like this is good enough.
|
||||
|
||||

|
||||
|
||||
Now "Create individual IAM users" and click Add User. Create a user name. I chose “algovpn”. Then click the box next to Programmatic Access. Then click Next.
|
||||
|
||||

|
||||
|
||||
Next, click “Attach existing policies directly.” Type “Algo” in the search box to filter the policies. Find “AlgoVPN_Provisioning” (the policy you created) and click the checkbox next to that. Click Next when you’re done.
|
||||
|
||||

|
||||
|
||||
The user creation confirmation screen should look like this if you've done everything correctly.
|
||||
|
||||

|
||||
|
||||
On the final screen, click the Download CSV button. This file includes the AWS access keys you’ll need during the Algo set-up process. Click Close, and you’re all set.
|
||||
|
||||

|
||||
|
||||
## Using EC2 during Algo setup
|
||||
|
||||
After you have downloaded Algo and installed its dependencies, the next step is running Algo to provision the VPN server on your AWS account.
|
||||
|
||||
First you will be asked which server type to set up. Enter "2" to use Amazon EC2.
|
||||
|
||||
```
|
||||
$ ./algo
|
||||
|
||||
What provider would you like to use?
|
||||
1. DigitalOcean
|
||||
2. Amazon EC2
|
||||
3. Microsoft Azure
|
||||
4. Google Compute Engine
|
||||
5. Scaleway
|
||||
6. OpenStack (DreamCompute optimised)
|
||||
7. Install to existing Ubuntu 16.04 server (Advanced)
|
||||
|
||||
Enter the number of your desired provider
|
||||
: 2
|
||||
```
|
||||
|
||||
Next you will be asked for the AWS Access Key (Access Key ID) and AWS Secret Key (Secret Access Key) that you received in the CSV file when you set up the account (don't worry if you don't see your text entered in the console; the key input is hidden here by Algo).
|
||||
|
||||
```
|
||||
Enter your aws_access_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
|
||||
Note: Make sure to use an IAM user with an acceptable policy attached (see https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md).
|
||||
[pasted values will not be displayed]
|
||||
[AKIA...]:
|
||||
|
||||
Enter your aws_secret_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
|
||||
[pasted values will not be displayed]
|
||||
[ABCD...]:
|
||||
```
|
||||
|
||||
You will be prompted for the server name to enter. Feel free to leave this as the default ("algo") if you are not certain how this will affect your setup. Here we chose to call it "algovpn".
|
||||
|
||||
```
|
||||
Name the vpn server:
|
||||
[algo]: algovpn
|
||||
```
|
||||
|
||||
After entering the server name, the script asks which region you wish to set up your new Algo instance in. Enter the number next to the name of the region.
|
||||
|
||||
```
|
||||
What region should the server be located in?
|
||||
1. us-east-1 US East (N. Virginia)
|
||||
2. us-east-2 US East (Ohio)
|
||||
3. us-west-1 US West (N. California)
|
||||
4. us-west-2 US West (Oregon)
|
||||
5. ca-central-1 Canada (Central)
|
||||
6. eu-central-1 EU (Frankfurt)
|
||||
7. eu-west-1 EU (Ireland)
|
||||
8. eu-west-2 EU (London)
|
||||
9. eu-west-3 EU (Paris)
|
||||
10. ap-northeast-1 Asia Pacific (Tokyo)
|
||||
11. ap-northeast-2 Asia Pacific (Seoul)
|
||||
12. ap-northeast-3 Asia Pacific (Osaka-Local)
|
||||
13. ap-southeast-1 Asia Pacific (Singapore)
|
||||
14. ap-southeast-2 Asia Pacific (Sydney)
|
||||
15. ap-south-1 Asia Pacific (Mumbai)
|
||||
16. sa-east-1 South America (São Paulo)
|
||||
|
||||
Enter the number of your desired region:
|
||||
[1]: 10
|
||||
```
|
||||
|
||||
You will then be asked the remainder of the standard Algo setup questions.
|
||||
|
||||
## Cleanup
|
||||
If you've installed Algo onto EC2 multiple times, your AWS account may become cluttered with unused or deleted resources e.g. instances, VPCs, subnets, etc. This may cause future installs to fail. The easiest way to clean up after you're done with a server is to go to "CloudFormation" from the console and delete the CloudFormation stack associated with that server. Please note that unless you've enabled termination protection on your instance, deleting the stack this way will delete your instance without warning, so be sure you are deleting the correct stack.
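If you prefer the command line and have the AWS CLI configured with your credentials, you can also find and delete the stack that way. A sketch, assuming you kept the default server name "algo":

```shell
# List CloudFormation stacks to find the one Algo created:
aws cloudformation describe-stacks --query 'Stacks[].StackName'

# Delete the stack you no longer need (this terminates the instance):
aws cloudformation delete-stack --stack-name algo
```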
|
|
@ -12,7 +12,7 @@ On the **Tokens/Keys** tab, select **Generate New Token**. A dialog will pop up.
|
|||
|
||||

|
||||
|
||||
You will be returned to the **Tokens/Keys** tab, and your new key will be shown under the **Personal Access Tokens** header.
|
||||
You will be returned to the **Tokens/Keys** tab, and your new key will be shown under the **Personal Access Tokens** header.
|
||||
|
||||

|
||||
|
||||
|
@ -20,9 +20,9 @@ Copy or note down the hash that shows below the name you entered, as this will b
|
|||
|
||||
## Using DigitalOcean with Algo (command)
|
||||
|
||||
These steps are for people who run Algo using Docker or using the "algo" command.
|
||||
These steps are for people who run Algo using Docker or using the "algo" command.
|
||||
|
||||
First you will be asked which server type to setup. You would want to enter "1" to use DigitalOcean.
|
||||
First you will be asked which server type to setup. You would want to enter "1" to use DigitalOcean.
|
||||
|
||||
```
|
||||
What provider would you like to use?
|
||||
|
@ -33,7 +33,7 @@ First you will be asked which server type to setup. You would want to enter "1"
|
|||
5. Google Compute Engine
|
||||
6. Scaleway
|
||||
7. OpenStack (DreamCompute optimised)
|
||||
8. Install to existing Ubuntu 16.04 server
|
||||
8. Install to existing Ubuntu 18.04 server
|
||||
|
||||
Enter the number of your desired provider
|
||||
: 1
|
||||
|
@ -44,17 +44,17 @@ Next you will be asked for the API Token value. Paste the API Token value you co
|
|||
```
|
||||
Enter your API token. The token must have read and write permissions (https://cloud.digitalocean.com/settings/api/tokens):
|
||||
[pasted values will not be displayed]
|
||||
:
|
||||
:
|
||||
```
|
||||
|
||||
You will be prompted for the server name to enter. Feel free to leave this as the default ("algo.local") if you are not certain how this will affect your setup.
|
||||
|
||||
```
|
||||
Name the vpn server:
|
||||
[algo.local]:
|
||||
[algo.local]:
|
||||
```
|
||||
|
||||
After entering the server name the script ask which region you wish to setup your new Algo instance in. Enter the number next to name of the region.
|
||||
After entering the server name the script ask which region you wish to setup your new Algo instance in. Enter the number next to name of the region.
|
||||
|
||||
```
|
||||
What region should the server be located in?
|
||||
|
@ -78,10 +78,10 @@ You will then be asked the remainder of the setup questions.
|
|||
|
||||
## Using DigitalOcean with Algo (via Ansible)
|
||||
|
||||
If you are using Ansible to deploy to DigitalOcean, you will need to pass the API Token to Ansible as `do_access_token`.
|
||||
If you are using Ansible to deploy to DigitalOcean, you will need to pass the API Token to Ansible as `do_token`.
|
||||
|
||||
For example,
|
||||
|
||||
ansible-playbook deploy.yml -t digitalocean,vpn,cloud -e 'do_access_token=my_secret_token do_server_name=algo.local do_region=ams2
|
||||
|
||||
Where "my_secret_token" is your API Token.
|
||||
ansible-playbook deploy.yml -e 'provider=digitalocean do_token=my_secret_token'
|
||||
|
||||
Where "my_secret_token" is your API Token. For more references see [deploy-from-ansible](deploy-from-ansible.md)
|
||||
|
|
41
docs/cloud-gce.md
Normal file
|
@ -0,0 +1,41 @@
|
|||
# Google Cloud Platform setup
|
||||
|
||||
Follow the [installation instructions](https://cloud.google.com/sdk/) to have the CLI commands to interact with Google.
|
||||
|
||||
After creating an account and installing the SDK, log in to your account using `gcloud init`
|
||||
|
||||
### Creating a project
|
||||
|
||||
The recommendation on GCP is to group resources into **Projects**, so we will create one project to hold our VPN server and a service account restricted to it.
|
||||
|
||||
```bash
|
||||
## Create the project to group the resources
|
||||
### You might need to change it to have a global unique project id
|
||||
PROJECT_ID=${USER}-algo-vpn
|
||||
BILLING_ID="$(gcloud beta billing accounts list --format="value(ACCOUNT_ID)")"
|
||||
|
||||
gcloud projects create ${PROJECT_ID} --name algo-vpn --set-as-default
|
||||
gcloud beta billing projects link ${PROJECT_ID} --billing-account ${BILLING_ID}
|
||||
|
||||
## Create a service account with access to the VPN
|
||||
gcloud iam service-accounts create algo-vpn --display-name "Algo VPN"
|
||||
gcloud iam service-accounts keys create configs/gce.json \
|
||||
--iam-account algo-vpn@${PROJECT_ID}.iam.gserviceaccount.com
|
||||
gcloud projects add-iam-policy-binding ${PROJECT_ID} \
|
||||
--member serviceAccount:algo-vpn@${PROJECT_ID}.iam.gserviceaccount.com \
|
||||
--role roles/compute.admin
|
||||
gcloud projects add-iam-policy-binding ${PROJECT_ID} \
|
||||
--member serviceAccount:algo-vpn@${PROJECT_ID}.iam.gserviceaccount.com \
|
||||
--role roles/iam.serviceAccountUser
|
||||
|
||||
## Enable the services
|
||||
gcloud services enable compute.googleapis.com
|
||||
|
||||
./algo -e "provider=gce" -e "gce_credentials_file=$(pwd)/configs/gce.json"
|
||||
|
||||
```
|
||||
|
||||
**Attention:** take care of the `configs/gce.json` file, which contains the credentials to manage your Google Cloud account, including creating and deleting servers in this project.
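One simple precaution (an optional suggestion, not something Algo requires) is to make the key unreadable by other local users:

```shell
chmod 600 configs/gce.json
```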
|
||||
|
||||
|
||||
There are more advanced arguments available for deployment [using Ansible](deploy-from-ansible.md)
|
8
docs/cloud-vultr.md
Normal file
|
@ -0,0 +1,8 @@
|
|||
### Configuration file
|
||||
|
||||
You need to create a configuration file in INI format with your API key (https://my.vultr.com/settings/#settingsapi):
|
||||
|
||||
```
|
||||
[default]
|
||||
key = <your api key>
|
||||
```
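Once the file exists, you can point Algo at it when deploying from Ansible. A minimal sketch, assuming the file is saved as `~/.vultr.ini` and that `vultr_config` takes the path to it (see [deploy-from-ansible](deploy-from-ansible.md) for the full list of variables):

```shell
ansible-playbook main.yml -e "provider=vultr vultr_config=$HOME/.vultr.ini region=<region>"
```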
|
|
@ -11,74 +11,81 @@ You can deploy Algo non-interactively by running the Ansible playbooks directly
|
|||
Here is a full example for DigitalOcean:
|
||||
|
||||
```shell
|
||||
ansible-playbook deploy.yml -t digitalocean,vpn,cloud -e 'do_access_token=my_secret_token do_server_name=algo.local do_region=ams2'
|
||||
ansible-playbook main.yml -e "provider=digitalocean
|
||||
server_name=algo
|
||||
ondemand_cellular=false
|
||||
ondemand_wifi=false
|
||||
local_dns=true
|
||||
ssh_tunneling=true
|
||||
windows=false
|
||||
store_cakey=true
|
||||
region=ams3
|
||||
do_token=token"
|
||||
```
|
||||
|
||||
See below for more information about providers and extra variables
|
||||
|
||||
### Variables
|
||||
|
||||
- `provider` - (Required) The provider to use. See possible values below
|
||||
- `server_name` - (Required) Server name. Default: algo
|
||||
- `ondemand_cellular` (Optional) VPN On Demand when connected to cellular networks. Default: false
|
||||
- `ondemand_wifi` - (Optional. See `ondemand_wifi_exclude`) VPN On Demand when connected to WiFi networks. Default: false
|
||||
- `ondemand_wifi_exclude` (Required if `ondemand_wifi` set) - WiFi networks to exclude from using the VPN. Comma-separated values
|
||||
- `local_dns` - (Optional) Enable a DNS resolver. Default: false
|
||||
- `ssh_tunneling` - (Optional) Enable SSH tunneling for each user. Default: false
|
||||
- `windows` - (Optional) Enables compatible ciphers and key exchange to support Windows clients, less secure. Default: false
|
||||
- `store_cakey` - (Optional) Whether or not keep the CA key (required to add users in the future, but less secure). Default: false
|
||||
|
||||
If any of these variables are left unspecified, Ansible will prompt the user for input.
|
||||
|
||||
### Ansible roles
|
||||
|
||||
Required tags:
|
||||
|
||||
- cloud
|
||||
Roles can be activated by specifying an extra variable `provider`
|
||||
|
||||
Cloud roles:
|
||||
|
||||
- role: cloud-digitalocean, tags: digitalocean
|
||||
- role: cloud-ec2, tags: ec2
|
||||
- role: cloud-gce, tags: gce
|
||||
- role: cloud-digitalocean, provider: digitalocean
|
||||
- role: cloud-ec2, provider: ec2
|
||||
- role: cloud-vultr, provider: vultr
|
||||
- role: cloud-gce, provider: gce
|
||||
- role: cloud-azure, provider: azure
|
||||
- role: cloud-scaleway, provider: scaleway
|
||||
- role: cloud-openstack, provider: openstack
|
||||
|
||||
Server roles:
|
||||
|
||||
- role: vpn, tags: vpn
|
||||
- role: dns_adblocking, tags: dns, adblock
|
||||
- role: security, tags: security
|
||||
- role: ssh_tunneling, tags: ssh_tunneling
|
||||
- role: vpn
|
||||
- role: dns_adblocking
|
||||
- role: dns_encryption
|
||||
- role: ssh_tunneling
|
||||
- role: wireguard
|
||||
|
||||
Note: The `vpn` role generates Apple profiles with On-Demand Wifi and Cellular if you pass the following variables:
|
||||
|
||||
- OnDemandEnabled_WIFI=Y
|
||||
- OnDemandEnabled_WIFI_EXCLUDE=HomeNet
|
||||
- OnDemandEnabled_Cellular=Y
|
||||
- ondemand_wifi: true
|
||||
- ondemand_wifi_exclude: HomeNet,OfficeWifi
|
||||
- ondemand_cellular: true
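For example, appended to the extra variables of the deployment command shown above (the network names are illustrative):

```shell
ansible-playbook main.yml -e "provider=digitalocean
server_name=algo
region=ams3
do_token=token
ondemand_cellular=true
ondemand_wifi=true
ondemand_wifi_exclude=HomeNet,OfficeWifi"
```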
|
||||
|
||||
### Local Installation
|
||||
|
||||
Required tags:
|
||||
|
||||
- local
|
||||
- role: local, provider: local
|
||||
|
||||
Required variables:
|
||||
|
||||
- server_ip
|
||||
- server_user
|
||||
- IP_subject_alt_name
|
||||
- server - IP address of your server
|
||||
- ca_password - Password for the private CA key
|
||||
|
||||
Note that by default, the iptables rules on your existing server will be overwritten. If you don't want to overwrite the iptables rules, you can use the `--skip-tags iptables` flag, for example:
|
||||
|
||||
```shell
|
||||
ansible-playbook deploy.yml -t local,vpn --skip-tags iptables -e 'server_ip=172.217.2.238 server_user=algo IP_subject_alt_name=172.217.2.238'
|
||||
```
|
||||
Note that by default, the iptables rules on your existing server will be overwritten. If you don't want to overwrite the iptables rules, you can use the `--skip-tags iptables` flag.
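A sketch of what that looks like with the local provider (the addresses and user are illustrative):

```shell
ansible-playbook main.yml --skip-tags iptables -e "provider=local server=172.217.2.238 endpoint=172.217.2.238 ssh_user=algo"
```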
|
||||
|
||||
### Digital Ocean
|
||||
|
||||
Required variables:
|
||||
|
||||
- do_access_token
|
||||
- do_server_name
|
||||
- do_region
|
||||
- do_token
|
||||
- region
|
||||
|
||||
Possible options for `do_region`:
|
||||
|
||||
- ams2
|
||||
- ams3
|
||||
- fra1
|
||||
- lon1
|
||||
- nyc1
|
||||
- nyc2
|
||||
- nyc3
|
||||
- sfo1
|
||||
- sfo2
|
||||
- sgp1
|
||||
- tor1
|
||||
- blr1
|
||||
Possible options can be gathered by calling https://api.digitalocean.com/v2/regions
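For example, with `curl` and `jq` installed (an assumption on my part), the current region slugs can be listed with:

```shell
curl -s -H "Authorization: Bearer $DO_TOKEN" \
  "https://api.digitalocean.com/v2/regions" | jq -r '.regions[].slug'
```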
|
||||
|
||||
### Amazon EC2
|
||||
|
||||
|
@ -86,27 +93,13 @@ Required variables:
|
|||
|
||||
- aws_access_key
|
||||
- aws_secret_key
|
||||
- aws_server_name
|
||||
- region
|
||||
|
||||
Possible options for `region`:
|
||||
Possible options can be gathered via cli `aws ec2 describe-regions`
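For example, assuming the AWS CLI is configured with your credentials:

```shell
aws ec2 describe-regions --query 'Regions[].RegionName' --output text
```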
|
||||
|
||||
- us-east-1
|
||||
- us-east-2
|
||||
- us-west-1
|
||||
- us-west-2
|
||||
- ap-south-1
|
||||
- ap-northeast-2
|
||||
- ap-southeast-1
|
||||
- ap-southeast-2
|
||||
- ap-northeast-1
|
||||
- eu-central-1
|
||||
- eu-west-1
|
||||
- eu-west-2
|
||||
Additional variables:
|
||||
|
||||
Additional tags:
|
||||
|
||||
- [encrypted](https://aws.amazon.com/blogs/aws/new-encrypted-ebs-boot-volumes/) (enabled by default)
|
||||
- [encrypted](https://aws.amazon.com/blogs/aws/new-encrypted-ebs-boot-volumes/) - Encrypted EBS boot volume. Boolean (Default: false)
|
||||
|
||||
#### Minimum required IAM permissions for deployment:
|
||||
|
||||
|
@ -120,6 +113,7 @@ Additional tags:
|
|||
"Action": [
|
||||
"ec2:DescribeImages",
|
||||
"ec2:DescribeKeyPairs",
|
||||
"ec2:DescribeRegions",
|
||||
"ec2:ImportKeyPair"
|
||||
],
|
||||
"Resource": [
|
||||
|
@ -178,43 +172,76 @@ Additional tags:
|
|||
|
||||
Required variables:
|
||||
|
||||
- credentials_file
|
||||
- gce_server_name
|
||||
- ssh_public_key
|
||||
- zone
|
||||
- gce_credentials_file
|
||||
- [region](https://cloud.google.com/compute/docs/regions-zones/)
|
||||
|
||||
Possible options for `zone`:
|
||||
### Vultr
|
||||
|
||||
- us-west1-a
|
||||
- us-west1-b
|
||||
- us-west1-c
|
||||
- us-central1-a
|
||||
- us-central1-b
|
||||
- us-central1-c
|
||||
- us-central1-f
|
||||
- us-east4-a
|
||||
- us-east4-b
|
||||
- us-east4-c
|
||||
- us-east1-b
|
||||
- us-east1-c
|
||||
- us-east1-d
|
||||
- europe-west1-b
|
||||
- europe-west1-c
|
||||
- europe-west1-d
|
||||
- europe-west2-a
|
||||
- europe-west2-b
|
||||
- europe-west2-c
|
||||
- europe-west3-a
|
||||
- europe-west3-b
|
||||
- europe-west3-c
|
||||
- asia-southeast1-a
|
||||
- asia-southeast1-b
|
||||
- asia-east1-a
|
||||
- asia-east1-b
|
||||
- asia-east1-c
|
||||
- asia-northeast1-a
|
||||
- asia-northeast1-b
|
||||
- asia-northeast1-c
|
||||
- australia-southeast1-a
|
||||
- australia-southeast1-b
|
||||
- australia-southeast1-c
|
||||
Required variables:
|
||||
|
||||
- [vultr_config](https://github.com/trailofbits/algo/blob/master/docs/cloud-vultr.md)
|
||||
- [region](https://api.vultr.com/v1/regions/list)
|
||||
|
||||
### Azure
|
||||
|
||||
Required variables:
|
||||
|
||||
- azure_secret
|
||||
- azure_tenant
|
||||
- azure_client_id
|
||||
- azure_subscription_id
|
||||
- [region](https://azure.microsoft.com/en-us/global-infrastructure/regions/)
|
||||
|
||||
### Lightsail
|
||||
|
||||
Required variables:
|
||||
|
||||
- aws_access_key
|
||||
- aws_secret_key
|
||||
- region
|
||||
|
||||
Possible options can be gathered via cli `aws lightsail get-regions`
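For example, again assuming a configured AWS CLI:

```shell
aws lightsail get-regions --query 'regions[].name' --output text
```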
|
||||
|
||||
### Scaleway
|
||||
|
||||
Required variables:
|
||||
|
||||
- [scaleway_token](https://www.scaleway.com/docs/generate-an-api-token/)
|
||||
- [scaleway_org](https://cloud.scaleway.com/#/billing)
|
||||
- region
|
||||
|
||||
Possible regions:
|
||||
|
||||
- ams1
|
||||
- par1
|
||||
|
||||
### OpenStack
|
||||
|
||||
You need to source the rc file before running Algo. Download it from the OpenStack dashboard under Compute > API Access and source it in your shell (e.g., `source /tmp/dhc-openrc.sh`).
|
||||
|
||||
|
||||
### Local
|
||||
|
||||
Required variables:
|
||||
|
||||
- server - IP or hostname to access the server via SSH
|
||||
- endpoint - Public IP address of your server
|
||||
- ssh_user
|
||||
|
||||
|
||||
### Update users
|
||||
|
||||
Playbook:
|
||||
|
||||
```
|
||||
users.yml
|
||||
```
|
||||
|
||||
Required variables:
|
||||
|
||||
- server - IP or hostname to access the server via SSH
|
||||
- ca_password - Password to access the CA key
|
||||
|
||||
Tags required:
|
||||
|
||||
- update-users
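A sketch of the resulting command (the server address is illustrative):

```shell
ansible-playbook users.yml -e "server=1.2.3.4 ca_password=my_ca_password" -t update-users
```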
|
||||
|
|
|
@ -4,7 +4,7 @@ While it is not possible to run your Algo server from within a Docker container,
|
|||
|
||||
## Limitations
|
||||
|
||||
1. [Advanced](ADVANCED.md) installations are not currently supported; you must use the interactive `algo` script.
|
||||
1. [Advanced](deploy-from-ansible.md) installations are not currently supported; you must use the interactive `algo` script.
|
||||
2. This has not yet been tested with user namespacing enabled.
|
||||
3. If you're running this on Windows, take care when editing files under `configs/` to ensure that line endings are set appropriately for Unix systems.
|
||||
|
||||
|
@ -13,7 +13,7 @@ While it is not possible to run your Algo server from within a Docker container,
|
|||
1. Install [Docker](https://www.docker.com/community-edition#/download) -- setup and configuration is not covered here
|
||||
2. Create a local directory to hold your VPN configs (e.g. `C:\Users\trailofbits\Documents\VPNs\`)
|
||||
3. Create a local copy of [config.cfg](https://github.com/trailofbits/algo/blob/master/config.cfg), with required modifications (e.g. `C:\Users\trailofbits\Documents\VPNs\config.cfg`)
|
||||
4. Run the Docker container, mounting your configurations appropriately:
|
||||
4. Run the Docker container, mounting your configurations appropriately (assuming the container is named `trailofbits/algo` with a tag `latest`):
|
||||
- From Windows:
|
||||
```powershell
|
||||
C:\Users\trailofbits> docker run --cap-drop=all -it \
|
||||
|
@ -22,7 +22,7 @@ While it is not possible to run your Algo server from within a Docker container,
|
|||
```
|
||||
- From Linux:
|
||||
```bash
|
||||
$ docker run --cap-drop-all -it \
|
||||
$ docker run --cap-drop=all -it \
|
||||
-v /home/trailofbits/Documents/VPNs:/data \
|
||||
trailofbits/algo:latest
|
||||
```
|
||||
|
@ -61,7 +61,7 @@ Docker themselves provide a concept of [Content Trust](https://docs.docker.com/e
|
|||
## Future Improvements
|
||||
|
||||
1. Even though we're taking care to drop all capabilities to minimize the impact of running as root, we can probably include not only a `seccomp` profile, but also AppArmor and/or SELinux profiles as well.
|
||||
2. The Docker image doesn't natively support [advanced](ADVANCED.md) Algo deployments, which is useful for scripting. This can be done by launching an interactive shell and running the commands yourself.
|
||||
2. The Docker image doesn't natively support [advanced](deploy-from-ansible.md) Algo deployments, which is useful for scripting. This can be done by launching an interactive shell and running the commands yourself.
|
||||
3. The way configuration is passed into and out of the container is a bit kludgy. Hopefully future improvements in Docker volumes will make this a bit easier to handle.
|
||||
|
||||
## Advanced Usage
|
|
@ -26,5 +26,7 @@ device crypto
|
|||
## Installation
|
||||
|
||||
```shell
|
||||
ansible-playbook deploy.yml -t local,vpn -e "server_ip=$server_ip server_user=$server_user IP_subject_alt_name=$server_ip Store_CAKEY=N" --skip-tags cloud
|
||||
ansible-playbook main.yml -e "provider=local"
|
||||
```
|
||||
|
||||
And follow the instructions
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
# Local deployment
|
||||
|
||||
It is possible to download the Algo scripts to your own Ubuntu 16.04 server and run the scripts locally.
|
||||
It is possible to download the Algo scripts to your own Ubuntu 18.04 server and run the scripts locally.
|
||||
|
||||
In order to start, you need to install Ansible. Installing Ansible via pip requires pulling in a lot of dependencies, including a full compiler suite. It would be easier to use apt, however, Ubuntu 16.04 only comes with Ansible 2.0.0.2. The easiest solution is to install the Ansible PPA for a newer version of Ansible via apt, however, using a PPA requires installing `software-properties-common`.
|
||||
In order to start, you need to install Ansible. Installing Ansible via pip requires pulling in a lot of dependencies, including a full compiler suite. It would be easier to use apt, however, Ubuntu 18.04 only comes with Ansible 2.0.0.2. The easiest solution is to install the Ansible PPA for a newer version of Ansible via apt, however, using a PPA requires installing `software-properties-common`.
|
||||
|
||||
tl;dr:
|
||||
|
||||
```shell
|
||||
sudo apt-get install software-properties-common && sudo apt-add-repository ppa:ansible/ansible
|
||||
sudo apt-get update && sudo apt-get install ansible python-pip build-essential python-dev
|
||||
sudo apt-get update && sudo apt-get install ansible python-pip build-essential python-dev libssl-dev libffi-dev
|
||||
pip install virtualenv
|
||||
pip install --upgrade pip
|
||||
git clone https://github.com/trailofbits/algo
|
||||
|
@ -17,4 +17,4 @@ python -m virtualenv env && source env/bin/activate && python -m pip install -U
|
|||
./algo
|
||||
```
|
||||
|
||||
**Warning**: If you run Algo on your existing server, the iptables rules will be overwritten. If you don't want to overwrite the rules, you must deploy via `ansible-playbook` and skip the `iptables` tag as described in [deploy-from-ansible.md](deploy-from-ansible.md).
|
||||
**Warning**: Algo is intended to be run on a standalone server. If you run Algo on your existing server, the iptables rules will be overwritten. If you don't want to overwrite the rules, you must deploy via `ansible-playbook` and skip the `iptables` tag as described in [deploy-from-ansible.md](deploy-from-ansible.md). Other changes are also made, which can break other services running on your server (web, mail, etc.).
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
Algo officially supports DigitalOcean, Amazon Web Services, Microsoft Azure, and Google Cloud Engine. If you want to deploy Algo on another virtual hosting provider, that provider must support:
|
||||
|
||||
1. the base operating system image that Algo uses (Ubuntu 16.04), and
|
||||
1. the base operating system image that Algo uses (Ubuntu 18.04), and
|
||||
2. a minimum of certain kernel modules required for the strongSwan IPsec server.
|
||||
|
||||
Please see the [Required Kernel Modules](https://wiki.strongswan.org/projects/strongswan/wiki/KernelModules) documentation from strongSwan for a list of the specific required modules and a script to check for them. As a first step, we recommend running their shell script to determine initial compatibility with your new hosting provider.
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
* [Where did the name "Algo" come from?](#where-did-the-name-algo-come-from)
|
||||
* [Can DNS filtering be disabled?](#can-dns-filtering-be-disabled)
|
||||
* [Wasn't IPSEC backdoored by the US government?](#wasnt-ipsec-backdoored-by-the-us-government)
|
||||
* [What inbound ports are used?](#what-inbound-ports-are-used)
|
||||
|
||||
## Has Algo been audited?
|
||||
|
||||
|
@ -70,3 +71,7 @@ No.
|
|||
> It's interesting that the bug was fixed without an advisory (oh to be a fly on the wall on ICB that day; Theo had a, um, a, "way" with his dev team). On the other hand, we don't know what releases of OpenBSD actually had the bug right now.
|
||||
>
|
||||
> It seems vanishingly unlikely that there could have been anything deliberate about this series of changes. You are unlikely to find anyone who will impugn Angelos. Meanwhile, the diffs tell exactly the opposite of the story that Greg Perry told.
|
||||
|
||||
## What inbound ports are used?
|
||||
|
||||
You should only need 22/TCP, 500/UDP, 4500/UDP, and 51820/UDP opened on any firewall that sits between your clients and your Algo server.
|
||||
|
|
BIN
docs/images/aws-ec2-attach-policy.png
Normal file
After Width: | Height: | Size: 96 KiB |
BIN
docs/images/aws-ec2-new-policy-review.png
Normal file
After Width: | Height: | Size: 102 KiB |
BIN
docs/images/aws-ec2-new-policy.png
Normal file
After Width: | Height: | Size: 85 KiB |
BIN
docs/images/aws-ec2-new-user-confirm.png
Normal file
After Width: | Height: | Size: 77 KiB |
BIN
docs/images/aws-ec2-new-user-csv.png
Normal file
After Width: | Height: | Size: 76 KiB |
BIN
docs/images/aws-ec2-new-user-name.png
Normal file
After Width: | Height: | Size: 102 KiB |
BIN
docs/images/aws-ec2-new-user.png
Normal file
After Width: | Height: | Size: 110 KiB |
|
@ -12,10 +12,10 @@
|
|||
* Cloud setup
|
||||
- Configure [Azure](cloud-azure.md)
|
||||
- Configure [DigitalOcean](cloud-do.md)
|
||||
- Configure [Vultr](cloud-vultr.md)
|
||||
* Advanced Deployment
|
||||
- Deploy to your own [FreeBSD](deploy-to-freebsd.md) server
|
||||
- Deploy to your own [Ubuntu 16.04](deploy-to-ubuntu.md) server
|
||||
- Deploy to your own [Ubuntu 18.04](deploy-to-ubuntu.md) server
|
||||
- Deploy to an [unsupported cloud provider](deploy-to-unsupported-cloud.md)
|
||||
* [FAQ](faq.md)
|
||||
* [Troubleshooting](troubleshooting.md)
|
||||
|
||||
|
|
|
@ -1,15 +1,23 @@
|
|||
# Troubleshooting
|
||||
|
||||
First of all, check [this](https://github.com/trailofbits/algo#features) and ensure that you are deploying to a supported Ubuntu version.
|
||||
|
||||
* [Installation Problems](#installation-problems)
|
||||
* [Error: "You have not agreed to the Xcode license agreements"](#error-you-have-not-agreed-to-the-xcode-license-agreements)
|
||||
* [Error: checking whether the C compiler works... no](#error-checking-whether-the-c-compiler-works-no)
|
||||
* [Error: "fatal error: 'openssl/opensslv.h' file not found"](#error-fatal-error-opensslopensslvh-file-not-found)
|
||||
* [Error: "TypeError: must be str, not bytes"](#error-typeerror-must-be-str-not-bytes)
|
||||
* [Error: "ansible-playbook: command not found"](#error-ansible-playbook-command-not-found)
|
||||
* [Error: "Could not fetch URL ... TLSV1_ALERT_PROTOCOL_VERSION](#could-not-fetch-url--tlsv1_alert_protocol_version)
|
||||
* [Bad owner or permissions on .ssh](#bad-owner-or-permissions-on-ssh)
|
||||
* [The region you want is not available](#the-region-you-want-is-not-available)
|
||||
* [AWS: SSH permission denied with an ECDSA key](#aws-ssh-permission-denied-with-an-ecdsa-key)
|
||||
* [AWS: "Deploy the template" fails with CREATE_FAILED](#aws-deploy-the-template-fails-with-create_failed)
|
||||
* [AWS: SSH permission denied with an ECDSA key](#aws-ssh-permission-denied-with-an-ecdsa-key)
|
||||
* [AWS: "Deploy the template" fails with CREATE_FAILED](#aws-deploy-the-template-fails-with-create_failed)
|
||||
* [AWS: not authorized to perform: cloudformation:UpdateStack](#aws-not-authorized-to-perform-cloudformationupdatestack)
|
||||
* [DigitalOcean: error tagging resource 'xxxxxxxx': param is missing or the value is empty: resources](#digitalocean-error-tagging-resource)
|
||||
* [Windows: The value of parameter linuxConfiguration.ssh.publicKeys.keyData is invalid](#windows-the-value-of-parameter-linuxconfigurationsshpublickeyskeydata-is-invalid)
|
||||
* [Docker: Failed to connect to the host via ssh](#docker-failed-to-connect-to-the-host-via-ssh)
|
||||
* [Wireguard: Unable to find 'configs/...' in expected paths](#wireguard-unable-to-find-configs-in-expected-paths)
|
||||
* [Connection Problems](#connection-problems)
|
||||
* [I'm blocked or get CAPTCHAs when I access certain websites](#im-blocked-or-get-captchas-when-i-access-certain-websites)
|
||||
* [I want to change the list of trusted Wifi networks on my Apple device](#i-want-to-change-the-list-of-trusted-wifi-networks-on-my-apple-device)
|
||||
|
@ -18,7 +26,9 @@
|
|||
* [I can't get my router to connect to the Algo server](#i-cant-get-my-router-to-connect-to-the-algo-server)
|
||||
* [I can't get Network Manager to connect to the Algo server](#i-cant-get-network-manager-to-connect-to-the-algo-server)
|
||||
* [Various websites appear to be offline through the VPN](#various-websites-appear-to-be-offline-through-the-vpn)
|
||||
* [Clients appear stuck in a reconnection loop](#clients-appear-stuck-in-a-reconnection-loop)
|
||||
* ["Error 809" or IKE_AUTH requests that never make it to the server](#error-809-or-ike_auth-requests-that-never-make-it-to-the-server)
|
||||
* [Windows: Parameter is incorrect](#windows-parameter-is-incorrect)
|
||||
* [I have a problem not covered here](#i-have-a-problem-not-covered-here)
|
||||
|
||||
## Installation Problems
|
||||
|
@ -75,7 +85,7 @@ You don't have a working compiler installed. You should install the XCode compil
|
|||
|
||||
### Error: "fatal error: 'openssl/opensslv.h' file not found"
|
||||
|
||||
On macOS, you tried to install pycrypto and encountered the following error:
|
||||
On macOS, you tried to install `cryptography` and encountered the following error:
|
||||
|
||||
```
|
||||
build/temp.macosx-10.12-intel-2.7/_openssl.c:434:10: fatal error: 'openssl/opensslv.h' file not found
|
||||
|
@ -94,7 +104,7 @@ Command /usr/bin/python -c "import setuptools, tokenize;__file__='/private/tmp/p
|
|||
Storing debug log for failure in /Users/algore/Library/Logs/pip.log
|
||||
```
|
||||
|
||||
You are running an old version of `pip` that cannot build the `pycrypto` dependency. Upgrade to a new version of `pip` by running `sudo pip install -U pip`.
|
||||
You are running an old version of `pip` that cannot download the binary `cryptography` dependency. Upgrade to a new version of `pip` by running `sudo pip install -U pip`.
|
||||
|
||||
### Error: "TypeError: must be str, not bytes"
|
||||
|
||||
|
@ -114,6 +124,22 @@ You tried to install Algo and you see an error that reads "ansible-playbook: com
|
|||
|
||||
You did not finish step 4 in the installation instructions, "[Install Algo's remaining dependencies](https://github.com/trailofbits/algo#deploy-the-algo-server)." Algo depends on [Ansible](https://github.com/ansible/ansible), an automation framework, and this error indicates that you do not have Ansible installed. Ansible is installed by `pip` when you run `python -m pip install -r requirements.txt`. You must complete the installation instructions to run the Algo server deployment process.
|
||||
|
||||
### Could not fetch URL ... TLSV1_ALERT_PROTOCOL_VERSION
|
||||
|
||||
You tried to install Algo and you received an error like this one:
|
||||
|
||||
```
|
||||
Could not fetch URL https://pypi.python.org/simple/secretstorage/: There was a problem confirming the ssl certificate: [SSL: TLSV1_ALERT_PROTOCOL_VERSION] tlsv1 alert protocol version (_ssl.c:590) - skipping
|
||||
Could not find a version that satisfies the requirement SecretStorage<3 (from -r requirements.txt (line 2)) (from versions: )
|
||||
No matching distribution found for SecretStorage<3 (from -r requirements.txt (line 2))
|
||||
```
|
||||
|
||||
It's time to upgrade your Python.
|
||||
|
||||
`brew upgrade python2`
|
||||
|
||||
You can also download Python 2.7.x from python.org.
|
||||
|
||||
### Bad owner or permissions on .ssh
|
||||
|
||||
You tried to run Algo and it quickly exits with an error about a bad owner or permissions:
|
||||
|
@ -148,7 +174,7 @@ In order to fix this issue, delete the `algo.pem` and `algo.pem.pub` keys from y
|
|||
|
||||
### AWS: "Deploy the template fails" with CREATE_FAILED
|
||||
|
||||
You tried to deploy to Algo to AWS and you received an error like this one:
|
||||
You tried to deploy Algo to AWS and you received an error like this one:
|
||||
|
||||
```
|
||||
TASK [cloud-ec2 : Make a cloudformation template] ******************************
|
||||
|
@ -160,7 +186,87 @@ fatal: [localhost]: FAILED! => {"changed": true, "events": ["StackEvent AWS::Clo
|
|||
|
||||
Algo builds a [Cloudformation](https://aws.amazon.com/cloudformation/) template to deploy to AWS. You can find the entire contents of the Cloudformation template in `configs/algo.yml`. In order to troubleshoot this issue, login to the AWS console, go to the Cloudformation service, find the failed deployment, click the events tab, and find the corresponding "CREATE_FAILED" events. Note that all AWS resources created by Algo are tagged with `Environment => Algo` for easy identification.
|
||||
|
||||
In many cases, failed deployments are the result of [service limits](http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) being reached, such as "CREATE_FAILED AWS::EC2::VPC VPC The maximum number of VPCs has been reached." In these cases, you must [contact AWS support](https://console.aws.amazon.com/support/home?region=us-east-1#/case/create?issueType=service-limit-increase&limitType=service-code-direct-connect) to increase the limits on your account.
|
||||
In many cases, failed deployments are the result of [service limits](http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) being reached, such as "CREATE_FAILED AWS::EC2::VPC VPC The maximum number of VPCs has been reached." In these cases, you must either [delete the VPCs from previous deployments](https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/working-with-vpcs.html#VPC_Deleting), or [contact AWS support](https://console.aws.amazon.com/support/home?region=us-east-1#/case/create?issueType=service-limit-increase&limitType=service-code-direct-connect) to increase the limits on your account.
|
||||
|
||||
### AWS: not authorized to perform: cloudformation:UpdateStack
|
||||
|
||||
You tried to deploy Algo to AWS and you received an error like this one:
|
||||
|
||||
```
|
||||
TASK [cloud-ec2 : Deploy the template] *****************************************
|
||||
fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "msg": "User: arn:aws:iam::082851645362:user/algo is not authorized to perform: cloudformation:UpdateStack on resource: arn:aws:cloudformation:us-east-1:082851645362:stack/algo/*"}
|
||||
```
|
||||
|
||||
This error indicates you already have Algo deployed to Cloudformation. Need to [delete it](cloud-amazon-ec2.md#cleanup) first, then re-deploy.
|
||||
|
||||
### DigitalOcean: error tagging resource
|
||||
|
||||
You tried to deploy Algo to DigitalOcean and you received an error like this one:
|
||||
|
||||
```
|
||||
TASK [cloud-digitalocean : Tag the droplet] ************************************
|
||||
failed: [localhost] (item=staging) => {"failed": true, "item": "staging", "msg": "error tagging resource '73204383': param is missing or the value is empty: resources"}
|
||||
failed: [localhost] (item=dbserver) => {"failed": true, "item": "dbserver", "msg": "error tagging resource '73204383': param is missing or the value is empty: resources"}
|
||||
```
|
||||
|
||||
This error occurs because DigitalOcean changed its API to treat the tag argument as a string instead of a number.
|
||||
|
||||
1. Download [doctl](https://github.com/digitalocean/doctl)
|
||||
2. Run `doctl auth init`; it will ask you for your token which you can get (or generate) on the API tab at DigitalOcean
|
||||
3. Once you are authorized on DO, you can run `doctl compute tag list` to see the list of tags
|
||||
4. Run `doctl compute tag delete environment:algo --force` to delete the environment:algo tag
|
||||
5. Finally run `doctl compute tag list` to make sure that the tag has been deleted
|
||||
6. Run algo as directed
|
||||
|
||||
### Windows: The value of parameter linuxConfiguration.ssh.publicKeys.keyData is invalid
|
||||
|
||||
You tried to deploy Algo from Windows and you received an error like this one:
|
||||
|
||||
```
|
||||
TASK [cloud-azure : Create an instance].
|
||||
fatal: [localhost]: FAILED! => {"changed": false,
|
||||
"msg": "Error creating or updating virtual machine AlgoVPN - Azure Error:
|
||||
InvalidParameter\n
|
||||
Message: The value of parameter linuxConfiguration.ssh.publicKeys.keyData is invalid.\n
|
||||
Target: linuxConfiguration.ssh.publicKeys.keyData"}
|
||||
```
|
||||
|
||||
This is related to [the chmod issue](https://github.com/Microsoft/WSL/issues/81) inside /mnt directory which is NTFS. The fix is to place Algo outside of /mnt directory.
|
||||
|
||||
### Docker: Failed to connect to the host via ssh
|
||||
|
||||
You tried to deploy Algo from Docker and you received an error like this one:
|
||||
|
||||
```
|
||||
Failed to connect to the host via ssh:
|
||||
Warning: Permanently added 'xxx.xxx.xxx.xxx' (ECDSA) to the list of known hosts.\r\n
|
||||
Control socket connect(/root/.ansible/cp/6d9d22e981): Connection refused\r\n
|
||||
Failed to connect to new control master\r\n
|
||||
```
|
||||
|
||||
You need to add the following to the ansible.cfg in repo root:
|
||||
|
||||
```
|
||||
[ssh_connection]
|
||||
control_path_dir=/dev/shm/ansible_control_path
|
||||
```
|
||||
|
||||
### Wireguard: Unable to find 'configs/...' in expected paths
|
||||
|
||||
You tried to run Algo and you received an error like this one:
|
||||
|
||||
```
|
||||
TASK [wireguard : Generate public keys] ********************************************************************************
|
||||
[WARNING]: Unable to find 'configs/xxx.xxx.xxx.xxx/wireguard//private/dan' in expected paths.
|
||||
|
||||
fatal: [localhost]: FAILED! => {"msg": "An unhandled exception occurred while running the lookup plugin 'file'. Error was a <class 'ansible.errors.AnsibleError'>, original message: could not locate file in lookup: configs/xxx.xxx.xxx.xxx/wireguard//private/dan"}
|
||||
```
|
||||
This error is usually hit when using the local install option on a server that isn't Ubuntu 18.04. You should upgrade your server to Ubuntu 18.04. If this doesn't work, try removing `*.lock` files at /etc/wireguard/ as follows:
|
||||
|
||||
```shell
|
||||
sudo rm -rf /etc/wireguard/*.lock
|
||||
```
|
||||
Then immediately re-run `./algo`.
|
||||
|
||||
## Connection Problems
|
||||
|
||||
|
@ -196,9 +302,11 @@ You're trying to connect Ubuntu or Debian to the Algo server through the Network
|
|||
|
||||
### Various websites appear to be offline through the VPN
|
||||
|
||||
This issue appears intermittently due to issues with MTU size. If you experience this issue, we recommend [filing an issue](https://github.com/trailofbits/algo/issues/new) for assistance. Advanced users can troubleshoot the correct MTU size by retrying `ping` with the "don't fragment" bit set, then decreasing packet size until it works. This will determine the correct MTU size for your network, which you then need to update on your network adapter.
|
||||
This issue appears intermittently due to issues with MTU size. Different networks may require the MTU within a specific range to correctly pass traffic. We made an effort to set the MTU to the most conservative, most compatible size by default but problems may still occur.
|
||||
|
||||
E.g., On Linux (client -- Ubuntu 16.04), connect to your IPsec tunnel then use the following commands to determine the correct MTU size:
|
||||
Advanced users can troubleshoot the correct MTU size by retrying `ping` with the "don't fragment" bit set, then decreasing packet size until it works. This will determine the correct MTU size for your network, which you then need to update on your network adapter.
|
||||
|
||||
E.g., On Linux (client -- Ubuntu 18.04), connect to your IPsec tunnel then use the following commands to determine the correct MTU size:
|
||||
```
|
||||
$ ping -M do -s 1500 www.google.com
|
||||
PING www.google.com (74.125.22.147) 1500(1528) bytes of data.
|
||||
|
@ -209,6 +317,19 @@ Then, set the MTU size on your network adapter (wlan0 or eth0):
|
|||
$ sudo ifconfig wlan0 mtu 1438
|
||||
```
|
||||
|
||||
You can also set the `max_mss` variable to a new value in config.cfg, and then redeploy your server rather than reconfigure the current one in-place.
|
||||
|
||||
### Clients appear stuck in a reconnection loop
|
||||
|
||||
If you're using 'Connect on Demand' on iOS and your client device appears stuck in a reconnection loop after switching from WiFi to LTE or vice versa, you may want to try disabling DoS protection in strongSwan.
|
||||
|
||||
The configuration value can be found in `/etc/strongswan.d/charon.conf`. After making the change you must reload or restart ipsec.
|
||||
|
||||
Example command:
|
||||
```
|
||||
sed -i -e 's/#*.dos_protection = yes/dos_protection = no/' /etc/strongswan.d/charon.conf && ipsec restart
|
||||
```
|
||||
|
||||
### "Error 809" or IKE_AUTH requests that never make it to the server
|
||||
|
||||
On Windows, this issue may manifest with an error message that says "The network connection between your computer and the VPN server could not be established because the remote server is not responding... This is Error 809." On other operating systems, you may try to debug the issue by capturing packets with tcpdump and notice that, while IKE_SA_INIT request and responses are exchanged between the client and server, IKE_AUTH requests never make it to the server.
|
||||
|
@ -241,6 +362,29 @@ Then rerun the dependency installation explicitly using python 2.7
|
|||
python2.7 -m virtualenv --python=`which python2.7` env && source env/bin/activate && python2.7 -m pip install -U pip && python2.7 -m pip install -r requirements.txt
|
||||
```
|
||||
|
||||
### Windows: Parameter is incorrect
|
||||
|
||||
This problem may happen if you have recently redeployed your Algo VPN to a new server.
|
||||
|
||||
1. Clear the Networking caches:
|
||||
- Run CMD (click windows start menu, type 'cmd', right click on 'Command Prompt' and select "Run as Administrator").
|
||||
- Type the commands below:
|
||||
```
|
||||
netsh int ip reset
|
||||
netsh int ipv6 reset
|
||||
netsh winsock reset
|
||||
```
|
||||
|
||||
2. Restart your computer
3. Reset Device Manager adapters:
|
||||
- Open Device Manager
|
||||
- Find Network Adapters
|
||||
- Uninstall WAN Miniport drivers (IKEv2, IP, IPv6, etc)
|
||||
- Click Action > Scan for hardware changes
|
||||
- The adapters you just uninstalled should come back
|
||||
|
||||
The VPN connection should work again
|
||||
|
||||
## I have a problem not covered here
|
||||
|
||||
If you have an issue that you cannot solve with the guidance here, [join our Gitter](https://gitter.im/trailofbits/algo) and ask for help. If you think you found a new issue in Algo, [file an issue](https://github.com/trailofbits/algo/issues/new).
|
||||
|
|
138
input.yml
Normal file
|
@ -0,0 +1,138 @@
|
|||
---
|
||||
- name: Ask user for the input
|
||||
hosts: localhost
|
||||
tags: always
|
||||
vars:
|
||||
defaults:
|
||||
server_name: algo
|
||||
ondemand_cellular: false
|
||||
ondemand_wifi: false
|
||||
local_dns: false
|
||||
ssh_tunneling: false
|
||||
windows: false
|
||||
store_cakey: false
|
||||
providers_map:
|
||||
- { name: DigitalOcean, alias: digitalocean }
|
||||
- { name: Amazon Lightsail, alias: lightsail }
|
||||
- { name: Amazon EC2, alias: ec2 }
|
||||
- { name: Vultr, alias: vultr }
|
||||
- { name: Microsoft Azure, alias: azure }
|
||||
- { name: Google Compute Engine, alias: gce }
|
||||
- { name: Scaleway, alias: scaleway}
|
||||
- { name: OpenStack (DreamCompute optimised), alias: openstack }
|
||||
- { name: Install to existing Ubuntu 18.04 server (Advanced), alias: local }
|
||||
vars_files:
|
||||
- config.cfg
|
||||
|
||||
tasks:
|
||||
- pause:
|
||||
prompt: |
|
||||
What provider would you like to use?
|
||||
{% for p in providers_map %}
|
||||
{{ loop.index }}. {{ p['name']}}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired provider
|
||||
register: _algo_provider
|
||||
when: provider is undefined
|
||||
|
||||
- name: Set facts based on the input
|
||||
set_fact:
|
||||
algo_provider: "{{ provider | default(providers_map[_algo_provider.user_input|default(omit)|int - 1]['alias']) }}"
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Name the vpn server
|
||||
[algo]
|
||||
register: _algo_server_name
|
||||
when:
|
||||
- server_name is undefined
|
||||
- algo_provider != "local"
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Do you want macOS/iOS clients to enable "VPN On Demand" when connected to cellular networks?
|
||||
[y/N]
|
||||
register: _ondemand_cellular
|
||||
when: ondemand_cellular is undefined
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Do you want macOS/iOS clients to enable "VPN On Demand" when connected to Wi-Fi?
|
||||
[y/N]
|
||||
register: _ondemand_wifi
|
||||
when: ondemand_wifi is undefined
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
List the names of trusted Wi-Fi networks (if any) that macOS/iOS clients exclude from using the VPN
|
||||
(e.g., your home network. Comma-separated value, e.g., HomeNet,OfficeWifi,AlgoWiFi)
|
||||
register: _ondemand_wifi_exclude
|
||||
when:
|
||||
- ondemand_wifi_exclude is undefined
|
||||
- (ondemand_wifi|default(false)|bool) or
|
||||
(booleans_map[_ondemand_wifi.user_input|default(omit)]|default(false))
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Do you want to install a DNS resolver on this VPN server, to block ads while surfing?
|
||||
[y/N]
|
||||
register: _local_dns
|
||||
when: local_dns is undefined
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Do you want each user to have their own account for SSH tunneling?
|
||||
[y/N]
|
||||
register: _ssh_tunneling
|
||||
when: ssh_tunneling is undefined
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Do you want the VPN to support Windows 10 or Linux Desktop clients? (enables compatible ciphers and key exchange, less secure)
|
||||
[y/N]
|
||||
register: _windows
|
||||
when: windows is undefined
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Do you want to retain the CA key? (required to add users in the future, but less secure)
|
||||
[y/N]
|
||||
register: _store_cakey
|
||||
when: store_cakey is undefined
|
||||
|
||||
- name: Set facts based on the input
|
||||
set_fact:
|
||||
algo_server_name: >-
|
||||
{% if server_name is defined %}{% set _server = server_name %}
|
||||
{%- elif _algo_server_name.user_input is defined and _algo_server_name.user_input != "" %}{% set _server = _algo_server_name.user_input %}
|
||||
{%- else %}{% set _server = defaults['server_name'] %}{% endif -%}
|
||||
{{ _server | regex_replace('(?!\.)(\W|_)', '-') }}
|
||||
algo_ondemand_cellular: >-
|
||||
{% if ondemand_cellular is defined %}{{ ondemand_cellular | bool }}
|
||||
{%- elif _ondemand_cellular.user_input is defined and _ondemand_cellular.user_input != "" %}{{ booleans_map[_ondemand_cellular.user_input] | default(defaults['ondemand_cellular']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_ondemand_wifi: >-
|
||||
{% if ondemand_wifi is defined %}{{ ondemand_wifi | bool }}
|
||||
{%- elif _ondemand_wifi.user_input is defined and _ondemand_wifi.user_input != "" %}{{ booleans_map[_ondemand_wifi.user_input] | default(defaults['ondemand_wifi']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_ondemand_wifi_exclude: >-
|
||||
{% if ondemand_wifi_exclude is defined %}{{ ondemand_wifi_exclude }}
|
||||
{%- elif _ondemand_wifi_exclude.user_input is defined and _ondemand_wifi_exclude.user_input != "" %}{{ _ondemand_wifi_exclude.user_input }}
|
||||
{%- else %}_null{% endif %}
|
||||
algo_local_dns: >-
|
||||
{% if local_dns is defined %}{{ local_dns | bool }}
|
||||
{%- elif _local_dns.user_input is defined and _local_dns.user_input != "" %}{{ booleans_map[_local_dns.user_input] | default(defaults['local_dns']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_ssh_tunneling: >-
|
||||
{% if ssh_tunneling is defined %}{{ ssh_tunneling | bool }}
|
||||
{%- elif _ssh_tunneling.user_input is defined and _ssh_tunneling.user_input != "" %}{{ booleans_map[_ssh_tunneling.user_input] | default(defaults['ssh_tunneling']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_windows: >-
|
||||
{% if windows is defined %}{{ windows | bool }}
|
||||
{%- elif _windows.user_input is defined and _windows.user_input != "" %}{{ booleans_map[_windows.user_input] | default(defaults['windows']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_store_cakey: >-
|
||||
{% if store_cakey is defined %}{{ store_cakey | bool }}
|
||||
{%- elif _store_cakey.user_input is defined and _store_cakey.user_input != "" %}{{ booleans_map[_store_cakey.user_input] | default(defaults['store_cakey']) }}
|
||||
{%- else %}false{% endif %}
|
|
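The set_fact expressions above all follow the same precedence: a variable passed on the command line wins, then the interactive answer registered by the corresponding pause task, then the project default. A minimal sketch of how one such expression resolves, using plain jinja2 with a stubbed `bool` filter (Ansible provides `bool` natively; the variable values below are hypothetical):

```
from jinja2 import Environment

# Same three-way fallback as the playbook: extra-var, then prompt answer,
# then the default. The |bool filter is Ansible's, so stub it here.
env = Environment()
env.filters["bool"] = lambda v: str(v).lower() in ("true", "1", "yes", "y")

TEMPLATE = (
    "{% if ondemand_wifi is defined %}{{ ondemand_wifi | bool }}"
    "{%- elif user_input != '' %}{{ booleans_map[user_input] | default(default_value) }}"
    "{%- else %}false{% endif %}"
)

def resolve(extra_var=None, user_input="", default_value=False):
    ctx = {
        "booleans_map": {"Y": True, "y": True, "N": False, "n": False},
        "user_input": user_input,
        "default_value": default_value,
    }
    if extra_var is not None:
        ctx["ondemand_wifi"] = extra_var
    return env.from_string(TEMPLATE).render(**ctx)

print(resolve(extra_var=True))   # True  -- a command-line variable wins
print(resolve(user_input="y"))   # True  -- the prompt answer, mapped via booleans_map
print(resolve())                 # false -- neither set, falls back to the default
```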
@ -1,217 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: digital_ocean_tag
|
||||
short_description: Create and remove tag(s) on a DigitalOcean resource.
|
||||
description:
|
||||
- Create and remove tag(s) on a DigitalOcean resource.
|
||||
author: "Victor Volle (@kontrafiktion)"
|
||||
version_added: "2.2"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of the tag. The supported characters for names include
|
||||
alphanumeric characters, dashes, and underscores.
|
||||
required: true
|
||||
resource_id:
|
||||
description:
|
||||
- The ID of the resource to operate on.
|
||||
- The data type of resource_id changed from integer to string as of version 2.5.
|
||||
aliases: ['droplet_id']
|
||||
resource_type:
|
||||
description:
|
||||
- The type of resource to operate on. Currently, only tagging of
|
||||
droplets is supported.
|
||||
default: droplet
|
||||
choices: ['droplet']
|
||||
state:
|
||||
description:
|
||||
- Whether the tag should be present or absent on the resource.
|
||||
default: present
|
||||
choices: ['present', 'absent']
|
||||
api_token:
|
||||
description:
|
||||
- DigitalOcean api token.
|
||||
|
||||
notes:
|
||||
- Two environment variables can be used, DO_API_KEY and DO_API_TOKEN.
|
||||
They both refer to the v2 token.
|
||||
- As of Ansible 2.0, Version 2 of the DigitalOcean API is used.
|
||||
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: create a tag
|
||||
digital_ocean_tag:
|
||||
name: production
|
||||
state: present
|
||||
|
||||
- name: tag a resource, creating the tag if it does not exist
|
||||
digital_ocean_tag:
|
||||
name: "{{ item }}"
|
||||
resource_id: "73333005"
|
||||
state: present
|
||||
with_items:
|
||||
- staging
|
||||
- dbserver
|
||||
|
||||
- name: untag a resource
|
||||
digital_ocean_tag:
|
||||
name: staging
|
||||
resource_id: "73333005"
|
||||
state: absent
|
||||
|
||||
# Deleting a tag also untags all the resources that have previously been
|
||||
# tagged with it
|
||||
- name: remove a tag
|
||||
digital_ocean_tag:
|
||||
name: dbserver
|
||||
state: absent
|
||||
'''
|
||||
|
||||
|
||||
RETURN = '''
|
||||
data:
|
||||
description: a DigitalOcean Tag resource
|
||||
returned: success and no resource constraint
|
||||
type: dict
|
||||
sample: {
|
||||
"tag": {
|
||||
"name": "awesome",
|
||||
"resources": {
|
||||
"droplets": {
|
||||
"count": 0,
|
||||
"last_tagged": null
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
from traceback import format_exc
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.digital_ocean import DigitalOceanHelper
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
def core(module):
|
||||
state = module.params['state']
|
||||
name = module.params['name']
|
||||
resource_id = module.params['resource_id']
|
||||
resource_type = module.params['resource_type']
|
||||
|
||||
rest = DigitalOceanHelper(module)
|
||||
|
||||
# Check if api_token is valid or not
|
||||
response = rest.get('account')
|
||||
if response.status_code == 401:
|
||||
module.fail_json(msg='Failed to login using api_token, please verify '
|
||||
'validity of api_token')
|
||||
if state == 'present':
|
||||
response = rest.get('tags/{0}'.format(name))
|
||||
status_code = response.status_code
|
||||
resp_json = response.json
|
||||
changed = False
|
||||
if status_code == 200 and resp_json['tag']['name'] == name:
|
||||
changed = False
|
||||
else:
|
||||
# Ensure Tag exists
|
||||
response = rest.post("tags", data={'name': name})
|
||||
status_code = response.status_code
|
||||
resp_json = response.json
|
||||
if status_code == 201:
|
||||
changed = True
|
||||
elif status_code == 422:
|
||||
changed = False
|
||||
else:
|
||||
module.exit_json(changed=False, data=resp_json)
|
||||
|
||||
if resource_id is None:
|
||||
# No resource defined, we're done.
|
||||
module.exit_json(changed=changed, data=resp_json)
|
||||
else:
|
||||
# Check if resource is already tagged or not
|
||||
found = False
|
||||
url = "{0}?tag_name={1}".format(resource_type, name)
|
||||
if resource_type == 'droplet':
|
||||
url = "droplets?tag_name={0}".format(name)
|
||||
response = rest.get(url)
|
||||
status_code = response.status_code
|
||||
resp_json = response.json
|
||||
if status_code == 200:
|
||||
for resource in resp_json['droplets']:
|
||||
if not found and resource['id'] == int(resource_id):
|
||||
found = True
|
||||
break
|
||||
if not found:
|
||||
# If resource is not tagged, tag a resource
|
||||
url = "tags/{0}/resources".format(name)
|
||||
payload = {
|
||||
'resources': [{
|
||||
'resource_id': resource_id,
|
||||
'resource_type': resource_type}]}
|
||||
response = rest.post(url, data=payload)
|
||||
if response.status_code == 204:
|
||||
module.exit_json(changed=True)
|
||||
else:
|
||||
module.fail_json(msg="error tagging resource '{0}': {1}".format(resource_id, response.json["message"]))
|
||||
else:
|
||||
# Already tagged resource
|
||||
module.exit_json(changed=False)
|
||||
else:
|
||||
# Unable to find resource specified by user
|
||||
module.fail_json(msg=resp_json['message'])
|
||||
|
||||
elif state == 'absent':
|
||||
if resource_id:
|
||||
url = "tags/{0}/resources".format(name)
|
||||
payload = {
|
||||
'resources': [{
|
||||
'resource_id': resource_id,
|
||||
'resource_type': resource_type}]}
|
||||
response = rest.delete(url, data=payload)
|
||||
else:
|
||||
url = "tags/{0}".format(name)
|
||||
response = rest.delete(url)
|
||||
if response.status_code == 204:
|
||||
module.exit_json(changed=True)
|
||||
else:
|
||||
module.exit_json(changed=False, data=response.json)
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
name=dict(type='str', required=True),
|
||||
resource_id=dict(aliases=['droplet_id'], type='str'),
|
||||
resource_type=dict(choices=['droplet'], default='droplet'),
|
||||
state=dict(choices=['present', 'absent'], default='present'),
|
||||
api_token=dict(aliases=['API_TOKEN'], no_log=True),
|
||||
)
|
||||
)
|
||||
|
||||
try:
|
||||
core(module)
|
||||
except Exception as e:
|
||||
module.fail_json(msg=to_native(e), exception=format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
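For reference, the deleted digital_ocean_tag module above wraps the DigitalOcean v2 tags API. A rough sketch of the same flow with the `requests` library, under the assumption that a DO_API_TOKEN environment variable holds a read/write token; the tag name and droplet id below are placeholders taken from the module's own examples:

```
import os
import requests

API = "https://api.digitalocean.com/v2"
HEADERS = {"Authorization": "Bearer {}".format(os.environ["DO_API_TOKEN"])}

def ensure_tag(name):
    # 200 means the tag already exists; otherwise create it (201 on success).
    r = requests.get("{}/tags/{}".format(API, name), headers=HEADERS)
    if r.status_code != 200:
        requests.post("{}/tags".format(API), json={"name": name},
                      headers=HEADERS).raise_for_status()

def tag_droplet(name, droplet_id):
    # Attach the tag to one droplet; the API answers 204 No Content on success.
    payload = {"resources": [{"resource_id": str(droplet_id),
                              "resource_type": "droplet"}]}
    requests.post("{}/tags/{}/resources".format(API, name),
                  json=payload, headers=HEADERS).raise_for_status()

ensure_tag("Environment:Algo")
tag_droplet("Environment:Algo", 73333005)
```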
@ -1,216 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.1'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_ami_copy
|
||||
short_description: copies an AMI between AWS regions, returns the new image id
|
||||
description:
|
||||
- Copies AMI from a source region to a destination region. This module has a dependency on python-boto >= 2.5
|
||||
version_added: "2.0"
|
||||
options:
|
||||
source_region:
|
||||
description:
|
||||
- the source region that the AMI should be copied from
|
||||
required: true
|
||||
source_image_id:
|
||||
description:
|
||||
- the id of the image in the source region that should be copied
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- The name of the new image to copy
|
||||
required: true
|
||||
default: null
|
||||
description:
|
||||
description:
|
||||
- An optional human-readable string describing the contents and purpose of the new AMI.
|
||||
required: false
|
||||
default: null
|
||||
encrypted:
|
||||
description:
|
||||
- Whether or not to encrypt the target image
|
||||
required: false
|
||||
default: null
|
||||
version_added: "2.2"
|
||||
kms_key_id:
|
||||
description:
|
||||
- KMS key id used to encrypt image. If not specified, uses default EBS Customer Master Key (CMK) for your account.
|
||||
required: false
|
||||
default: null
|
||||
version_added: "2.2"
|
||||
wait:
|
||||
description:
|
||||
- wait for the copied AMI to be in state 'available' before returning.
|
||||
required: false
|
||||
default: false
|
||||
tags:
|
||||
description:
|
||||
- a hash/dictionary of tags to add to the new copied AMI; '{"key":"value"}' and '{"key":"value","key":"value"}'
|
||||
required: false
|
||||
default: null
|
||||
|
||||
author: Amir Moulavi <amir.moulavi@gmail.com>, Tim C <defunct@defunct.io>
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Basic AMI Copy
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
|
||||
# AMI copy wait until available
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
wait: yes
|
||||
register: image_id
|
||||
|
||||
# Named AMI copy
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
name: My-Awesome-AMI
|
||||
description: latest patch
|
||||
|
||||
# Tagged AMI copy
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
tags:
|
||||
Name: My-Super-AMI
|
||||
Patch: 1.2.3
|
||||
|
||||
# Encrypted AMI copy
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
encrypted: yes
|
||||
|
||||
# Encrypted AMI copy with specified key
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
encrypted: yes
|
||||
kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import (boto3_conn, ec2_argument_spec, get_aws_connection_info)
|
||||
|
||||
try:
|
||||
import boto
|
||||
import boto.ec2
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
try:
|
||||
import boto3
|
||||
from botocore.exceptions import ClientError, NoCredentialsError, NoRegionError
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
|
||||
|
||||
def copy_image(ec2, module):
|
||||
"""
|
||||
Copies an AMI
|
||||
|
||||
module : AnsibleModule object
|
||||
ec2: ec2 connection object
|
||||
"""
|
||||
|
||||
tags = module.params.get('tags')
|
||||
|
||||
params = {'SourceRegion': module.params.get('source_region'),
|
||||
'SourceImageId': module.params.get('source_image_id'),
|
||||
'Name': module.params.get('name'),
|
||||
'Description': module.params.get('description'),
|
||||
'Encrypted': module.params.get('encrypted'),
|
||||
# 'KmsKeyId': module.params.get('kms_key_id')
|
||||
}
|
||||
if module.params.get('kms_key_id'):
|
||||
params['KmsKeyId'] = module.params.get('kms_key_id')
|
||||
|
||||
try:
|
||||
image_id = ec2.copy_image(**params)['ImageId']
|
||||
if module.params.get('wait'):
|
||||
ec2.get_waiter('image_available').wait(ImageIds=[image_id])
|
||||
if module.params.get('tags'):
|
||||
ec2.create_tags(
|
||||
Resources=[image_id],
|
||||
Tags=[{'Key': k, 'Value': v} for k, v in module.params.get('tags').items()]
|
||||
)
|
||||
|
||||
module.exit_json(changed=True, image_id=image_id)
|
||||
except ClientError as ce:
|
||||
module.fail_json(msg=ce)
|
||||
except NoCredentialsError:
|
||||
module.fail_json(msg="Unable to locate AWS credentials")
|
||||
except Exception as e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
source_region=dict(required=True),
|
||||
source_image_id=dict(required=True),
|
||||
name=dict(required=True),
|
||||
description=dict(default=''),
|
||||
encrypted=dict(type='bool', required=False),
|
||||
kms_key_id=dict(type='str', required=False),
|
||||
wait=dict(type='bool', default=False, required=False),
|
||||
tags=dict(type='dict')))
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec)
|
||||
|
||||
if not HAS_BOTO:
|
||||
module.fail_json(msg='boto required for this module')
|
||||
# TODO: Check botocore version
|
||||
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
|
||||
|
||||
if HAS_BOTO3:
|
||||
|
||||
try:
|
||||
ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url,
|
||||
**aws_connect_params)
|
||||
except NoRegionError:
|
||||
module.fail_json(msg='AWS Region is required')
|
||||
else:
|
||||
module.fail_json(msg='boto3 required for this module')
|
||||
|
||||
copy_image(ec2, module)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
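The deleted ec2_ami_copy module above boils down to three boto3 calls: copy_image, the image_available waiter, and create_tags. A minimal sketch of that sequence, assuming default AWS credentials are configured; the regions, AMI id, and tag values are placeholders drawn from the module's examples and the later encrypted-image tasks:

```
import boto3

ec2 = boto3.client("ec2", region_name="eu-west-1")

# Copy the source AMI into this region, requesting encryption of the target.
copied = ec2.copy_image(
    SourceRegion="us-east-1",
    SourceImageId="ami-xxxxxxx",
    Name="My-Awesome-AMI",
    Encrypted=True,
)
image_id = copied["ImageId"]

# Block until the copy reaches the 'available' state, then tag it.
ec2.get_waiter("image_available").wait(ImageIds=[image_id])
ec2.create_tags(Resources=[image_id],
                Tags=[{"Key": "Algo", "Value": "encrypted"}])
print(image_id)
```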
139
library/gce_region_facts.py
Normal file
|
@ -0,0 +1,139 @@
|
|||
#!/usr/bin/python
|
||||
# Copyright 2013 Google Inc.
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: gce_region_facts
|
||||
version_added: "5.3"
|
||||
short_description: Gather facts about GCE regions.
|
||||
description:
|
||||
- Gather facts about GCE regions.
|
||||
options:
|
||||
service_account_email:
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- service account email
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
pem_file:
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- path to the pem file associated with the service account email
|
||||
This option is deprecated. Use 'credentials_file'.
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
credentials_file:
|
||||
version_added: "2.1.0"
|
||||
description:
|
||||
- path to the JSON file associated with the service account email
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
project_id:
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- your GCE project ID
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
|
||||
author: "Jack Ivanov (@jackivanov)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Gather facts about all regions
|
||||
- gce_region_facts:
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
regions:
|
||||
returned: on success
|
||||
description: >
|
||||
Each element consists of a dict with all the information related
|
||||
to that region.
|
||||
type: list
|
||||
sample: "[{
|
||||
"name": "asia-east1",
|
||||
"status": "UP",
|
||||
"zones": [
|
||||
{
|
||||
"name": "asia-east1-a",
|
||||
"status": "UP"
|
||||
},
|
||||
{
|
||||
"name": "asia-east1-b",
|
||||
"status": "UP"
|
||||
},
|
||||
{
|
||||
"name": "asia-east1-c",
|
||||
"status": "UP"
|
||||
}
|
||||
]
|
||||
}]"
|
||||
'''
|
||||
try:
|
||||
from libcloud.compute.types import Provider
|
||||
from libcloud.compute.providers import get_driver
|
||||
from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError
|
||||
_ = Provider.GCE
|
||||
HAS_LIBCLOUD = True
|
||||
except ImportError:
|
||||
HAS_LIBCLOUD = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.gce import gce_connect, unexpected_error_msg
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
service_account_email=dict(),
|
||||
pem_file=dict(type='path'),
|
||||
credentials_file=dict(type='path'),
|
||||
project_id=dict(),
|
||||
)
|
||||
)
|
||||
|
||||
if not HAS_LIBCLOUD:
|
||||
module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
|
||||
|
||||
gce = gce_connect(module)
|
||||
|
||||
changed = False
|
||||
gce_regions = []
|
||||
|
||||
try:
|
||||
regions = gce.ex_list_regions()
|
||||
for r in regions:
|
||||
gce_region = {}
|
||||
gce_region['name'] = r.name
|
||||
gce_region['status'] = r.status
|
||||
gce_region['zones'] = []
|
||||
for z in r.zones:
|
||||
gce_zone = {}
|
||||
gce_zone['name'] = z.name
|
||||
gce_zone['status'] = z.status
|
||||
gce_region['zones'].append(gce_zone)
|
||||
gce_regions.append(gce_region)
|
||||
json_output = { 'regions': gce_regions }
|
||||
module.exit_json(changed=False, results=json_output)
|
||||
except ResourceNotFoundError:
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
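The new gce_region_facts module wraps a single apache-libcloud call, ex_list_regions(), and flattens the result into plain dicts. A short sketch of the same call made directly; the service account email, key file path, and project id are placeholder assumptions:

```
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider

# Connect with a GCE service account (all identifiers below are placeholders).
driver = get_driver(Provider.GCE)(
    "service-account@example-project.iam.gserviceaccount.com",
    "/path/to/credentials.json",
    project="example-project",
)

# One region object per GCE region, each carrying its zones.
for region in driver.ex_list_regions():
    zones = [z.name for z in region.zones]
    print(region.name, region.status, zones)
```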
102
library/lightsail_region_facts.py
Normal file
|
@ -0,0 +1,102 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: lightsail_region_facts
|
||||
short_description: Gather facts about AWS Lightsail regions.
|
||||
description:
|
||||
- Gather facts about AWS Lightsail regions.
|
||||
version_added: "2.5.3"
|
||||
author: "Jack Ivanov (@jackivanov)"
|
||||
options:
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- boto3
|
||||
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
# Gather facts about all regions
|
||||
- lightsail_region_facts:
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
regions:
|
||||
returned: on success
|
||||
description: >
|
||||
Each element consists of a dict with all the information related
|
||||
to that region.
|
||||
type: list
|
||||
sample: "[{
|
||||
"availabilityZones": [],
|
||||
"continentCode": "NA",
|
||||
"description": "This region is recommended to serve users in the eastern United States",
|
||||
"displayName": "Virginia",
|
||||
"name": "us-east-1"
|
||||
}]"
|
||||
'''
|
||||
|
||||
import time
|
||||
import traceback
|
||||
|
||||
try:
|
||||
import botocore
|
||||
HAS_BOTOCORE = True
|
||||
except ImportError:
|
||||
HAS_BOTOCORE = False
|
||||
|
||||
try:
|
||||
import boto3
|
||||
except ImportError:
|
||||
# will be caught by imported HAS_BOTO3
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
|
||||
HAS_BOTO3, camel_dict_to_snake_dict)
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
module = AnsibleModule(argument_spec=argument_spec)
|
||||
|
||||
if not HAS_BOTO3:
|
||||
module.fail_json(msg='Python module "boto3" is missing, please install it')
|
||||
|
||||
if not HAS_BOTOCORE:
|
||||
module.fail_json(msg='Python module "botocore" is missing, please install it')
|
||||
|
||||
try:
|
||||
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
|
||||
|
||||
client = None
|
||||
try:
|
||||
client = boto3_conn(module, conn_type='client', resource='lightsail',
|
||||
region=region, endpoint=ec2_url, **aws_connect_kwargs)
|
||||
except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
|
||||
module.fail_json(msg='Failed while connecting to the lightsail service: %s' % e, exception=traceback.format_exc())
|
||||
|
||||
response = client.get_regions(
|
||||
includeAvailabilityZones=False
|
||||
)
|
||||
module.exit_json(changed=False, results=response)
|
||||
except (botocore.exceptions.ClientError, Exception) as e:
|
||||
module.fail_json(msg=str(e), exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
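Similarly, lightsail_region_facts is a thin wrapper around the boto3 Lightsail client's get_regions call. A minimal sketch, assuming default AWS credentials; it prints the region names the module returns as facts:

```
import boto3

lightsail = boto3.client("lightsail", region_name="us-east-1")
response = lightsail.get_regions(includeAvailabilityZones=False)

# Each entry carries name, displayName, description and continentCode.
for region in response["regions"]:
    print(region["name"], "-", region["displayName"])
```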
9
main.yml
Normal file
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
- name: Include prompts playbook
|
||||
import_playbook: input.yml
|
||||
|
||||
- name: Include cloud provisioning playbook
|
||||
import_playbook: cloud.yml
|
||||
|
||||
- name: Include server configuration playbook
|
||||
import_playbook: server.yml
|
45
playbooks/cloud-post.yml
Normal file
|
@ -0,0 +1,45 @@
|
|||
---
|
||||
- name: Set subjectAltName as a fact
|
||||
set_fact:
|
||||
IP_subject_alt_name: "{% if algo_provider == 'local' %}{{ IP_subject_alt_name }}{% else %}{{ cloud_instance_ip }}{% endif %}"
|
||||
|
||||
- name: Add the server to an inventory group
|
||||
add_host:
|
||||
name: "{% if cloud_instance_ip == 'localhost' %}localhost{% else %}{{ cloud_instance_ip }}{% endif %}"
|
||||
groups: vpn-host
|
||||
ansible_connection: "{% if cloud_instance_ip == 'localhost' %}local{% else %}ssh{% endif %}"
|
||||
ansible_ssh_user: "{{ ansible_ssh_user }}"
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
algo_provider: "{{ algo_provider }}"
|
||||
algo_server_name: "{{ algo_server_name }}"
|
||||
algo_ondemand_cellular: "{{ algo_ondemand_cellular }}"
|
||||
algo_ondemand_wifi: "{{ algo_ondemand_wifi }}"
|
||||
algo_ondemand_wifi_exclude: "{{ algo_ondemand_wifi_exclude }}"
|
||||
algo_local_dns: "{{ algo_local_dns }}"
|
||||
algo_ssh_tunneling: "{{ algo_ssh_tunneling }}"
|
||||
algo_windows: "{{ algo_windows }}"
|
||||
algo_store_cakey: "{{ algo_store_cakey }}"
|
||||
IP_subject_alt_name: "{{ IP_subject_alt_name }}"
|
||||
|
||||
- name: Additional variables for the server
|
||||
add_host:
|
||||
name: "{% if cloud_instance_ip == 'localhost' %}localhost{% else %}{{ cloud_instance_ip }}{% endif %}"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||
when: algo_provider != 'local'
|
||||
|
||||
- name: Wait until SSH becomes ready...
|
||||
wait_for:
|
||||
port: 22
|
||||
host: "{{ cloud_instance_ip }}"
|
||||
search_regex: "OpenSSH"
|
||||
delay: 10
|
||||
timeout: 320
|
||||
state: present
|
||||
when: cloud_instance_ip != "localhost"
|
||||
|
||||
- debug:
|
||||
var: IP_subject_alt_name
|
||||
|
||||
- name: A short pause, in order to be sure the instance is ready
|
||||
pause:
|
||||
seconds: 20
|
28
playbooks/cloud-pre.yml
Normal file
|
@ -0,0 +1,28 @@
|
|||
---
|
||||
- name: Display the invocation environment
|
||||
local_action:
|
||||
module: shell
|
||||
./algo-showenv.sh \
|
||||
'algo_provider "{{ algo_provider }}"' \
|
||||
'algo_ondemand_cellular "{{ algo_ondemand_cellular }}"' \
|
||||
'algo_ondemand_wifi "{{ algo_ondemand_wifi }}"' \
|
||||
'algo_ondemand_wifi_exclude "{{ algo_ondemand_wifi_exclude }}"' \
|
||||
'algo_local_dns "{{ algo_local_dns }}"' \
|
||||
'algo_ssh_tunneling "{{ algo_ssh_tunneling }}"' \
|
||||
'algo_windows "{{ algo_windows }}"' \
|
||||
'wireguard_enabled "{{ wireguard_enabled }}"' \
|
||||
'dns_encryption "{{ dns_encryption }}"' \
|
||||
> /dev/tty
|
||||
|
||||
- name: Generate the SSH private key
|
||||
openssl_privatekey:
|
||||
path: "{{ SSH_keys.private }}"
|
||||
size: 2048
|
||||
mode: "0600"
|
||||
type: RSA
|
||||
|
||||
- name: Generate the SSH public key
|
||||
openssl_publickey:
|
||||
path: "{{ SSH_keys.public }}"
|
||||
privatekey_path: "{{ SSH_keys.private }}"
|
||||
format: OpenSSH
|
|
@ -1,15 +0,0 @@
|
|||
---
|
||||
|
||||
- name: Check the system
|
||||
raw: uname -a
|
||||
register: OS
|
||||
|
||||
- name: Ubuntu pre-tasks
|
||||
include_tasks: ubuntu.yml
|
||||
when: '"Ubuntu" in OS.stdout'
|
||||
|
||||
- name: FreeBSD pre-tasks
|
||||
include_tasks: freebsd.yml
|
||||
when: '"FreeBSD" in OS.stdout'
|
||||
|
||||
- include_tasks: facts/main.yml
|
|
@ -1,10 +0,0 @@
|
|||
---
|
||||
|
||||
- set_fact:
|
||||
config_prefix: "/usr/local/"
|
||||
root_group: wheel
|
||||
ssh_service_name: sshd
|
||||
apparmor_enabled: false
|
||||
strongswan_additional_plugins:
|
||||
- kernel-pfroute
|
||||
- kernel-pfkey
|
|
@ -1,44 +0,0 @@
|
|||
---
|
||||
|
||||
- name: Gather Facts
|
||||
setup:
|
||||
|
||||
- name: Ensure the algo ssh key exists on the server
|
||||
authorized_key:
|
||||
user: "{{ ansible_ssh_user }}"
|
||||
state: present
|
||||
key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
tags: [ 'cloud' ]
|
||||
|
||||
- name: Check if IPv6 configured
|
||||
set_fact:
|
||||
ipv6_support: "{% if ansible_default_ipv6['gateway'] is defined %}true{% else %}false{% endif %}"
|
||||
|
||||
- name: Set facts if the deployment is in a cloud
|
||||
set_fact:
|
||||
cloud_deployment: true
|
||||
tags: ['cloud']
|
||||
|
||||
- name: Generate password for the CA key
|
||||
local_action:
|
||||
module: shell
|
||||
openssl rand -hex 16
|
||||
become: no
|
||||
register: CA_password
|
||||
|
||||
- name: Generate p12 export password
|
||||
local_action:
|
||||
module: shell
|
||||
openssl rand 8 | python -c 'import sys,string; chars=string.ascii_letters + string.digits + "_@"; print "".join([chars[ord(c) % 64] for c in list(sys.stdin.read())])'
|
||||
become: no
|
||||
register: p12_export_password_generated
|
||||
when: p12_export_password is not defined
|
||||
|
||||
- name: Define password facts
|
||||
set_fact:
|
||||
easyrsa_p12_export_password: "{{ p12_export_password|default(p12_export_password_generated.stdout) }}"
|
||||
easyrsa_CA_password: "{{ CA_password.stdout }}"
|
||||
|
||||
- name: Define the commonName
|
||||
set_fact:
|
||||
IP_subject_alt_name: "{{ IP_subject_alt_name }}"
|
|
@ -1,9 +0,0 @@
|
|||
---
|
||||
|
||||
- name: FreeBSD / HardenedBSD | Install prerequisites
|
||||
raw: sleep 10 && env ASSUME_ALWAYS_YES=YES sudo pkg install -y python27
|
||||
|
||||
- name: FreeBSD / HardenedBSD | Configure defaults
|
||||
raw: sudo ln -sf /usr/local/bin/python2.7 /usr/bin/python2.7
|
||||
|
||||
- include_tasks: facts/FreeBSD.yml
|
|
@ -1,31 +0,0 @@
|
|||
---
|
||||
|
||||
- name: Generate the SSH private key
|
||||
shell: >
|
||||
echo -e 'n' |
|
||||
ssh-keygen -b 2048 -C {{ SSH_keys.comment }}
|
||||
-t rsa -f {{ SSH_keys.private }} -q -N ""
|
||||
args:
|
||||
creates: "{{ SSH_keys.private }}"
|
||||
|
||||
- name: Generate the SSH public key
|
||||
shell: >
|
||||
echo `ssh-keygen -y -f {{ SSH_keys.private }}` {{ SSH_keys.comment }}
|
||||
> {{ SSH_keys.public }}
|
||||
changed_when: false
|
||||
|
||||
- name: Change mode for the SSH private key
|
||||
file:
|
||||
path: "{{ SSH_keys.private }}"
|
||||
mode: 0600
|
||||
|
||||
- name: Ensure the dynamic inventory exists
|
||||
blockinfile:
|
||||
dest: configs/inventory.dynamic
|
||||
marker: "# {mark} ALGO MANAGED BLOCK"
|
||||
create: true
|
||||
block: |
|
||||
[algo:children]
|
||||
{% for group in cloud_providers.keys() %}
|
||||
{{ group }}
|
||||
{% endfor %}
|
|
@ -1,12 +0,0 @@
|
|||
---
|
||||
|
||||
- name: Ensure the local ssh directory exists
|
||||
file:
|
||||
path: ~/.ssh/
|
||||
state: directory
|
||||
|
||||
- name: Copy the algo ssh key to the local ssh directory
|
||||
copy:
|
||||
src: "{{ SSH_keys.private }}"
|
||||
dest: ~/.ssh/algo.pem
|
||||
mode: '0600'
|
|
@ -1,16 +0,0 @@
|
|||
---
|
||||
|
||||
- name: Wait until SSH becomes ready...
|
||||
wait_for:
|
||||
port: 22
|
||||
host: "{{ cloud_instance_ip }}"
|
||||
search_regex: "OpenSSH"
|
||||
delay: 10
|
||||
timeout: 320
|
||||
state: present
|
||||
|
||||
- name: A short pause, in order to be sure the instance is ready
|
||||
pause:
|
||||
seconds: 20
|
||||
|
||||
- include_tasks: local_ssh.yml
|
|
@ -1,9 +0,0 @@
|
|||
---
|
||||
|
||||
- name: Ubuntu | Install prerequisites
|
||||
raw: sleep 10 && sudo apt-get update -qq && sudo apt-get install -qq -y python2.7
|
||||
|
||||
- name: Ubuntu | Configure defaults
|
||||
raw: sudo update-alternatives --install /usr/bin/python python /usr/bin/python2.7 1
|
||||
tags:
|
||||
- update-alternatives
|
|
@ -1,5 +1,6 @@
|
|||
setuptools>=11.3
|
||||
ansible[azure]==2.4.3
|
||||
SecretStorage < 3
|
||||
ansible[azure]==2.5.2
|
||||
dopy==0.3.5
|
||||
boto>=2.5
|
||||
boto3
|
||||
|
@ -9,3 +10,4 @@ pyopenssl
|
|||
jinja2==2.8
|
||||
shade
|
||||
pycrypto
|
||||
segno
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
setup:
|
||||
|
||||
- name: Include system based facts and tasks
|
||||
include_tasks: systems/main.yml
|
||||
import_tasks: systems/main.yml
|
||||
|
||||
- name: Install prerequisites
|
||||
package: name="{{ item }}" state=present
|
||||
|
|
214
roles/cloud-azure/defaults/main.yml
Normal file
|
@ -0,0 +1,214 @@
|
|||
---
|
||||
_azure_regions: >
|
||||
[
|
||||
{
|
||||
"displayName": "East Asia",
|
||||
"latitude": "22.267",
|
||||
"longitude": "114.188",
|
||||
"name": "eastasia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Southeast Asia",
|
||||
"latitude": "1.283",
|
||||
"longitude": "103.833",
|
||||
"name": "southeastasia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Central US",
|
||||
"latitude": "41.5908",
|
||||
"longitude": "-93.6208",
|
||||
"name": "centralus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "East US",
|
||||
"latitude": "37.3719",
|
||||
"longitude": "-79.8164",
|
||||
"name": "eastus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "East US 2",
|
||||
"latitude": "36.6681",
|
||||
"longitude": "-78.3889",
|
||||
"name": "eastus2",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West US",
|
||||
"latitude": "37.783",
|
||||
"longitude": "-122.417",
|
||||
"name": "westus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "North Central US",
|
||||
"latitude": "41.8819",
|
||||
"longitude": "-87.6278",
|
||||
"name": "northcentralus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "South Central US",
|
||||
"latitude": "29.4167",
|
||||
"longitude": "-98.5",
|
||||
"name": "southcentralus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "North Europe",
|
||||
"latitude": "53.3478",
|
||||
"longitude": "-6.2597",
|
||||
"name": "northeurope",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West Europe",
|
||||
"latitude": "52.3667",
|
||||
"longitude": "4.9",
|
||||
"name": "westeurope",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Japan West",
|
||||
"latitude": "34.6939",
|
||||
"longitude": "135.5022",
|
||||
"name": "japanwest",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Japan East",
|
||||
"latitude": "35.68",
|
||||
"longitude": "139.77",
|
||||
"name": "japaneast",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Brazil South",
|
||||
"latitude": "-23.55",
|
||||
"longitude": "-46.633",
|
||||
"name": "brazilsouth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Australia East",
|
||||
"latitude": "-33.86",
|
||||
"longitude": "151.2094",
|
||||
"name": "australiaeast",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Australia Southeast",
|
||||
"latitude": "-37.8136",
|
||||
"longitude": "144.9631",
|
||||
"name": "australiasoutheast",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "South India",
|
||||
"latitude": "12.9822",
|
||||
"longitude": "80.1636",
|
||||
"name": "southindia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Central India",
|
||||
"latitude": "18.5822",
|
||||
"longitude": "73.9197",
|
||||
"name": "centralindia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West India",
|
||||
"latitude": "19.088",
|
||||
"longitude": "72.868",
|
||||
"name": "westindia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Canada Central",
|
||||
"latitude": "43.653",
|
||||
"longitude": "-79.383",
|
||||
"name": "canadacentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Canada East",
|
||||
"latitude": "46.817",
|
||||
"longitude": "-71.217",
|
||||
"name": "canadaeast",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "UK South",
|
||||
"latitude": "50.941",
|
||||
"longitude": "-0.799",
|
||||
"name": "uksouth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "UK West",
|
||||
"latitude": "53.427",
|
||||
"longitude": "-3.084",
|
||||
"name": "ukwest",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West Central US",
|
||||
"latitude": "40.890",
|
||||
"longitude": "-110.234",
|
||||
"name": "westcentralus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West US 2",
|
||||
"latitude": "47.233",
|
||||
"longitude": "-119.852",
|
||||
"name": "westus2",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Korea Central",
|
||||
"latitude": "37.5665",
|
||||
"longitude": "126.9780",
|
||||
"name": "koreacentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Korea South",
|
||||
"latitude": "35.1796",
|
||||
"longitude": "129.0756",
|
||||
"name": "koreasouth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "France Central",
|
||||
"latitude": "46.3772",
|
||||
"longitude": "2.3730",
|
||||
"name": "francecentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "France South",
|
||||
"latitude": "43.8345",
|
||||
"longitude": "2.1972",
|
||||
"name": "francesouth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Australia Central",
|
||||
"latitude": "-35.3075",
|
||||
"longitude": "149.1244",
|
||||
"name": "australiacentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Australia Central 2",
|
||||
"latitude": "-35.3075",
|
||||
"longitude": "149.1244",
|
||||
"name": "australiacentral2",
|
||||
"subscriptionId": null
|
||||
}
|
||||
]
|
209
roles/cloud-azure/files/deployment.json
Normal file
|
@ -0,0 +1,209 @@
|
|||
{
|
||||
"$schema": "http://schema.management.azure.com/schemas/2014-04-01-preview/deploymentTemplate.json",
|
||||
"contentVersion": "1.0.0.0",
|
||||
"parameters": {
|
||||
"AlgoServerName": {
|
||||
"type": "string"
|
||||
},
|
||||
"sshKeyData": {
|
||||
"type": "string"
|
||||
},
|
||||
"location": {
|
||||
"type": "string"
|
||||
},
|
||||
"WireGuardPort": {
|
||||
"type": "int"
|
||||
},
|
||||
"vmSize": {
|
||||
"type": "string"
|
||||
},
|
||||
"imageReferenceSku": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"variables": {
|
||||
"vnetID": "[resourceId('Microsoft.Network/virtualNetworks', parameters('AlgoServerName'))]",
|
||||
"subnet1Ref": "[concat(variables('vnetID'),'/subnets/', parameters('AlgoServerName'))]"
|
||||
},
|
||||
"resources": [
|
||||
{
|
||||
"apiVersion": "2015-06-15",
|
||||
"type": "Microsoft.Network/networkSecurityGroups",
|
||||
"name": "[parameters('AlgoServerName')]",
|
||||
"location": "[parameters('location')]",
|
||||
"properties": {
|
||||
"securityRules": [
|
||||
{
|
||||
"name": "AllowSSH",
|
||||
"properties": {
|
||||
"description": "Locks inbound down to ssh default port 22.",
|
||||
"protocol": "Tcp",
|
||||
"sourcePortRange": "*",
|
||||
"destinationPortRange": "22",
|
||||
"sourceAddressPrefix": "*",
|
||||
"destinationAddressPrefix": "*",
|
||||
"access": "Allow",
|
||||
"priority": 100,
|
||||
"direction": "Inbound"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "AllowIPSEC500",
|
||||
"properties": {
|
||||
"description": "Allow UDP to port 500",
|
||||
"protocol": "Udp",
|
||||
"sourcePortRange": "*",
|
||||
"destinationPortRange": "500",
|
||||
"sourceAddressPrefix": "*",
|
||||
"destinationAddressPrefix": "*",
|
||||
"access": "Allow",
|
||||
"priority": 110,
|
||||
"direction": "Inbound"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "AllowIPSEC4500",
|
||||
"properties": {
|
||||
"description": "Allow UDP to port 4500",
|
||||
"protocol": "Udp",
|
||||
"sourcePortRange": "*",
|
||||
"destinationPortRange": "4500",
|
||||
"sourceAddressPrefix": "*",
|
||||
"destinationAddressPrefix": "*",
|
||||
"access": "Allow",
|
||||
"priority": 120,
|
||||
"direction": "Inbound"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "AllowWireGuard",
|
||||
"properties": {
|
||||
"description": "Locks inbound down to ssh default port 22.",
|
||||
"protocol": "Udp",
|
||||
"sourcePortRange": "*",
|
||||
"destinationPortRange": "[parameters('WireGuardPort')]",
|
||||
"sourceAddressPrefix": "*",
|
||||
"destinationAddressPrefix": "*",
|
||||
"access": "Allow",
|
||||
"priority": 130,
|
||||
"direction": "Inbound"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"apiVersion": "2015-06-15",
|
||||
"type": "Microsoft.Network/publicIPAddresses",
|
||||
"name": "[parameters('AlgoServerName')]",
|
||||
"location": "[parameters('location')]",
|
||||
"properties": {
|
||||
"publicIPAllocationMethod": "Static"
|
||||
}
|
||||
},
|
||||
{
|
||||
"apiVersion": "2015-06-15",
|
||||
"type": "Microsoft.Network/virtualNetworks",
|
||||
"name": "[parameters('AlgoServerName')]",
|
||||
"location": "[parameters('location')]",
|
||||
"properties": {
|
||||
"addressSpace": {
|
||||
"addressPrefixes": [
|
||||
"10.10.0.0/16"
|
||||
]
|
||||
},
|
||||
"subnets": [
|
||||
{
|
||||
"name": "[parameters('AlgoServerName')]",
|
||||
"properties": {
|
||||
"addressPrefix": "10.10.0.0/24"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"apiVersion": "2015-06-15",
|
||||
"type": "Microsoft.Network/networkInterfaces",
|
||||
"name": "[parameters('AlgoServerName')]",
|
||||
"location": "[parameters('location')]",
|
||||
"dependsOn": [
|
||||
"[concat('Microsoft.Network/networkSecurityGroups/', parameters('AlgoServerName'))]",
|
||||
"[concat('Microsoft.Network/publicIPAddresses/', parameters('AlgoServerName'))]",
|
||||
"[concat('Microsoft.Network/virtualNetworks/', parameters('AlgoServerName'))]"
|
||||
],
|
||||
"properties": {
|
||||
"networkSecurityGroup": {
|
||||
"id": "[resourceId('Microsoft.Network/networkSecurityGroups', parameters('AlgoServerName'))]"
|
||||
},
|
||||
"ipConfigurations": [
|
||||
{
|
||||
"name": "ipconfig1",
|
||||
"properties": {
|
||||
"privateIPAllocationMethod": "Dynamic",
|
||||
"publicIPAddress": {
|
||||
"id": "[resourceId('Microsoft.Network/publicIPAddresses', parameters('AlgoServerName'))]"
|
||||
},
|
||||
"subnet": {
|
||||
"id": "[variables('subnet1Ref')]"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"apiVersion": "2016-04-30-preview",
|
||||
"type": "Microsoft.Compute/virtualMachines",
|
||||
"name": "[parameters('AlgoServerName')]",
|
||||
"location": "[parameters('location')]",
|
||||
"dependsOn": [
|
||||
"[concat('Microsoft.Network/networkInterfaces/', parameters('AlgoServerName'))]"
|
||||
],
|
||||
"properties": {
|
||||
"hardwareProfile": {
|
||||
"vmSize": "[parameters('vmSize')]"
|
||||
},
|
||||
"osProfile": {
|
||||
"computerName": "[parameters('AlgoServerName')]",
|
||||
"adminUsername": "ubuntu",
|
||||
"linuxConfiguration": {
|
||||
"disablePasswordAuthentication": true,
|
||||
"ssh": {
|
||||
"publicKeys": [
|
||||
{
|
||||
"path": "/home/ubuntu/.ssh/authorized_keys",
|
||||
"keyData": "[parameters('sshKeyData')]"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"storageProfile": {
|
||||
"imageReference": {
|
||||
"publisher": "Canonical",
|
||||
"offer": "UbuntuServer",
|
||||
"sku": "[parameters('imageReferenceSku')]",
|
||||
"version": "latest"
|
||||
},
|
||||
"osDisk": {
|
||||
"createOption": "FromImage"
|
||||
}
|
||||
},
|
||||
"networkProfile": {
|
||||
"networkInterfaces": [
|
||||
{
|
||||
"id": "[resourceId('Microsoft.Network/networkInterfaces', parameters('AlgoServerName'))]"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"outputs": {
|
||||
"publicIPAddresses": {
|
||||
"type": "string",
|
||||
"value": "[reference(resourceId('Microsoft.Network/publicIPAddresses',parameters('AlgoServerName')),providers('Microsoft.Network', 'publicIPAddresses').apiVersions[0]).ipAddress]",
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,140 +1,43 @@
|
|||
---
|
||||
- block:
|
||||
- set_fact:
|
||||
resource_group: "Algo_{{ region }}"
|
||||
secret: "{{ azure_secret | default(lookup('env','AZURE_SECRET'), true) }}"
|
||||
tenant: "{{ azure_tenant | default(lookup('env','AZURE_TENANT'), true) }}"
|
||||
client_id: "{{ azure_client_id | default(lookup('env','AZURE_CLIENT_ID'), true) }}"
|
||||
subscription_id: "{{ azure_subscription_id | default(lookup('env','AZURE_SUBSCRIPTION_ID'), true) }}"
|
||||
|
||||
- name: Create a resource group
|
||||
azure_rm_resourcegroup:
|
||||
secret: "{{ secret }}"
|
||||
tenant: "{{ tenant }}"
|
||||
client_id: "{{ client_id }}"
|
||||
subscription_id: "{{ subscription_id }}"
|
||||
name: "{{ resource_group }}"
|
||||
location: "{{ region }}"
|
||||
tags:
|
||||
Environment: Algo
|
||||
|
||||
- name: Create a virtual network
|
||||
azure_rm_virtualnetwork:
|
||||
secret: "{{ secret }}"
|
||||
tenant: "{{ tenant }}"
|
||||
client_id: "{{ client_id }}"
|
||||
subscription_id: "{{ subscription_id }}"
|
||||
resource_group: "{{ resource_group }}"
|
||||
name: algo_net
|
||||
address_prefixes: "10.10.0.0/16"
|
||||
tags:
|
||||
Environment: Algo
|
||||
|
||||
- name: Create a security group
|
||||
azure_rm_securitygroup:
|
||||
secret: "{{ secret }}"
|
||||
tenant: "{{ tenant }}"
|
||||
client_id: "{{ client_id }}"
|
||||
subscription_id: "{{ subscription_id }}"
|
||||
resource_group: "{{ resource_group }}"
|
||||
name: AlgoSecGroup
|
||||
purge_rules: yes
|
||||
rules:
|
||||
- name: AllowSSH
|
||||
protocol: Tcp
|
||||
destination_port_range: 22
|
||||
access: Allow
|
||||
priority: 100
|
||||
direction: Inbound
|
||||
- name: AllowIPSEC500
|
||||
protocol: Udp
|
||||
destination_port_range: 500
|
||||
access: Allow
|
||||
priority: 110
|
||||
direction: Inbound
|
||||
- name: AllowIPSEC4500
|
||||
protocol: Udp
|
||||
destination_port_range: 4500
|
||||
access: Allow
|
||||
priority: 120
|
||||
direction: Inbound
|
||||
|
||||
- name: Create a subnet
|
||||
azure_rm_subnet:
|
||||
secret: "{{ secret }}"
|
||||
tenant: "{{ tenant }}"
|
||||
client_id: "{{ client_id }}"
|
||||
subscription_id: "{{ subscription_id }}"
|
||||
resource_group: "{{ resource_group }}"
|
||||
name: algo_subnet
|
||||
address_prefix: "10.10.0.0/24"
|
||||
virtual_network: algo_net
|
||||
security_group_name: AlgoSecGroup
|
||||
tags:
|
||||
Environment: Algo
|
||||
|
||||
- name: Create an instance
|
||||
azure_rm_virtualmachine:
|
||||
secret: "{{ secret }}"
|
||||
tenant: "{{ tenant }}"
|
||||
client_id: "{{ client_id }}"
|
||||
subscription_id: "{{ subscription_id }}"
|
||||
resource_group: "{{ resource_group }}"
|
||||
admin_username: ubuntu
|
||||
virtual_network: algo_net
|
||||
name: "{{ azure_server_name }}"
|
||||
ssh_password_enabled: false
|
||||
vm_size: "{{ cloud_providers.azure.size }}"
|
||||
tags:
|
||||
Environment: Algo
|
||||
ssh_public_keys:
|
||||
- { path: "/home/ubuntu/.ssh/authorized_keys", key_data: "{{ lookup('file', '{{ SSH_keys.public }}') }}" }
|
||||
image: "{{ cloud_providers.azure.image }}"
|
||||
register: azure_rm_virtualmachine
|
||||
|
||||
# To-do: Add error handling - if vm_size requested is not available, can we fall back to another, ideally with a prompt?
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- set_fact:
|
||||
ip_address: "{{ azure_rm_virtualmachine.ansible_facts.azure_vm.properties.networkProfile.networkInterfaces[0].properties.ipConfigurations[0].properties.publicIPAddress.properties.ipAddress }}"
|
||||
networkinterface_name: "{{ azure_rm_virtualmachine.ansible_facts.azure_vm.properties.networkProfile.networkInterfaces[0].name }}"
|
||||
algo_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ azure_regions[_algo_region.user_input | int -1 ]['name'] }}
|
||||
{%- else %}{{ azure_regions[default_region | int - 1]['name'] }}{% endif %}
|
||||
|
||||
- name: Ensure the network interface includes all required parameters
|
||||
azure_rm_networkinterface:
|
||||
- name: Create AlgoVPN Server
|
||||
azure_rm_deployment:
|
||||
state: present
|
||||
deployment_name: "AlgoVPN-{{ algo_server_name }}"
|
||||
template: "{{ lookup('file', 'deployment.json') }}"
|
||||
secret: "{{ secret }}"
|
||||
tenant: "{{ tenant }}"
|
||||
client_id: "{{ client_id }}"
|
||||
subscription_id: "{{ subscription_id }}"
|
||||
name: "{{ networkinterface_name }}"
|
||||
resource_group: "{{ resource_group }}"
|
||||
virtual_network_name: algo_net
|
||||
subnet_name: algo_subnet
|
||||
security_group_name: AlgoSecGroup
|
||||
resource_group_name: "AlgoVPN-{{ algo_server_name }}"
|
||||
parameters:
|
||||
AlgoServerName:
|
||||
value: "{{ algo_server_name }}"
|
||||
sshKeyData:
|
||||
value: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
location:
|
||||
value: "{{ algo_region }}"
|
||||
WireGuardPort:
|
||||
value: "{{ wireguard_port }}"
|
||||
vmSize:
|
||||
value: "{{ cloud_providers.azure.size }}"
|
||||
imageReferenceSku:
|
||||
value: "{{ cloud_providers.azure.image }}"
|
||||
register: azure_rm_deployment
|
||||
|
||||
- name: Add the instance to an inventory group
|
||||
add_host:
|
||||
name: "{{ ip_address }}"
|
||||
groups: vpn-host
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ azure_rm_deployment.deployment.outputs.publicIPAddresses.value }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||
cloud_provider: azure
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ ip_address }}"
|
||||
|
||||
- name: Ensure the group azure exists in the dynamic inventory file
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
line: '[azure]'
|
||||
|
||||
- name: Populate the dynamic inventory
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
insertafter: '\[azure\]'
|
||||
regexp: "^{{ cloud_instance_ip }}.*"
|
||||
line: "{{ cloud_instance_ip }}"
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
|
|
70
roles/cloud-azure/tasks/prompts.yml
Normal file
|
@ -0,0 +1,70 @@
|
|||
---
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your azure secret id (https://github.com/trailofbits/algo/blob/master/docs/cloud-azure.md)
|
||||
You can skip this step if you want to use your default credentials from ~/.azure/credentials
|
||||
echo: false
|
||||
register: _azure_secret
|
||||
when:
|
||||
- azure_secret is undefined
|
||||
- lookup('env','AZURE_SECRET')|length <= 0
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your azure tenant id (https://github.com/trailofbits/algo/blob/master/docs/cloud-azure.md)
|
||||
You can skip this step if you want to use your default credentials from ~/.azure/credentials
|
||||
echo: false
|
||||
register: _azure_tenant
|
||||
when:
|
||||
- azure_tenant is undefined
|
||||
- lookup('env','AZURE_TENANT')|length <= 0
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your azure client id (application id) (https://github.com/trailofbits/algo/blob/master/docs/cloud-azure.md)
|
||||
You can skip this step if you want to use your default credentials from ~/.azure/credentials
|
||||
echo: false
|
||||
register: _azure_client_id
|
||||
when:
|
||||
- azure_client_id is undefined
|
||||
- lookup('env','AZURE_CLIENT_ID')|length <= 0
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your azure subscription id (https://github.com/trailofbits/algo/blob/master/docs/cloud-azure.md)
|
||||
You can skip this step if you want to use your default credentials from ~/.azure/credentials
|
||||
echo: false
|
||||
register: _azure_subscription_id
|
||||
when:
|
||||
- azure_subscription_id is undefined
|
||||
- lookup('env','AZURE_SUBSCRIPTION_ID')|length <= 0
|
||||
|
||||
- set_fact:
|
||||
secret: "{{ azure_secret | default(_azure_secret.user_input|default(None)) | default(lookup('env','AZURE_SECRET'), true) }}"
|
||||
tenant: "{{ azure_tenant | default(_azure_tenant.user_input|default(None)) | default(lookup('env','AZURE_TENANT'), true) }}"
|
||||
client_id: "{{ azure_client_id | default(_azure_client_id.user_input|default(None)) | default(lookup('env','AZURE_CLIENT_ID'), true) }}"
|
||||
subscription_id: "{{ azure_subscription_id | default(_azure_subscription_id.user_input|default(None)) | default(lookup('env','AZURE_SUBSCRIPTION_ID'), true) }}"
|
||||
|
||||
- block:
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
azure_regions: "{{ _azure_regions|from_json | sort(attribute='name') }}"
|
||||
|
||||
- name: Set the default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for r in azure_regions %}
|
||||
{%- if r['name'] == "eastus" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
{% for r in azure_regions %}
|
||||
{{ loop.index }}. {{ r['displayName'] }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
[{{ default_region }}]
|
||||
register: _algo_region
|
||||
when: region is undefined
|
|
@ -1,7 +1,13 @@
|
|||
- block:
|
||||
- name: Set the DigitalOcean Access Token fact
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- name: Set additional facts
|
||||
set_fact:
|
||||
do_token: "{{ do_access_token | default(lookup('env','DO_API_TOKEN'), true) }}"
|
||||
algo_do_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ do_regions[_algo_region.user_input | int -1 ]['slug'] }}
|
||||
{%- else %}{{ do_regions[default_region | int - 1]['slug'] }}{% endif %}
|
||||
public_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
|
||||
- block:
|
||||
|
@ -9,7 +15,7 @@
|
|||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ do_token }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
until: ssh_keys.changed != true
|
||||
|
@ -21,7 +27,7 @@
|
|||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ do_token }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
ignore_errors: yes
|
||||
|
@ -36,7 +42,7 @@
|
|||
state: present
|
||||
command: ssh
|
||||
ssh_pub_key: "{{ public_key }}"
|
||||
api_token: "{{ do_token }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: do_ssh_key
|
||||
|
||||
|
@ -44,69 +50,33 @@
|
|||
digital_ocean:
|
||||
state: present
|
||||
command: droplet
|
||||
name: "{{ do_server_name }}"
|
||||
region_id: "{{ do_region }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
region_id: "{{ algo_do_region }}"
|
||||
size_id: "{{ cloud_providers.digitalocean.size }}"
|
||||
image_id: "{{ cloud_providers.digitalocean.image }}"
|
||||
ssh_key_ids: "{{ do_ssh_key.ssh_key.id }}"
|
||||
unique_name: yes
|
||||
api_token: "{{ do_token }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
ipv6: yes
|
||||
register: do
|
||||
|
||||
- name: Add the droplet to an inventory group
|
||||
add_host:
|
||||
name: "{{ do.droplet.ip_address }}"
|
||||
groups: vpn-host
|
||||
ansible_ssh_user: root
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||
do_access_token: "{{ do_token }}"
|
||||
do_droplet_id: "{{ do.droplet.id }}"
|
||||
cloud_provider: digitalocean
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ do.droplet.ip_address }}"
|
||||
ansible_ssh_user: root
|
||||
|
||||
- name: Tag the droplet
|
||||
digital_ocean_tag:
|
||||
name: "Environment:Algo"
|
||||
resource_id: "{{ do.droplet.id }}"
|
||||
api_token: "{{ do_token }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
state: present
|
||||
|
||||
- name: Get droplets
|
||||
uri:
|
||||
url: "https://api.digitalocean.com/v2/droplets?tag_name=Environment:Algo"
|
||||
method: GET
|
||||
status_code: 200
|
||||
headers:
|
||||
Content-Type: "application/json"
|
||||
Authorization: "Bearer {{ do_token }}"
|
||||
register: do_droplets
|
||||
|
||||
- name: Ensure the group digitalocean exists in the dynamic inventory file
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
line: '[digitalocean]'
|
||||
|
||||
- name: Populate the dynamic inventory
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
insertafter: '\[digitalocean\]'
|
||||
regexp: "^{{ item.networks.v4[0].ip_address }}.*"
|
||||
line: "{{ item.networks.v4[0].ip_address }}"
|
||||
with_items:
|
||||
- "{{ do_droplets.json.droplets }}"
|
||||
|
||||
- block:
|
||||
- name: "Delete the new Algo SSH key"
|
||||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ do_token }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
until: ssh_keys.changed != true
|
||||
|
@ -118,7 +88,7 @@
|
|||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ do_token }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
ignore_errors: yes
|
||||
|
|
46
roles/cloud-digitalocean/tasks/prompts.yml
Normal file
|
@ -0,0 +1,46 @@
|
|||
---
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your API token. The token must have read and write permissions (https://cloud.digitalocean.com/settings/api/tokens):
|
||||
echo: false
|
||||
register: _do_token
|
||||
when:
|
||||
- do_token is undefined
|
||||
- lookup('env','DO_API_TOKEN')|length <= 0
|
||||
|
||||
- name: Set the token as a fact
|
||||
set_fact:
|
||||
algo_do_token: "{{ do_token | default(_do_token.user_input|default(None)) | default(lookup('env','DO_API_TOKEN'), true) }}"
|
||||
|
||||
- name: Get regions
|
||||
uri:
|
||||
url: https://api.digitalocean.com/v2/regions
|
||||
method: GET
|
||||
status_code: 200
|
||||
headers:
|
||||
Content-Type: "application/json"
|
||||
Authorization: "Bearer {{ algo_do_token }}"
|
||||
register: _do_regions
|
||||
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
do_regions: "{{ _do_regions.json.regions | sort(attribute='slug') }}"
|
||||
|
||||
- name: Set default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for r in do_regions %}
|
||||
{%- if r['slug'] == "nyc3" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
{% for r in do_regions %}
|
||||
{{ loop.index }}. {{ r['slug'] }} {{ r['name'] }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
[{{ default_region }}]
|
||||
register: _algo_region
|
||||
when: region is undefined
|
|
@ -1,6 +0,0 @@
|
|||
iface eth0 inet6 static
|
||||
address {{ item.ip_address }}
|
||||
netmask {{ item.netmask }}
|
||||
gateway {{ item.gateway }}
|
||||
autoconf 0
|
||||
dns-nameservers 2001:4860:4860::8844 2001:4860:4860::8888
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
|
||||
ami_search_encrypted: omit
|
||||
encrypted: "{{ cloud_providers.ec2.encrypted }}"
|
||||
ec2_vpc_nets:
|
||||
cidr_block: 172.16.0.0/16
|
||||
subnet_cidr: 172.16.254.0/23
|
||||
|
|
|
@ -9,6 +9,8 @@ Parameters:
|
|||
Type: String
|
||||
ImageIdParameter:
|
||||
Type: String
|
||||
WireGuardPort:
|
||||
Type: String
|
||||
Resources:
|
||||
VPC:
|
||||
Type: AWS::EC2::VPC
|
||||
|
@ -132,6 +134,10 @@ Resources:
|
|||
FromPort: '4500'
|
||||
ToPort: '4500'
|
||||
CidrIp: 0.0.0.0/0
|
||||
- IpProtocol: udp
|
||||
FromPort: !Ref WireGuardPort
|
||||
ToPort: !Ref WireGuardPort
|
||||
CidrIp: 0.0.0.0/0
|
||||
Tags:
|
||||
- Key: Name
|
||||
Value: Algo
|
||||
|
@ -147,11 +153,6 @@ Resources:
|
|||
Metadata:
|
||||
AWS::CloudFormation::Init:
|
||||
config:
|
||||
users:
|
||||
ubuntu:
|
||||
groups:
|
||||
- "sudo"
|
||||
homeDir: "/home/ubuntu/"
|
||||
files:
|
||||
/home/ubuntu/.ssh/authorized_keys:
|
||||
content:
|
||||
|
@ -173,18 +174,9 @@ Resources:
|
|||
"Fn::Base64":
|
||||
!Sub |
|
||||
#!/bin/bash -xe
|
||||
# http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-migrate-ipv6.html
|
||||
# https://bugs.launchpad.net/ubuntu/+source/ifupdown/+bug/1013597
|
||||
cat <<EOF > /etc/network/interfaces.d/60-default-with-ipv6.cfg
|
||||
iface eth0 inet6 dhcp
|
||||
up sysctl net.ipv6.conf.\$IFACE.accept_ra=2
|
||||
pre-down ip link set dev \$IFACE up
|
||||
EOF
|
||||
ifdown eth0; ifup eth0
|
||||
dhclient -6
|
||||
apt-get update
|
||||
apt-get -y install python-setuptools
|
||||
easy_install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz
|
||||
apt-get -y install python-pip
|
||||
pip install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz
|
||||
cfn-init -v --stack ${AWS::StackName} --resource EC2Instance --region ${AWS::Region}
|
||||
cfn-signal -e $? --stack ${AWS::StackName} --resource EC2Instance --region ${AWS::Region}
|
||||
Tags:
|
||||
|
|
|
@ -1,16 +1,17 @@
|
|||
---
|
||||
- name: Deploy the template
|
||||
cloudformation:
|
||||
aws_access_key: "{{ aws_access_key | default(lookup('env','AWS_ACCESS_KEY_ID'), true)}}"
|
||||
aws_secret_key: "{{ aws_secret_key | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true)}}"
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
stack_name: "{{ stack_name }}"
|
||||
state: "present"
|
||||
region: "{{ region }}"
|
||||
region: "{{ algo_region }}"
|
||||
template: roles/cloud-ec2/files/stack.yml
|
||||
template_parameters:
|
||||
InstanceTypeParameter: "{{ cloud_providers.ec2.size }}"
|
||||
PublicSSHKeyParameter: "{{ lookup('file', SSH_keys.public) }}"
|
||||
ImageIdParameter: "{{ ami_image }}"
|
||||
WireGuardPort: "{{ wireguard_port }}"
|
||||
tags:
|
||||
Environment: Algo
|
||||
register: stack
|
||||
|
|
|
@ -1,37 +1,27 @@
|
|||
---
|
||||
- name: Check if the encrypted image already exists
|
||||
ec2_ami_find:
|
||||
aws_access_key: "{{ aws_access_key | default(lookup('env','AWS_ACCESS_KEY_ID'), true)}}"
|
||||
aws_secret_key: "{{ aws_secret_key | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true)}}"
|
||||
owner: self
|
||||
sort: creationDate
|
||||
sort_order: descending
|
||||
sort_end: 1
|
||||
state: available
|
||||
ami_tags:
|
||||
Algo: "encrypted"
|
||||
region: "{{ region }}"
|
||||
ec2_ami_facts:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
owners: self
|
||||
region: "{{ algo_region }}"
|
||||
filters:
|
||||
state: available
|
||||
"tag:Algo": encrypted
|
||||
register: search_crypt
|
||||
|
||||
- set_fact:
|
||||
ami_image: "{{ search_crypt.results[0].ami_id }}"
|
||||
when: search_crypt.results
|
||||
|
||||
- name: Copy to an encrypted image
|
||||
ec2_ami_copy:
|
||||
aws_access_key: "{{ aws_access_key | default(lookup('env','AWS_ACCESS_KEY_ID'), true)}}"
|
||||
aws_secret_key: "{{ aws_secret_key | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true)}}"
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
encrypted: yes
|
||||
name: algo
|
||||
kms_key_id: "{{ kms_key_id | default(omit) }}"
|
||||
region: "{{ region }}"
|
||||
source_image_id: "{{ ami_image }}"
|
||||
source_region: "{{ region }}"
|
||||
region: "{{ algo_region }}"
|
||||
source_image_id: "{{ (ami_search.images | sort(attribute='creation_date') | last)['image_id'] }}"
|
||||
source_region: "{{ algo_region }}"
|
||||
wait: true
|
||||
tags:
|
||||
Algo: "encrypted"
|
||||
wait: true
|
||||
register: enc_image
|
||||
when: not search_crypt.results
|
||||
|
||||
- set_fact:
|
||||
ami_image: "{{ enc_image.image_id }}"
|
||||
when: not search_crypt.results
|
||||
register: ami_search_encrypted
|
||||
when: search_crypt.images|length|int == 0
|
||||
|
|
|
@ -1,66 +1,40 @@
|
|||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- set_fact:
|
||||
access_key: "{{ aws_access_key | default(lookup('env','AWS_ACCESS_KEY_ID'), true) }}"
|
||||
secret_key: "{{ aws_secret_key | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}"
|
||||
stack_name: "{{ aws_server_name | replace('.', '-') }}"
|
||||
algo_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ aws_regions[_algo_region.user_input | int -1 ]['region_name'] }}
|
||||
{%- else %}{{ aws_regions[default_region | int - 1]['region_name'] }}{% endif %}
|
||||
stack_name: "{{ algo_server_name | replace('.', '-') }}"
|
||||
|
||||
- name: Locate official AMI for region
|
||||
ec2_ami_find:
|
||||
ec2_ami_facts:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
name: "ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-amd64-server-*"
|
||||
owner: "{{ cloud_providers.ec2.image.owner }}"
|
||||
sort: creationDate
|
||||
sort_order: descending
|
||||
sort_end: 1
|
||||
region: "{{ region }}"
|
||||
owners: "{{ cloud_providers.ec2.image.owner }}"
|
||||
region: "{{ algo_region }}"
|
||||
filters:
|
||||
name: "ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-amd64-server-*"
|
||||
register: ami_search
|
||||
|
||||
- set_fact:
|
||||
ami_image: "{{ ami_search.results[0].ami_id }}"
|
||||
- import_tasks: encrypt_image.yml
|
||||
when: encrypted
|
||||
|
||||
- include_tasks: encrypt_image.yml
|
||||
tags: [encrypted]
|
||||
- name: Set the ami id as a fact
|
||||
set_fact:
|
||||
ami_image: >-
|
||||
{% if ami_search_encrypted.image_id is defined %}{{ ami_search_encrypted.image_id }}
|
||||
{%- elif search_crypt.images is defined and search_crypt.images|length >= 1 %}{{ (search_crypt.images | sort(attribute='creation_date') | last)['image_id'] }}
|
||||
{%- else %}{{ (ami_search.images | sort(attribute='creation_date') | last)['image_id'] }}{% endif %}
|
||||
|
||||
- include_tasks: cloudformation.yml
|
||||
|
||||
- name: Add new instance to host group
|
||||
add_host:
|
||||
hostname: "{{ stack.stack_outputs.ElasticIP }}"
|
||||
groupname: vpn-host
|
||||
ansible_ssh_user: ubuntu
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||
cloud_provider: ec2
|
||||
- name: Deploy the stack
|
||||
import_tasks: cloudformation.yml
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ stack.stack_outputs.ElasticIP }}"
|
||||
|
||||
- name: Get EC2 instances
|
||||
ec2_instance_facts:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
region: "{{ region }}"
|
||||
filters:
|
||||
instance-state-name: running
|
||||
"tag:Environment": Algo
|
||||
register: algo_instances
|
||||
|
||||
- name: Ensure the group ec2 exists in the dynamic inventory file
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
line: '[ec2]'
|
||||
|
||||
- name: Populate the dynamic inventory
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
insertafter: '\[ec2\]'
|
||||
regexp: "^{{ item.public_ip_address }}.*"
|
||||
line: "{{ item.public_ip_address }}"
|
||||
with_items:
|
||||
- "{{ algo_instances.instances }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
|
|
55
roles/cloud-ec2/tasks/prompts.yml
Normal file
|
@ -0,0 +1,55 @@
|
|||
---
- pause:
    prompt: |
      Enter your aws_access_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
      Note: Make sure to use an IAM user with an acceptable policy attached (see https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md)
    echo: false
  register: _aws_access_key
  when:
    - aws_access_key is undefined
    - lookup('env','AWS_ACCESS_KEY_ID')|length <= 0

- pause:
    prompt: |
      Enter your aws_secret_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
    echo: false
  register: _aws_secret_key
  when:
    - aws_secret_key is undefined
    - lookup('env','AWS_SECRET_ACCESS_KEY')|length <= 0

- set_fact:
    access_key: "{{ aws_access_key | default(_aws_access_key.user_input|default(None)) | default(lookup('env','AWS_ACCESS_KEY_ID'), true) }}"
    secret_key: "{{ aws_secret_key | default(_aws_secret_key.user_input|default(None)) | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}"

- block:
    - name: Get regions
      aws_region_facts:
        aws_access_key: "{{ access_key }}"
        aws_secret_key: "{{ secret_key }}"
        region: us-east-1
      register: _aws_regions

    - name: Set facts about the regions
      set_fact:
        aws_regions: "{{ _aws_regions.regions | sort(attribute='region_name') }}"

    - name: Set the default region
      set_fact:
        default_region: >-
          {% for r in aws_regions %}
          {%- if r['region_name'] == "us-east-1" %}{{ loop.index }}{% endif %}
          {%- endfor %}

    - pause:
        prompt: |
          What region should the server be located in?
          (https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
          {% for r in aws_regions %}
          {{ loop.index }}. {{ r['region_name'] }}
          {% endfor %}

          Enter the number of your desired region
          [{{ default_region }}]
      register: _algo_region
      when: region is undefined
|
|
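For reference, a hedged sketch of the data the menu above iterates over: aws_region_facts registers a regions list whose entries carry a region_name field, which is what the sort, the numbered prompt, and the later index lookup all key on (sample values assumed).

```
# Assumed shape of the registered _aws_regions result (values illustrative):
_aws_regions:
  regions:
    - { endpoint: ec2.ap-south-1.amazonaws.com, region_name: ap-south-1 }
    - { endpoint: ec2.us-east-1.amazonaws.com, region_name: us-east-1 }
```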
@ -1,68 +1,54 @@
|
|||
- block:
|
||||
- set_fact:
|
||||
credentials_file_path: "{{ credentials_file | default(lookup('env','GCE_CREDENTIALS_FILE_PATH'), true) }}"
|
||||
ssh_public_key_lookup: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
|
||||
- set_fact:
|
||||
credentials_file_lookup: "{{ lookup('file', '{{ credentials_file_path }}') }}"
|
||||
|
||||
- set_fact:
|
||||
service_account_email: "{{ credentials_file_lookup.client_email | default(lookup('env','GCE_EMAIL')) }}"
|
||||
project_id: "{{ credentials_file_lookup.project_id | default(lookup('env','GCE_PROJECT')) }}"
|
||||
server_name: "{{ gce_server_name | replace('_', '-') }}"
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- name: Network configured
|
||||
gce_net:
|
||||
name: "algo-net-{{ server_name }}"
|
||||
fwname: "algo-net-{{ server_name }}-fw"
|
||||
allowed: "udp:500,4500;tcp:22"
|
||||
name: "algo-net-{{ algo_server_name }}"
|
||||
fwname: "algo-net-{{ algo_server_name }}-fw"
|
||||
allowed: "udp:500,4500,{{ wireguard_port }};tcp:22"
|
||||
state: "present"
|
||||
mode: auto
|
||||
src_range: 0.0.0.0/0
|
||||
service_account_email: "{{ credentials_file_lookup.client_email }}"
|
||||
credentials_file: "{{ credentials_file }}"
|
||||
project_id: "{{ credentials_file_lookup.project_id }}"
|
||||
service_account_email: "{{ service_account_email }}"
|
||||
credentials_file: "{{ credentials_file_path }}"
|
||||
project_id: "{{ project_id }}"
|
||||
|
||||
- block:
|
||||
- name: External IP allocated
|
||||
gce_eip:
|
||||
service_account_email: "{{ service_account_email }}"
|
||||
credentials_file: "{{ credentials_file_path }}"
|
||||
project_id: "{{ project_id }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
region: "{{ algo_region.split('-')[0:2] | join('-') }}"
|
||||
state: present
|
||||
register: gce_eip
|
||||
|
||||
- name: Set External IP as a fact
|
||||
set_fact:
|
||||
external_ip: "{{ gce_eip.address }}"
|
||||
when: cloud_providers.gce.external_static_ip
|
||||
|
||||
- name: "Creating a new instance..."
|
||||
gce:
|
||||
instance_names: "{{ server_name }}"
|
||||
zone: "{{ zone }}"
|
||||
instance_names: "{{ algo_server_name }}"
|
||||
zone: "{{ algo_region }}"
|
||||
external_ip: "{{ external_ip | default('ephemeral') }}"
|
||||
machine_type: "{{ cloud_providers.gce.size }}"
|
||||
image: "{{ cloud_providers.gce.image }}"
|
||||
service_account_email: "{{ service_account_email }}"
|
||||
credentials_file: "{{ credentials_file_path }}"
|
||||
project_id: "{{ project_id }}"
|
||||
metadata: '{"ssh-keys":"ubuntu:{{ ssh_public_key_lookup }}"}'
|
||||
network: "algo-net-{{ server_name }}"
|
||||
network: "algo-net-{{ algo_server_name }}"
|
||||
tags:
|
||||
- "environment-algo"
|
||||
register: google_vm
|
||||
|
||||
- name: Add the instance to an inventory group
|
||||
add_host:
|
||||
name: "{{ google_vm.instance_data[0].public_ip }}"
|
||||
groups: vpn-host
|
||||
ansible_ssh_user: ubuntu
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||
cloud_provider: gce
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ google_vm.instance_data[0].public_ip }}"
|
||||
|
||||
- name: Ensure the group gce exists in the dynamic inventory file
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
line: '[gce]'
|
||||
|
||||
- name: Populate the dynamic inventory
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
insertafter: '\[gce\]'
|
||||
regexp: "^{{ google_vm.instance_data[0].public_ip }}.*"
|
||||
line: "{{ google_vm.instance_data[0].public_ip }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
|
|
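One non-obvious line above is the region passed to gce_eip: algo_region holds a GCE zone name at that point, so the split/join chain trims the trailing zone letter to obtain the region. A minimal sketch (assumed zone value) of the same expression in isolation:

```
# "us-east1-b" -> ["us", "east1"] -> "us-east1"
- set_fact:
    example_gce_region: "{{ 'us-east1-b'.split('-')[0:2] | join('-') }}"
```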
67
roles/cloud-gce/tasks/prompts.yml
Normal file
|
@ -0,0 +1,67 @@
|
|||
---
- pause:
    prompt: |
      Enter the local path to your credentials JSON file
      (https://support.google.com/cloud/answer/6158849?hl=en&ref_topic=6262490#serviceaccounts)
  register: _gce_credentials_file
  when:
    - gce_credentials_file is undefined
    - lookup('env','GCE_CREDENTIALS_FILE_PATH')|length <= 0

- set_fact:
    credentials_file_path: "{{ gce_credentials_file | default(_gce_credentials_file.user_input|default(None)) | default(lookup('env','GCE_CREDENTIALS_FILE_PATH'), true) }}"
    ssh_public_key_lookup: "{{ lookup('file', '{{ SSH_keys.public }}') }}"

- set_fact:
    credentials_file_lookup: "{{ lookup('file', '{{ credentials_file_path }}') }}"

- set_fact:
    service_account_email: "{{ credentials_file_lookup.client_email | default(lookup('env','GCE_EMAIL')) }}"
    project_id: "{{ credentials_file_lookup.project_id | default(lookup('env','GCE_PROJECT')) }}"

- block:
    - name: Get regions
      gce_region_facts:
        service_account_email: "{{ credentials_file_lookup.client_email }}"
        credentials_file: "{{ credentials_file_path }}"
        project_id: "{{ credentials_file_lookup.project_id }}"
      register: _gce_regions

    - name: Set facts about the regions
      set_fact:
        gce_regions: >-
          [{%- for region in _gce_regions.results.regions | sort(attribute='name') -%}
          {% if region.status == "UP" %}
          {% for zone in region.zones | sort(attribute='name') %}
          {% if zone.status == "UP" %}
          '{{ zone.name }}'
          {% endif %}{% if not loop.last %},{% endif %}
          {% endfor %}
          {% endif %}{% if not loop.last %},{% endif %}
          {%- endfor -%}]

    - name: Set facts about the default region
      set_fact:
        default_region: >-
          {% for region in gce_regions %}
          {%- if region == "us-east1-b" %}{{ loop.index }}{% endif %}
          {%- endfor %}

    - pause:
        prompt: |
          What region should the server be located in?
          (https://cloud.google.com/compute/docs/regions-zones/)
          {% for r in gce_regions %}
          {{ loop.index }}. {{ r }}
          {% endfor %}

          Enter the number of your desired region
          [{{ default_region }}]
      register: _gce_region
      when: region is undefined

- set_fact:
    algo_region: >-
      {% if region is defined %}{{ region }}
      {%- elif _gce_region.user_input is defined and _gce_region.user_input != "" %}{{ gce_regions[_gce_region.user_input | int -1 ] }}
      {%- else %}{{ gce_regions[default_region | int - 1] }}{% endif %}
|
|
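Despite its name, gce_regions above ends up holding zone names: the nested template keeps only zones whose region and zone status are both UP, and the numbered prompt and index lookup then operate on those plain strings. Illustrative only (assumed sample output):

```
gce_regions: ['asia-east1-a', 'asia-east1-b', 'us-east1-b', 'us-east1-c']
```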
@ -1,8 +1,6 @@
|
|||
- block:
|
||||
- set_fact:
|
||||
access_key: "{{ aws_access_key | default(lookup('env','AWS_ACCESS_KEY_ID'), true) }}"
|
||||
secret_key: "{{ aws_secret_key | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}"
|
||||
region: "{{ algo_region | default(lookup('env','AWS_DEFAULT_REGION'), true) }}"
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- name: Create an instance
|
||||
lightsail:
|
||||
|
@ -10,8 +8,8 @@
|
|||
aws_secret_key: "{{ secret_key }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
state: present
|
||||
region: "{{ region }}"
|
||||
zone: "{{ region }}a"
|
||||
region: "{{ algo_region }}"
|
||||
zone: "{{ algo_region }}a"
|
||||
blueprint_id: "{{ cloud_providers.lightsail.image }}"
|
||||
bundle_id: "{{ cloud_providers.lightsail.size }}"
|
||||
wait_timeout: 300
|
||||
|
@ -22,6 +20,9 @@
|
|||
- from_port: 500
|
||||
to_port: 500
|
||||
protocol: udp
|
||||
- from_port: "{{ wireguard_port }}"
|
||||
to_port: "{{ wireguard_port }}"
|
||||
protocol: udp
|
||||
user_data: |
|
||||
#!/bin/bash
|
||||
mkdir -p /home/ubuntu/.ssh/
|
||||
|
@ -34,15 +35,7 @@
|
|||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ algo_instance['instance']['public_ip_address'] }}"
|
||||
|
||||
- name: Add new instance to host group
|
||||
add_host:
|
||||
hostname: "{{ cloud_instance_ip }}"
|
||||
groupname: vpn-host
|
||||
ansible_ssh_user: ubuntu
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||
cloud_provider: lightsail
|
||||
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
|
|
61
roles/cloud-lightsail/tasks/prompts.yml
Normal file
|
@ -0,0 +1,61 @@
|
|||
---
- pause:
    prompt: |
      Enter your aws_access_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
      Note: Make sure to use an IAM user with an acceptable policy attached (see https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md)
    echo: false
  register: _aws_access_key
  when:
    - aws_access_key is undefined
    - lookup('env','AWS_ACCESS_KEY_ID')|length <= 0

- pause:
    prompt: |
      Enter your aws_secret_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
    echo: false
  register: _aws_secret_key
  when:
    - aws_secret_key is undefined
    - lookup('env','AWS_SECRET_ACCESS_KEY')|length <= 0

- set_fact:
    access_key: "{{ aws_access_key | default(_aws_access_key.user_input|default(None)) | default(lookup('env','AWS_ACCESS_KEY_ID'), true) }}"
    secret_key: "{{ aws_secret_key | default(_aws_secret_key.user_input|default(None)) | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}"

- block:
    - name: Get regions
      lightsail_region_facts:
        aws_access_key: "{{ access_key }}"
        aws_secret_key: "{{ secret_key }}"
        region: us-east-1
      register: _lightsail_regions

    - name: Set facts about the regions
      set_fact:
        lightsail_regions: "{{ _lightsail_regions.results.regions | sort(attribute='name') }}"

    - name: Set the default region
      set_fact:
        default_region: >-
          {% for r in lightsail_regions %}
          {%- if r['name'] == "us-east-1" %}{{ loop.index }}{% endif %}
          {%- endfor %}

    - pause:
        prompt: |
          What region should the server be located in?
          (https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/)
          {% for r in lightsail_regions %}
          {{ (loop.index|string + '.').ljust(3) }} {{ r['name'].ljust(20) }} {{ r['displayName'] }}
          {% endfor %}

          Enter the number of your desired region
          [{{ default_region }}]
      register: _algo_region
      when: region is undefined

- set_fact:
    algo_region: >-
      {% if region is defined %}{{ region }}
      {%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ lightsail_regions[_algo_region.user_input | int -1 ]['name'] }}
      {%- else %}{{ lightsail_regions[default_region | int - 1]['name'] }}{% endif %}
|
|
@ -1,4 +1,8 @@
|
|||
---
|
||||
- fail:
|
||||
msg: "OpenStack credentials are not set. Download it from the OpenStack dashboard->Compute->API Access and source it in the shell (eg: source /tmp/dhc-openrc.sh)"
|
||||
when: lookup('env', 'OS_AUTH_URL') == ""
|
||||
|
||||
- block:
|
||||
- name: Security group created
|
||||
os_security_group:
|
||||
|
@ -20,6 +24,7 @@
|
|||
- { proto: icmp, port_min: -1, port_max: -1, range: 0.0.0.0/0 }
|
||||
- { proto: udp, port_min: 4500, port_max: 4500, range: 0.0.0.0/0 }
|
||||
- { proto: udp, port_min: 500, port_max: 500, range: 0.0.0.0/0 }
|
||||
- { proto: udp, port_min: "{{ wireguard_port }}", port_max: "{{ wireguard_port }}", range: 0.0.0.0/0 }
|
||||
|
||||
- name: Keypair created
|
||||
os_keypair:
|
||||
|
@ -69,15 +74,7 @@
|
|||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ os_server['openstack']['public_v4'] }}"
|
||||
|
||||
- name: Add new instance to host group
|
||||
add_host:
|
||||
hostname: "{{ cloud_instance_ip }}"
|
||||
groupname: vpn-host
|
||||
ansible_ssh_user: ubuntu
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||
cloud_provider: openstack
|
||||
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
|
|
4
roles/cloud-scaleway/defaults/main.yml
Normal file
|
@ -0,0 +1,4 @@
|
|||
---
scaleway_regions:
  - alias: par1
  - alias: ams1
|
10
roles/cloud-scaleway/tasks/image_facts.yml
Normal file
|
@ -0,0 +1,10 @@
|
|||
---
- name: Set image id as a fact
  set_fact:
    image_id: "{{ item.id }}"
  no_log: true
  when:
    - cloud_providers.scaleway.image == item.name
    - cloud_providers.scaleway.arch == item.arch
    - server_disk_size == item.root_volume.size
  with_items: "{{ outer_item['json']['images'] }}"
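image_facts.yml is not a standalone play: it is included once per page of the paginated image listing, with each page's response handed in as outer_item through loop_control. The consuming loop appears later in this role's main.yml diff and, for context, looks like this:

```
- include_tasks: image_facts.yml
  with_items: "{{ scaleway_images['results'] }}"
  loop_control:
    loop_var: outer_item
```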
|
|
@ -1,11 +1,23 @@
|
|||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- name: Set disk size
|
||||
set_fact:
|
||||
server_disk_size: 50000000000
|
||||
|
||||
- name: Check server size
|
||||
set_fact:
|
||||
server_disk_size: 25000000000
|
||||
when: cloud_providers.scaleway.size == "START1-XS"
|
||||
|
||||
- name: Check if server exists
|
||||
uri:
|
||||
url: "https://cp-{{ algo_region }}.scaleway.com/servers"
|
||||
method: GET
|
||||
headers:
|
||||
Content-Type: 'application/json'
|
||||
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
status_code: 200
|
||||
register: scaleway_servers
|
||||
|
||||
|
@ -24,7 +36,7 @@
|
|||
method: GET
|
||||
headers:
|
||||
Content-Type: 'application/json'
|
||||
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
status_code: 200
|
||||
register: scaleway_organizations
|
||||
|
||||
|
@ -32,27 +44,35 @@
|
|||
set_fact:
|
||||
organization_id: "{{ item.id }}"
|
||||
no_log: true
|
||||
when: scaleway_organization == item.name
|
||||
when: algo_scaleway_org == item.name
|
||||
with_items: "{{ scaleway_organizations.json.organizations }}"
|
||||
|
||||
- name: Get images
|
||||
- name: Get total count of images
|
||||
uri:
|
||||
url: "https://cp-{{ algo_region }}.scaleway.com/images"
|
||||
method: GET
|
||||
headers:
|
||||
Content-Type: 'application/json'
|
||||
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
status_code: 200
|
||||
register: scaleway_pages
|
||||
|
||||
- name: Get images
|
||||
uri:
|
||||
url: "https://cp-{{ algo_region }}.scaleway.com/images?per_page=100&page={{ item }}"
|
||||
method: GET
|
||||
headers:
|
||||
Content-Type: 'application/json'
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
status_code: 200
|
||||
register: scaleway_images
|
||||
with_sequence: start=1 end={{ ((scaleway_pages.x_total_count|int / 100)| round )|int }}
|
||||
|
||||
- name: Set image id as a fact
|
||||
set_fact:
|
||||
image_id: "{{ item.id }}"
|
||||
no_log: true
|
||||
when:
|
||||
- cloud_providers.scaleway.image in item.name
|
||||
- cloud_providers.scaleway.arch == item.arch
|
||||
with_items: "{{ scaleway_images.json.images }}"
|
||||
include_tasks: image_facts.yml
|
||||
with_items: "{{ scaleway_images['results'] }}"
|
||||
loop_control:
|
||||
loop_var: outer_item
|
||||
|
||||
- name: Create a server
|
||||
uri:
|
||||
|
@ -60,16 +80,17 @@
|
|||
method: POST
|
||||
headers:
|
||||
Content-Type: 'application/json'
|
||||
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
body:
|
||||
organization: "{{ organization_id }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
image: "{{ image_id }}"
|
||||
commercial_type: "{{cloud_providers.scaleway.size }}"
|
||||
enable_ipv6: true
|
||||
boot_type: local
|
||||
tags:
|
||||
- Environment:Algo
|
||||
- AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }}
|
||||
enable_ipv6: true
|
||||
status_code: 201
|
||||
body_format: json
|
||||
register: algo_instance
|
||||
|
@ -85,7 +106,7 @@
|
|||
method: POST
|
||||
headers:
|
||||
Content-Type: application/json
|
||||
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
body:
|
||||
action: poweron
|
||||
status_code: 202
|
||||
|
@ -99,7 +120,7 @@
|
|||
method: GET
|
||||
headers:
|
||||
Content-Type: 'application/json'
|
||||
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
status_code: 200
|
||||
until:
|
||||
- algo_instance.json.server.state is defined
|
||||
|
@ -110,15 +131,7 @@
|
|||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ algo_instance['json']['server']['public_ip']['address'] }}"
|
||||
|
||||
- name: Add new instance to host group
|
||||
add_host:
|
||||
hostname: "{{ cloud_instance_ip }}"
|
||||
groupname: vpn-host
|
||||
ansible_ssh_user: root
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||
cloud_provider: scaleway
|
||||
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
|
|
34
roles/cloud-scaleway/tasks/prompts.yml
Normal file
|
@ -0,0 +1,34 @@
|
|||
---
- pause:
    prompt: |
      Enter your auth token (https://www.scaleway.com/docs/generate-an-api-token/)
    echo: false
  register: _scaleway_token
  when: scaleway_token is undefined

- pause:
    prompt: |
      Enter your organization name (https://cloud.scaleway.com/#/billing)
  register: _scaleway_org
  when: scaleway_org is undefined

- pause:
    prompt: |
      What region should the server be located in?
      {% for r in scaleway_regions %}
      {{ loop.index }}. {{ r['alias'] }}
      {% endfor %}

      Enter the number of your desired region
      [{{ scaleway_regions.0.alias }}]
  register: _algo_region
  when: region is undefined

- name: Set scaleway facts
  set_fact:
    algo_scaleway_token: "{{ scaleway_token | default(_scaleway_token.user_input) }}"
    algo_scaleway_org: "{{ scaleway_org | default(_scaleway_org.user_input|default(omit)) }}"
    algo_region: >-
      {% if region is defined %}{{ region }}
      {%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ scaleway_regions[_algo_region.user_input | int -1 ]['alias'] }}
      {%- else %}{{ scaleway_regions.0.alias }}{% endif %}
|
36
roles/cloud-vultr/tasks/main.yml
Normal file
|
@ -0,0 +1,36 @@
|
|||
- block:
    - name: Include prompts
      import_tasks: prompts.yml

    - name: Upload the SSH key
      vr_ssh_key:
        name: "{{ SSH_keys.comment }}"
        ssh_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
      register: ssh_key

    - name: Creating a server
      vr_server:
        name: "{{ algo_server_name }}"
        hostname: "{{ algo_server_name }}"
        os: "{{ cloud_providers.vultr.os }}"
        plan: "{{ cloud_providers.vultr.size }}"
        region: "{{ algo_vultr_region }}"
        state: started
        tag: Environment:Algo
        ssh_key: "{{ ssh_key.vultr_ssh_key.name }}"
        ipv6_enabled: true
        auto_backup_enabled: false
        notify_activate: false
      register: vultr_server

    - set_fact:
        cloud_instance_ip: "{{ vultr_server.vultr_server.v4_main_ip }}"
        ansible_ssh_user: root

  environment:
    VULTR_API_CONFIG: "{{ algo_vultr_config }}"
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always
|
56
roles/cloud-vultr/tasks/prompts.yml
Normal file
|
@ -0,0 +1,56 @@
|
|||
---
- pause:
    prompt: |
      Enter the local path to your configuration INI file
      (https://github.com/trailofbits/algo/docs/cloud-vultr.md):
  register: _vultr_config
  when: vultr_config is undefined

- name: Set the token as a fact
  set_fact:
    algo_vultr_config: "{{ vultr_config | default(_vultr_config.user_input) | default(lookup('env','VULTR_API_CONFIG'), true) }}"

- name: Get regions
  uri:
    url: https://api.vultr.com/v1/regions/list
    method: GET
    status_code: 200
  register: _vultr_regions

- name: Format regions
  set_fact:
    regions: >-
      [ {% for k, v in _vultr_regions.json.items() %}
      {{ v }}{% if not loop.last %},{% endif %}
      {% endfor %} ]

- name: Set regions as a fact
  set_fact:
    vultr_regions: "{{ regions | sort(attribute='country') }}"

- name: Set default region
  set_fact:
    default_region: >-
      {% for r in vultr_regions %}
      {%- if r['DCID'] == "1" %}{{ loop.index }}{% endif %}
      {%- endfor %}

- pause:
    prompt: |
      What region should the server be located in?
      (https://www.vultr.com/locations/):
      {% for r in vultr_regions %}
      {{ loop.index }}. {{ r['name'] }}
      {% endfor %}

      Enter the number of your desired region
      [{{ default_region }}]
  register: _algo_region
  when: region is undefined

- name: Set the desired region as a fact
  set_fact:
    algo_vultr_region: >-
      {% if region is defined %}{{ region }}
      {%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ vultr_regions[_algo_region.user_input | int -1 ]['name'] }}
      {%- else %}{{ vultr_regions[default_region | int - 1]['name'] }}{% endif %}
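The Format regions step above exists because the Vultr v1 regions endpoint returns a JSON object keyed by data-center ID rather than a list, so the values have to be flattened before they can be sorted by country. A hedged sketch of that response shape (field values assumed for illustration):

```
# Assumed excerpt of the /v1/regions/list response; DCID "1" is what the
# default_region loop above looks for.
_vultr_regions_json:
  "1": { DCID: "1", name: "New Jersey", country: "US", continent: "North America" }
  "5": { DCID: "5", name: "Los Angeles", country: "US", continent: "North America" }
```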
|
2
roles/common/defaults/main.yml
Normal file
|
@ -0,0 +1,2 @@
|
|||
---
install_headers: true
|
|
@ -7,8 +7,11 @@
|
|||
- name: flush routing cache
|
||||
shell: echo 1 > /proc/sys/net/ipv4/route/flush
|
||||
|
||||
- name: restart loopback
|
||||
shell: ifdown lo:100 && ifup lo:100
|
||||
- name: restart systemd-networkd
|
||||
systemd:
|
||||
name: systemd-networkd
|
||||
state: restarted
|
||||
daemon_reload: true
|
||||
|
||||
- name: restart loopback bsd
|
||||
shell: >
|
||||
|
|
30
roles/common/tasks/facts.yml
Normal file
|
@ -0,0 +1,30 @@
|
|||
---
- block:
    - name: Generate password for the CA key
      local_action:
        module: shell
          openssl rand -hex 16
      register: CA_password

    - name: Generate p12 export password
      local_action:
        module: shell
          openssl rand 8 | python -c 'import sys,string; chars=string.ascii_letters + string.digits + "_@"; print "".join([chars[ord(c) % 64] for c in list(sys.stdin.read())])'
      register: p12_password_generated
      when: p12_password is not defined
  tags: update-users
  become: false

- name: Define facts
  set_fact:
    p12_export_password: "{{ p12_password|default(p12_password_generated.stdout) }}"
  tags: update-users

- set_fact:
    CA_password: "{{ CA_password.stdout }}"
    IP_subject_alt_name: "{{ IP_subject_alt_name }}"

- name: Set IPv6 support as a fact
  set_fact:
    ipv6_support: "{% if ansible_default_ipv6['gateway'] is defined %}true{% else %}false{% endif %}"
  tags: always
|
|
@ -1,6 +1,13 @@
|
|||
---
|
||||
|
||||
- set_fact:
|
||||
config_prefix: "/usr/local/"
|
||||
root_group: wheel
|
||||
ssh_service_name: sshd
|
||||
apparmor_enabled: false
|
||||
strongswan_additional_plugins:
|
||||
- kernel-pfroute
|
||||
- kernel-pfkey
|
||||
ansible_python_interpreter: /usr/local/bin/python2.7
|
||||
tools:
|
||||
- git
|
||||
- subversion
|
||||
|
@ -17,6 +24,15 @@
|
|||
tags:
|
||||
- always
|
||||
|
||||
- setup:
|
||||
|
||||
- name: Install tools
|
||||
package: name="{{ item }}" state=present
|
||||
with_items:
|
||||
- "{{ tools|default([]) }}"
|
||||
tags:
|
||||
- always
|
||||
|
||||
- name: Loopback included into the rc config
|
||||
blockinfile:
|
||||
dest: /etc/rc.conf
|
||||
|
|
|
@ -1,26 +1,26 @@
|
|||
---
|
||||
- block:
|
||||
- include_tasks: ubuntu.yml
|
||||
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
|
||||
- name: Check the system
|
||||
raw: uname -a
|
||||
register: OS
|
||||
|
||||
- include_tasks: freebsd.yml
|
||||
when: ansible_distribution == 'FreeBSD'
|
||||
- include_tasks: ubuntu.yml
|
||||
when: '"Ubuntu" in OS.stdout or "Linux" in OS.stdout'
|
||||
|
||||
- name: Install tools
|
||||
package: name="{{ item }}" state=present
|
||||
with_items:
|
||||
- "{{ tools|default([]) }}"
|
||||
tags:
|
||||
- always
|
||||
- include_tasks: freebsd.yml
|
||||
when: '"FreeBSD" in OS.stdout'
|
||||
|
||||
- name: Sysctl tuning
|
||||
sysctl: name="{{ item.item }}" value="{{ item.value }}"
|
||||
with_items:
|
||||
- "{{ sysctl|default([]) }}"
|
||||
tags:
|
||||
- always
|
||||
- name: Gather additional facts
|
||||
import_tasks: facts.yml
|
||||
|
||||
- meta: flush_handlers
|
||||
- name: Sysctl tuning
|
||||
sysctl: name="{{ item.item }}" value="{{ item.value }}"
|
||||
with_items:
|
||||
- "{{ sysctl|default([]) }}"
|
||||
tags:
|
||||
- always
|
||||
|
||||
- meta: flush_handlers
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
|
|
|
@ -1,81 +1,88 @@
|
|||
---
|
||||
- block:
|
||||
- name: Ubuntu | Install prerequisites
|
||||
apt:
|
||||
name: "{{ item }}"
|
||||
update_cache: true
|
||||
with_items:
|
||||
- python2.7
|
||||
- sudo
|
||||
|
||||
- name: Ubuntu | Configure defaults
|
||||
alternatives:
|
||||
name: python
|
||||
link: /usr/bin/python
|
||||
path: /usr/bin/python2.7
|
||||
priority: 1
|
||||
tags:
|
||||
- update-alternatives
|
||||
vars:
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
|
||||
- name: Gather facts
|
||||
setup:
|
||||
|
||||
- name: Cloud only tasks
|
||||
block:
|
||||
- name: Install software updates
|
||||
apt:
|
||||
update_cache: true
|
||||
install_recommends: true
|
||||
upgrade: dist
|
||||
- name: Install software updates
|
||||
apt:
|
||||
update_cache: true
|
||||
install_recommends: true
|
||||
upgrade: dist
|
||||
|
||||
- name: Upgrade the ca certificates
|
||||
apt:
|
||||
name: ca-certificates
|
||||
state: latest
|
||||
- name: Check if reboot is required
|
||||
shell: >
|
||||
if [[ -e /var/run/reboot-required ]]; then echo "required"; else echo "no"; fi
|
||||
args:
|
||||
executable: /bin/bash
|
||||
register: reboot_required
|
||||
|
||||
- name: Check if reboot is required
|
||||
shell: >
|
||||
if [[ -e /var/run/reboot-required ]]; then echo "required"; else echo "no"; fi
|
||||
args:
|
||||
executable: /bin/bash
|
||||
register: reboot_required
|
||||
- name: Reboot
|
||||
shell: sleep 2 && shutdown -r now "Ansible updates triggered"
|
||||
async: 1
|
||||
poll: 0
|
||||
when: reboot_required is defined and reboot_required.stdout == 'required'
|
||||
ignore_errors: true
|
||||
|
||||
- name: Reboot
|
||||
shell: sleep 2 && shutdown -r now "Ansible updates triggered"
|
||||
async: 1
|
||||
poll: 0
|
||||
when: reboot_required is defined and reboot_required.stdout == 'required'
|
||||
ignore_errors: true
|
||||
- name: Wait until SSH becomes ready...
|
||||
local_action:
|
||||
module: wait_for
|
||||
port: 22
|
||||
host: "{{ inventory_hostname }}"
|
||||
search_regex: OpenSSH
|
||||
delay: 10
|
||||
timeout: 320
|
||||
when: reboot_required is defined and reboot_required.stdout == 'required'
|
||||
become: false
|
||||
when: algo_provider != "local"
|
||||
|
||||
- name: Wait until SSH becomes ready...
|
||||
local_action:
|
||||
module: wait_for
|
||||
port: 22
|
||||
host: "{{ inventory_hostname }}"
|
||||
search_regex: OpenSSH
|
||||
delay: 10
|
||||
timeout: 320
|
||||
when: reboot_required is defined and reboot_required.stdout == 'required'
|
||||
become: false
|
||||
- name: Include unattended upgrades configuration
|
||||
import_tasks: unattended-upgrades.yml
|
||||
|
||||
- name: Include unattended upgrades configuration
|
||||
include_tasks: unattended-upgrades.yml
|
||||
|
||||
- name: Disable MOTD on login and SSHD
|
||||
replace: dest="{{ item.file }}" regexp="{{ item.regexp }}" replace="{{ item.line }}"
|
||||
with_items:
|
||||
- { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/login' }
|
||||
- { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/sshd' }
|
||||
tags:
|
||||
- cloud
|
||||
|
||||
- name: Install system specific tools
|
||||
package: name="{{ item }}" state=present
|
||||
- name: Disable MOTD on login and SSHD
|
||||
replace: dest="{{ item.file }}" regexp="{{ item.regexp }}" replace="{{ item.line }}"
|
||||
with_items:
|
||||
- ifupdown
|
||||
tags:
|
||||
- always
|
||||
|
||||
- name: Ensure the interfaces directory exists
|
||||
file:
|
||||
path: /etc/network/interfaces.d/
|
||||
state: directory
|
||||
mode: 0755
|
||||
owner: root
|
||||
group: root
|
||||
tags:
|
||||
- always
|
||||
- { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/login' }
|
||||
- { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/sshd' }
|
||||
|
||||
- name: Loopback for services configured
|
||||
template: src=10-loopback-services.cfg.j2 dest=/etc/network/interfaces.d/10-loopback-services.cfg
|
||||
template:
|
||||
src: 10-algo-lo100.network.j2
|
||||
dest: /etc/systemd/network/10-algo-lo100.network
|
||||
notify:
|
||||
- restart loopback
|
||||
- restart systemd-networkd
|
||||
tags:
|
||||
- always
|
||||
|
||||
- name: Loopback included into the network config
|
||||
lineinfile: dest=/etc/network/interfaces line='source /etc/network/interfaces.d/10-loopback-services.cfg' state=present
|
||||
notify:
|
||||
- restart loopback
|
||||
- name: systemd services enabled and started
|
||||
systemd:
|
||||
name: "{{ item }}"
|
||||
state: started
|
||||
enabled: true
|
||||
daemon_reload: true
|
||||
with_items:
|
||||
- systemd-networkd
|
||||
- systemd-resolved
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
@ -111,3 +118,19 @@
|
|||
value: 1
|
||||
tags:
|
||||
- always
|
||||
|
||||
- name: Install tools
|
||||
package: name="{{ item }}" state=present
|
||||
with_items:
|
||||
- "{{ tools|default([]) }}"
|
||||
tags:
|
||||
- always
|
||||
|
||||
- name: Install headers
|
||||
apt:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
when: install_headers
|
||||
with_items:
|
||||
- linux-headers-generic
|
||||
- "linux-headers-{{ ansible_kernel }}"
|
||||
|
|
|
@ -19,3 +19,11 @@
|
|||
owner: root
|
||||
group: root
|
||||
mode: 0644
|
||||
|
||||
- name: Unattended reboots configured
|
||||
template:
|
||||
src: 60unattended-reboot.j2
|
||||
dest: /etc/apt/apt.conf.d/60unattended-reboot
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0644
|
||||
|
|
7
roles/common/templates/10-algo-lo100.network.j2
Normal file
|
@ -0,0 +1,7 @@
|
|||
[Match]
Name=lo

[Network]
Label=lo:100
Address={{ local_service_ip }}/32
Address=FCAA::1/64
|
|
@ -1,9 +0,0 @@
|
|||
auto lo:100
|
||||
iface lo:100 inet static
|
||||
address {{ local_service_ip }}
|
||||
netmask 255.255.255.255
|
||||
|
||||
iface lo:100 inet6 static
|
||||
address FCAA::1
|
||||
netmask 64
|
||||
autoconf 0
|
2
roles/common/templates/60unattended-reboot.j2
Normal file
|
@ -0,0 +1,2 @@
|
|||
Unattended-Upgrade::Automatic-Reboot "{{ unattended_reboot.enabled|lower }}";
|
||||
Unattended-Upgrade::Automatic-Reboot-Time "{{ unattended_reboot.time }}";
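The two directives above are driven by a single dictionary from the Algo configuration; a minimal sketch of the variable this template expects (values assumed for illustration):

```
unattended_reboot:
  enabled: false
  time: "06:00"
```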
|
|
@ -1,7 +0,0 @@
|
|||
---
|
||||
|
||||
dependencies:
|
||||
- { role: common, tags: common }
|
||||
- role: dns_encryption
|
||||
tags: dns_encryption
|
||||
when: dns_encryption == true
|
|
@ -1,16 +1,8 @@
|
|||
---
|
||||
- block:
|
||||
|
||||
- name: The DNS tag is defined
|
||||
set_fact:
|
||||
local_dns: true
|
||||
|
||||
- name: Dnsmasq installed
|
||||
package: name=dnsmasq
|
||||
|
||||
- name: Ensure that the dnsmasq user exists
|
||||
user: name=dnsmasq groups=nogroup append=yes state=present
|
||||
|
||||
- name: The dnsmasq directory created
|
||||
file: dest=/var/lib/dnsmasq state=directory mode=0755 owner=dnsmasq group=nogroup
|
||||
|
||||
|
|
|
@ -88,12 +88,13 @@ no-resolv
|
|||
# You can control how dnsmasq talks to a server: this forces
|
||||
# queries to 10.1.2.3 to be routed via eth1
|
||||
# server=10.1.2.3@eth1
|
||||
{% if dns_encryption|default(false)|bool == true %}
|
||||
{% if dns_encryption %}
|
||||
server={{ local_service_ip }}#5353
|
||||
{% else %}
|
||||
{% for host in dns_servers.ipv4 %}
|
||||
server={{ host }}
|
||||
{% endfor %}
|
||||
stop-dns-rebind
|
||||
{% endif %}
|
||||
|
||||
# and this sets the source (ie local) address used to talk to
|
||||
|
@ -103,7 +104,7 @@ server={{ host }}
|
|||
|
||||
# If you want dnsmasq to change uid and gid to something other
|
||||
# than the default, edit the following lines.
|
||||
user=nobody
|
||||
user=dnsmasq
|
||||
group=nogroup
|
||||
|
||||
# If you want dnsmasq to listen for DHCP and DNS requests only on
|
||||
|
|
|
@ -1,7 +1,13 @@
|
|||
---
|
||||
listen_port: "{% if local_dns|d(false)|bool == true %}5353{% else %}53{% endif %}"
|
||||
algo_local_dns: false
|
||||
listen_port: "{% if algo_local_dns %}5353{% else %}53{% endif %}"
|
||||
# the version used if the latest unavailable (in case of Github API rate limited)
|
||||
dnscrypt_proxy_version: 2.0.10
|
||||
apparmor_enabled: true
|
||||
dns_encryption: true
|
||||
dns_encryption_provider: "*"
|
||||
ipv6_support: false
|
||||
dnscrypt_servers:
|
||||
ipv4:
|
||||
- cloudflare
|
||||
ipv6:
|
||||
- cloudflare-ipv6
|
||||
|
|
|
@ -0,0 +1,4 @@
|
|||
// Automatically upgrade packages from these (origin:archive) pairs
|
||||
Unattended-Upgrade::Allowed-Origins {
|
||||
"LP-PPA-shevchuk-dnscrypt-proxy:${distro_codename}";
|
||||
};
|
|
@ -1,38 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# PROVIDE: dnscrypt-proxy
|
||||
# REQUIRE: LOGIN
|
||||
# BEFORE: securelevel
|
||||
# KEYWORD: shutdown
|
||||
|
||||
# Add the following lines to /etc/rc.conf to enable `dnscrypt-proxy':
|
||||
#
|
||||
# dnscrypt_proxy_enable="YES"
|
||||
# dnscrypt_proxy_flags="<set as needed>"
|
||||
#
|
||||
# See rsync(1) for rsyncd_flags
|
||||
#
|
||||
|
||||
. /etc/rc.subr
|
||||
|
||||
name="dnscrypt-proxy"
|
||||
rcvar=dnscrypt_proxy_enable
|
||||
load_rc_config "$name"
|
||||
pidfile="/var/run/$name.pid"
|
||||
start_cmd=dnscrypt_proxy_start
|
||||
stop_postcmd=dnscrypt_proxy_stop
|
||||
|
||||
: ${dnscrypt_proxy_enable="NO"}
|
||||
: ${dnscrypt_proxy_flags="-config /usr/local/etc/dnscrypt-proxy/dnscrypt-proxy.toml"}
|
||||
|
||||
dnscrypt_proxy_start() {
|
||||
echo "Starting dnscrypt-proxy..."
|
||||
touch ${pidfile}
|
||||
/usr/sbin/daemon -cS -T dnscrypt-proxy -p ${pidfile} /usr/dnscrypt-proxy/freebsd-amd64/dnscrypt-proxy ${dnscrypt_proxy_flags}
|
||||
}
|
||||
|
||||
dnscrypt_proxy_stop() {
|
||||
[ -f ${pidfile} ] && rm ${pidfile}
|
||||
}
|
||||
|
||||
run_rc_command "$1"
|