Mirror of https://github.com/trailofbits/algo.git (synced 2025-04-11 11:47:08 +02:00)
Large refactor to support Ansible 2.5 (#976)
* Refactoring, booleans declaration and update users fix
* Make server_name more FQDN compatible
* Rename variables
* Define the default value for store_cakey
* Skip a prompt about the SSH user if deploying to localhost
* Disable reboot for non-cloud deployments
* Enable EC2 volume encryption by default
* Add default server value (localhost) for the local installation
* Delete empty files
* Add default region to aws_region_facts
* Update docs
* EC2 credentials fix
* Warnings fix
* Update deploy-from-ansible.md
* Fix a typo
* Remove lightsail from the docs
* Disable EC2 encryption by default
* Rename droplet to server
* Disable dependencies
* Disable tls_cipher_suite
* Convert wifi-exclude to a string. Update-users fix
* SSH access congrats fix
* 16.04 > 18.04
* Don't ask for the credentials if specified in the environment vars
* GCE server name fix
This commit is contained in: parent a57a0adf5e, commit e8947f318b.
90 changed files with 1774 additions and 2031 deletions
@@ -42,7 +42,6 @@ before_cache:
  - sudo chown $USER. $HOME/lxc/cache.tar

env:
  - LXC_NAME=ubuntu1804 LXC_DISTRO=ubuntu LXC_RELEASE=18.04
  - LXC_NAME=docker LXC_DISTRO=ubuntu LXC_RELEASE=18.04

before_install:

@@ -67,8 +66,8 @@ install:

script:
  # - awesome_bot --allow-dupe --skip-save-results *.md docs/*.md --white-list paypal.com,do.co,microsoft.com,https://github.com/trailofbits/algo/archive/master.zip,https://github.com/trailofbits/algo/issues/new
  # - shellcheck algo
  # - ansible-lint deploy.yml users.yml deploy_client.yml
  - ansible-playbook deploy.yml --syntax-check
  # - ansible-lint main.yml users.yml deploy_client.yml
  - ansible-playbook main.yml --syntax-check
  - ./tests/local-deploy.sh
  - ./tests/update-users.sh
|
641 algo
|
@ -14,642 +14,9 @@ then
|
|||
fi
|
||||
fi
|
||||
|
||||
SKIP_TAGS="_null encrypted"
|
||||
ADDITIONAL_PROMPT="[pasted values will not be displayed]"
|
||||
|
||||
additional_roles () {
|
||||
|
||||
read -p "
|
||||
Do you want macOS/iOS clients to enable \"VPN On Demand\" when connected to cellular networks?
|
||||
[y/N]: " -r OnDemandEnabled_Cellular
|
||||
OnDemandEnabled_Cellular=${OnDemandEnabled_Cellular:-n}
|
||||
if [[ "$OnDemandEnabled_Cellular" =~ ^(y|Y)$ ]]; then EXTRA_VARS+=" OnDemandEnabled_Cellular=Y"; fi
|
||||
|
||||
read -p "
|
||||
Do you want macOS/iOS clients to enable \"VPN On Demand\" when connected to Wi-Fi?
|
||||
[y/N]: " -r OnDemandEnabled_WIFI
|
||||
OnDemandEnabled_WIFI=${OnDemandEnabled_WIFI:-n}
|
||||
if [[ "$OnDemandEnabled_WIFI" =~ ^(y|Y)$ ]]; then EXTRA_VARS+=" OnDemandEnabled_WIFI=Y"; fi
|
||||
|
||||
if [[ "$OnDemandEnabled_WIFI" =~ ^(y|Y)$ ]]; then
|
||||
read -p "
|
||||
List the names of trusted Wi-Fi networks (if any) that macOS/iOS clients exclude from using the VPN (e.g., your home network. Comma-separated value, e.g., HomeNet,OfficeWifi,AlgoWiFi)
|
||||
: " -r OnDemandEnabled_WIFI_EXCLUDE
|
||||
OnDemandEnabled_WIFI_EXCLUDE=${OnDemandEnabled_WIFI_EXCLUDE:-_null}
|
||||
EXTRA_VARS+=" OnDemandEnabled_WIFI_EXCLUDE=\"$OnDemandEnabled_WIFI_EXCLUDE\""
|
||||
fi
|
||||
|
||||
read -p "
|
||||
Do you want to install a DNS resolver on this VPN server, to block ads while surfing?
|
||||
[y/N]: " -r dns_enabled
|
||||
dns_enabled=${dns_enabled:-n}
|
||||
if [[ "$dns_enabled" =~ ^(y|Y)$ ]]; then ROLES+=" dns"; EXTRA_VARS+=" local_dns=true"; fi
|
||||
|
||||
read -p "
|
||||
Do you want each user to have their own account for SSH tunneling?
|
||||
[y/N]: " -r ssh_tunneling_enabled
|
||||
ssh_tunneling_enabled=${ssh_tunneling_enabled:-n}
|
||||
if [[ "$ssh_tunneling_enabled" =~ ^(y|Y)$ ]]; then ROLES+=" ssh_tunneling"; fi
|
||||
|
||||
read -p "
|
||||
Do you want the VPN to support Windows 10 or Linux Desktop clients? (enables compatible ciphers and key exchange, less secure)
|
||||
[y/N]: " -r Win10_Enabled
|
||||
Win10_Enabled=${Win10_Enabled:-n}
|
||||
if [[ "$Win10_Enabled" =~ ^(y|Y)$ ]]; then EXTRA_VARS+=" Win10_Enabled=Y"; fi
|
||||
|
||||
read -p "
|
||||
Do you want to retain the CA key? (required to add users in the future, but less secure)
|
||||
[y/N]: " -r Store_CAKEY
|
||||
Store_CAKEY=${Store_CAKEY:-N}
|
||||
if [[ "$Store_CAKEY" =~ ^(n|N)$ ]]; then EXTRA_VARS+=" Store_CAKEY=N"; fi
|
||||
|
||||
}
|
||||
|
||||
deploy () {
|
||||
|
||||
ansible-playbook deploy.yml -t "${ROLES// /,}" -e "${EXTRA_VARS}" --skip-tags "${SKIP_TAGS// /,}"
|
||||
|
||||
}
|
||||
|
||||
azure () {
|
||||
read -p "
|
||||
Enter your azure secret id (https://github.com/trailofbits/algo/blob/master/docs/cloud-azure.md)
|
||||
You can skip this step if you want to use your defaults credentials from ~/.azure/credentials
|
||||
$ADDITIONAL_PROMPT
|
||||
[...]: " -rs azure_secret
|
||||
|
||||
read -p "
|
||||
|
||||
Enter your azure tenant id (https://github.com/trailofbits/algo/blob/master/docs/cloud-azure.md)
|
||||
You can skip this step if you want to use your defaults credentials from ~/.azure/credentials
|
||||
$ADDITIONAL_PROMPT
|
||||
[...]: " -rs azure_tenant
|
||||
|
||||
read -p "
|
||||
|
||||
Enter your azure client id (application id) (https://github.com/trailofbits/algo/blob/master/docs/cloud-azure.md)
|
||||
You can skip this step if you want to use your defaults credentials from ~/.azure/credentials
|
||||
$ADDITIONAL_PROMPT
|
||||
[...]: " -rs azure_client_id
|
||||
|
||||
read -p "
|
||||
|
||||
Enter your azure subscription id (https://github.com/trailofbits/algo/blob/master/docs/cloud-azure.md)
|
||||
You can skip this step if you want to use your defaults credentials from ~/.azure/credentials
|
||||
$ADDITIONAL_PROMPT
|
||||
[...]: " -rs azure_subscription_id
|
||||
|
||||
read -p "
|
||||
|
||||
Name the vpn server:
|
||||
[algo]: " -r azure_server_name
|
||||
azure_server_name=${azure_server_name:-algo}
|
||||
|
||||
read -p "
|
||||
|
||||
What region should the server be located in? (https://azure.microsoft.com/en-us/regions/)
|
||||
1. East US (Virginia)
|
||||
2. East US 2 (Virginia)
|
||||
3. Central US (Iowa)
|
||||
4. North Central US (Illinois)
|
||||
5. South Central US (Texas)
|
||||
6. West Central US (Wyoming)
|
||||
7. West US (California)
|
||||
8. West US 2 (Washington)
|
||||
9. Canada East (Quebec City)
|
||||
10. Canada Central (Toronto)
|
||||
11. Brazil South (Sao Paulo State)
|
||||
12. North Europe (Ireland)
|
||||
13. West Europe (Netherlands)
|
||||
14. France Central (Paris)
|
||||
15. France South (Marseille)
|
||||
16. UK West (Cardiff)
|
||||
17. UK South (London)
|
||||
18. Germany Central (Frankfurt)
|
||||
19. Germany Northeast (Magdeburg)
|
||||
20. Southeast Asia (Singapore)
|
||||
21. East Asia (Hong Kong)
|
||||
22. Australia East (New South Wales)
|
||||
23. Australia Southeast (Victoria)
|
||||
24. Australia Central (Canberra)
|
||||
25. Australia Central 2 (Canberra)
|
||||
26. Central India (Pune)
|
||||
27. West India (Mumbai)
|
||||
28. South India (Chennai)
|
||||
29. Japan East (Tokyo, Saitama)
|
||||
30. Japan West (Osaka)
|
||||
31. Korea Central (Seoul)
|
||||
32. Korea South (Busan)
|
||||
|
||||
Enter the number of your desired region:
|
||||
[1]: " -r azure_region
|
||||
azure_region=${azure_region:-1}
|
||||
|
||||
case "$azure_region" in
|
||||
1) region="eastus" ;;
|
||||
2) region="eastus2" ;;
|
||||
3) region="centralus" ;;
|
||||
4) region="northcentralus" ;;
|
||||
5) region="southcentralus" ;;
|
||||
6) region="westcentralus" ;;
|
||||
7) region="westus" ;;
|
||||
8) region="westus2" ;;
|
||||
9) region="canadaeast" ;;
|
||||
10) region="canadacentral" ;;
|
||||
11) region="brazilsouth" ;;
|
||||
12) region="northeurope" ;;
|
||||
13) region="westeurope" ;;
|
||||
14) region="francecentral" ;;
|
||||
15) region="francesouth" ;;
|
||||
16) region="ukwest" ;;
|
||||
17) region="uksouth" ;;
|
||||
18) region="germanycentral" ;;
|
||||
19) region="germanynortheast" ;;
|
||||
20) region="southeastasia" ;;
|
||||
21) region="eastasia" ;;
|
||||
22) region="australiaeast" ;;
|
||||
23) region="australiasoutheast" ;;
|
||||
24) region="australiacentral" ;;
|
||||
25) region="australiacentral2" ;;
|
||||
26) region="centralindia" ;;
|
||||
27) region="westindia" ;;
|
||||
28) region="southindia" ;;
|
||||
29) region="japaneast" ;;
|
||||
30) region="japanwest" ;;
|
||||
31) region="koreacentral" ;;
|
||||
32) region="koreasouth" ;;
|
||||
esac
|
||||
|
||||
ROLES="azure vpn cloud"
|
||||
EXTRA_VARS="azure_secret=$azure_secret azure_tenant=$azure_tenant azure_client_id=$azure_client_id azure_subscription_id=$azure_subscription_id azure_server_name=$azure_server_name ssh_public_key=$ssh_public_key region=$region"
|
||||
}
|
||||
|
||||
digitalocean () {
|
||||
read -p "
|
||||
Enter your API token. The token must have read and write permissions (https://cloud.digitalocean.com/settings/api/tokens):
|
||||
$ADDITIONAL_PROMPT
|
||||
: " -rs do_access_token
|
||||
|
||||
read -p "
|
||||
|
||||
Name the vpn server:
|
||||
[algo.local]: " -r do_server_name
|
||||
do_server_name=${do_server_name:-algo.local}
|
||||
|
||||
read -p "
|
||||
|
||||
What region should the server be located in?
|
||||
1. Amsterdam (Datacenter 2)
|
||||
2. Amsterdam (Datacenter 3)
|
||||
3. Frankfurt
|
||||
4. London
|
||||
5. New York (Datacenter 1)
|
||||
6. New York (Datacenter 2)
|
||||
7. New York (Datacenter 3)
|
||||
8. San Francisco (Datacenter 1)
|
||||
9. San Francisco (Datacenter 2)
|
||||
10. Singapore
|
||||
11. Toronto
|
||||
12. Bangalore
|
||||
|
||||
Enter the number of your desired region:
|
||||
[7]: " -r region
|
||||
region=${region:-7}
|
||||
|
||||
case "$region" in
|
||||
1) do_region="ams2" ;;
|
||||
2) do_region="ams3" ;;
|
||||
3) do_region="fra1" ;;
|
||||
4) do_region="lon1" ;;
|
||||
5) do_region="nyc1" ;;
|
||||
6) do_region="nyc2" ;;
|
||||
7) do_region="nyc3" ;;
|
||||
8) do_region="sfo1" ;;
|
||||
9) do_region="sfo2" ;;
|
||||
10) do_region="sgp1" ;;
|
||||
11) do_region="tor1" ;;
|
||||
12) do_region="blr1" ;;
|
||||
esac
|
||||
|
||||
ROLES="digitalocean vpn cloud"
|
||||
EXTRA_VARS="do_access_token=$do_access_token do_server_name=$do_server_name do_region=$do_region"
|
||||
}
|
||||
|
||||
ec2 () {
|
||||
read -p "
|
||||
Enter your aws_access_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
|
||||
Note: Make sure to use an IAM user with an acceptable policy attached (see https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md).
|
||||
$ADDITIONAL_PROMPT
|
||||
[AKIA...]: " -rs aws_access_key
|
||||
|
||||
read -p "
|
||||
|
||||
Enter your aws_secret_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
|
||||
$ADDITIONAL_PROMPT
|
||||
[ABCD...]: " -rs aws_secret_key
|
||||
|
||||
read -p "
|
||||
|
||||
Name the vpn server:
|
||||
[algo]: " -r aws_server_name
|
||||
aws_server_name=${aws_server_name:-algo}
|
||||
|
||||
read -p "
|
||||
|
||||
What region should the server be located in?
|
||||
1. us-east-1 US East (N. Virginia)
|
||||
2. us-east-2 US East (Ohio)
|
||||
3. us-west-1 US West (N. California)
|
||||
4. us-west-2 US West (Oregon)
|
||||
5. ca-central-1 Canada (Central)
|
||||
6. eu-central-1 EU (Frankfurt)
|
||||
7. eu-west-1 EU (Ireland)
|
||||
8. eu-west-2 EU (London)
|
||||
9. eu-west-3 EU (Paris)
|
||||
10. ap-northeast-1 Asia Pacific (Tokyo)
|
||||
11. ap-northeast-2 Asia Pacific (Seoul)
|
||||
12. ap-northeast-3 Asia Pacific (Osaka-Local)
|
||||
13. ap-southeast-1 Asia Pacific (Singapore)
|
||||
14. ap-southeast-2 Asia Pacific (Sydney)
|
||||
15. ap-south-1 Asia Pacific (Mumbai)
|
||||
16. sa-east-1 South America (São Paulo)
|
||||
|
||||
Enter the number of your desired region:
|
||||
[1]: " -r aws_region
|
||||
aws_region=${aws_region:-1}
|
||||
|
||||
case "$aws_region" in
|
||||
1) region="us-east-1" ;;
|
||||
2) region="us-east-2" ;;
|
||||
3) region="us-west-1" ;;
|
||||
4) region="us-west-2" ;;
|
||||
5) region="ca-central-1" ;;
|
||||
6) region="eu-central-1" ;;
|
||||
7) region="eu-west-1" ;;
|
||||
8) region="eu-west-2" ;;
|
||||
9) region="eu-west-3" ;;
|
||||
10) region="ap-northeast-1" ;;
|
||||
11) region="ap-northeast-2" ;;
|
||||
12) region="ap-northeast-3";;
|
||||
13) region="ap-southeast-1" ;;
|
||||
14) region="ap-southeast-2" ;;
|
||||
15) region="ap-south-1" ;;
|
||||
16) region="sa-east-1" ;;
|
||||
esac
|
||||
|
||||
ROLES="ec2 vpn cloud"
|
||||
EXTRA_VARS="aws_access_key=$aws_access_key aws_secret_key=$aws_secret_key aws_server_name=$aws_server_name region=$region"
|
||||
}
|
||||
|
||||
lightsail () {
|
||||
read -p "
|
||||
Enter your aws_access_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
|
||||
Note: Make sure to use an IAM user with an acceptable policy attached (see https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md).
|
||||
$ADDITIONAL_PROMPT
|
||||
[AKIA...]: " -rs aws_access_key
|
||||
|
||||
read -p "
|
||||
|
||||
Enter your aws_secret_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
|
||||
$ADDITIONAL_PROMPT
|
||||
[ABCD...]: " -rs aws_secret_key
|
||||
|
||||
read -p "
|
||||
|
||||
Name the vpn server:
|
||||
[algo.local]: " -r algo_server_name
|
||||
algo_server_name=${algo_server_name:-algo.local}
|
||||
|
||||
read -p "
|
||||
|
||||
What region should the server be located in?
|
||||
1. us-east-1 US East (N. Virginia)
|
||||
2. us-east-2 US East (Ohio)
|
||||
3. us-west-1 US West (N. California)
|
||||
4. us-west-2 US West (Oregon)
|
||||
5. ap-south-1 Asia Pacific (Mumbai)
|
||||
6. ap-northeast-2 Asia Pacific (Seoul)
|
||||
7. ap-southeast-1 Asia Pacific (Singapore)
|
||||
8. ap-southeast-2 Asia Pacific (Sydney)
|
||||
9. ap-northeast-1 Asia Pacific (Tokyo)
|
||||
10. eu-central-1 EU (Frankfurt)
|
||||
11. eu-west-1 EU (Ireland)
|
||||
12. eu-west-2 EU (London)
|
||||
|
||||
Enter the number of your desired region:
|
||||
[1]: " -r algo_region
|
||||
algo_region=${algo_region:-1}
|
||||
|
||||
case "$algo_region" in
|
||||
1) region="us-east-1" ;;
|
||||
2) region="us-east-2" ;;
|
||||
3) region="us-west-1" ;;
|
||||
4) region="us-west-2" ;;
|
||||
5) region="ap-south-1" ;;
|
||||
6) region="ap-northeast-2" ;;
|
||||
7) region="ap-southeast-1" ;;
|
||||
8) region="ap-southeast-2" ;;
|
||||
9) region="ap-northeast-1" ;;
|
||||
10) region="eu-central-1" ;;
|
||||
11) region="eu-west-1" ;;
|
||||
12) region="eu-west-2";;
|
||||
esac
|
||||
|
||||
ROLES="lightsail vpn cloud"
|
||||
EXTRA_VARS="aws_access_key=$aws_access_key aws_secret_key=$aws_secret_key algo_server_name=$algo_server_name region=$region"
|
||||
}
|
||||
|
||||
scaleway () {
|
||||
read -p "
|
||||
Enter your auth token (https://www.scaleway.com/docs/generate-an-api-token/)
|
||||
$ADDITIONAL_PROMPT
|
||||
[...]: " -rs scaleway_auth_token
|
||||
|
||||
read -p "
|
||||
|
||||
Enter your organization name (https://cloud.scaleway.com/#/billing)
|
||||
$ADDITIONAL_PROMPT
|
||||
[...]: " -rs scaleway_organization
|
||||
|
||||
read -p "
|
||||
|
||||
Name the vpn server:
|
||||
[algo.local]: " -r algo_server_name
|
||||
algo_server_name=${algo_server_name:-algo.local}
|
||||
|
||||
read -p "
|
||||
|
||||
What region should the server be located in?
|
||||
1. par1 Paris
|
||||
2. ams1 Amsterdam
|
||||
Enter the number of your desired region:
|
||||
[1]: " -r algo_region
|
||||
algo_region=${algo_region:-1}
|
||||
|
||||
case "$algo_region" in
|
||||
1) region="par1" ;;
|
||||
2) region="ams1" ;;
|
||||
esac
|
||||
|
||||
ROLES="scaleway vpn cloud"
|
||||
EXTRA_VARS="scaleway_auth_token=$scaleway_auth_token scaleway_organization=\"$scaleway_organization\" algo_server_name=$algo_server_name algo_region=$region"
|
||||
}
|
||||
|
||||
openstack () {
|
||||
read -p "
|
||||
Enter the local path to your credentials OpenStack RC file (Can be downloaded from the OpenStack dashboard->Compute->API Access)
|
||||
[...]: " -r os_rc
|
||||
|
||||
read -p "
|
||||
|
||||
Name the vpn server:
|
||||
[algo.local]: " -r algo_server_name
|
||||
algo_server_name=${algo_server_name:-algo.local}
|
||||
|
||||
ROLES="openstack vpn cloud"
|
||||
EXTRA_VARS="algo_server_name=$algo_server_name"
|
||||
source $os_rc
|
||||
}
|
||||
|
||||
gce () {
|
||||
read -p "
|
||||
Enter the local path to your credentials JSON file (https://support.google.com/cloud/answer/6158849?hl=en&ref_topic=6262490#serviceaccounts):
|
||||
: " -r credentials_file
|
||||
|
||||
read -p "
|
||||
|
||||
Name the vpn server:
|
||||
[algo]: " -r server_name
|
||||
server_name=${server_name:-algo}
|
||||
|
||||
read -p "
|
||||
|
||||
What zone should the server be located in?
|
||||
1. Eastern Canada (Montreal A)
|
||||
2. Eastern Canada (Montreal B)
|
||||
3. Eastern Canada (Montreal C)
|
||||
4. Central US (Iowa A)
|
||||
5. Central US (Iowa B)
|
||||
6. Central US (Iowa C)
|
||||
7. Central US (Iowa F)
|
||||
8. Western US (Oregon A)
|
||||
9. Western US (Oregon B)
|
||||
10. Western US (Oregon C)
|
||||
11. Eastern US (Northern Virginia A)
|
||||
12. Eastern US (Northern Virginia B)
|
||||
13. Eastern US (Northern Virginia C)
|
||||
14. Eastern US (South Carolina B)
|
||||
15. Eastern US (South Carolina C)
|
||||
16. Eastern US (South Carolina D)
|
||||
17. South America East (São Paulo A)
|
||||
18. South America East (São Paulo B)
|
||||
19. South America East (São Paulo C)
|
||||
20. Northern Europe (Hamina A)
|
||||
21. Northern Europe (Hamina B)
|
||||
22. Northern Europe (Hamina C)
|
||||
23. Western Europe (Belgium B)
|
||||
24. Western Europe (Belgium C)
|
||||
25. Western Europe (Belgium D)
|
||||
26. Western Europe (London A)
|
||||
27. Western Europe (London B)
|
||||
28. Western Europe (London C)
|
||||
29. Western Europe (Frankfurt A)
|
||||
30. Western Europe (Frankfurt B)
|
||||
31. Western Europe (Frankfurt C)
|
||||
32. Western Europe (Netherlands A)
|
||||
33. Western Europe (Netherlands B)
|
||||
34. Western Europe (Netherlands C)
|
||||
35. South Asia (Mumbai A)
|
||||
36. South Asia (Mumbai B)
|
||||
37. South Asia (Mumbai C)
|
||||
38. Southeast Asia (Singapore A)
|
||||
39. Southeast Asia (Singapore B)
|
||||
40. Southeast Asia (Singapore C)
|
||||
41. East Asia (Taiwan A)
|
||||
42. East Asia (Taiwan B)
|
||||
43. East Asia (Taiwan C)
|
||||
44. Northeast Asia (Tokyo A)
|
||||
45. Northeast Asia (Tokyo B)
|
||||
46. Northeast Asia (Tokyo C)
|
||||
47. Australia (Sydney A)
|
||||
48. Australia (Sydney B)
|
||||
49. Australia (Sydney C)
|
||||
|
||||
Please choose the number of your zone. Press enter for default (#20) zone.
|
||||
[20]: " -r region
|
||||
region=${region:-20}
|
||||
|
||||
case "$region" in
|
||||
1) zone="northamerica-northeast1-a" ;;
|
||||
2) zone="northamerica-northeast1-b" ;;
|
||||
3) zone="northamerica-northeast1-c" ;;
|
||||
4) zone="us-central1-a" ;;
|
||||
5) zone="us-central1-b" ;;
|
||||
6) zone="us-central1-c" ;;
|
||||
7) zone="us-central1-f" ;;
|
||||
8) zone="us-west1-a" ;;
|
||||
9) zone="us-west1-b" ;;
|
||||
10) zone="us-west1-c" ;;
|
||||
11) zone="us-east4-a" ;;
|
||||
12) zone="us-east4-b" ;;
|
||||
13) zone="us-east4-c" ;;
|
||||
14) zone="us-east1-b" ;;
|
||||
15) zone="us-east1-c" ;;
|
||||
16) zone="us-east1-d" ;;
|
||||
17) zone="southamerica-east1-a" ;;
|
||||
18) zone="southamerica-east1-b" ;;
|
||||
19) zone="southamerica-east1-c" ;;
|
||||
20) zone="europe-north1-a" ;;
|
||||
21) zone="europe-north1-b" ;;
|
||||
22) zone="europe-north1-c" ;;
|
||||
23) zone="europe-west1-b" ;;
|
||||
24) zone="europe-west1-c" ;;
|
||||
25) zone="europe-west1-d" ;;
|
||||
26) zone="europe-west2-a" ;;
|
||||
27) zone="europe-west2-b" ;;
|
||||
28) zone="europe-west2-c" ;;
|
||||
29) zone="europe-west3-a" ;;
|
||||
30) zone="europe-west3-b" ;;
|
||||
31) zone="europe-west3-c" ;;
|
||||
32) zone="europe-west4-a" ;;
|
||||
33) zone="europe-west4-b" ;;
|
||||
34) zone="europe-west4-c" ;;
|
||||
35) zone="asia-south1-a" ;;
|
||||
36) zone="asia-south1-b" ;;
|
||||
37) zone="asia-south1-c" ;;
|
||||
38) zone="asia-southeast1-a" ;;
|
||||
39) zone="asia-southeast1-b" ;;
|
||||
40) zone="asia-southeast1-c" ;;
|
||||
41) zone="asia-east1-a" ;;
|
||||
42) zone="asia-east1-b" ;;
|
||||
43) zone="asia-east1-c" ;;
|
||||
44) zone="asia-northeast1-a" ;;
|
||||
45) zone="asia-northeast1-b" ;;
|
||||
46) zone="asia-northeast1-c" ;;
|
||||
47) zone="australia-southeast1-a" ;;
|
||||
48) zone="australia-southeast1-b" ;;
|
||||
49) zone="australia-southeast1-c" ;;
|
||||
esac
|
||||
|
||||
ROLES="gce vpn cloud"
|
||||
EXTRA_VARS="credentials_file=$credentials_file gce_server_name=$server_name ssh_public_key=$ssh_public_key zone=$zone max_mss=1316"
|
||||
}
|
||||
|
||||
non_cloud () {
|
||||
read -p "
|
||||
Enter the IP address of your server: (or use localhost for local installation)
|
||||
[localhost]: " -r server_ip
|
||||
server_ip=${server_ip:-localhost}
|
||||
|
||||
read -p "
|
||||
|
||||
What user should we use to login on the server? (note: passwordless login required, or ignore if you're deploying to localhost)
|
||||
[root]: " -r server_user
|
||||
server_user=${server_user:-root}
|
||||
|
||||
if [ "x${server_ip}" = "xlocalhost" ]; then
|
||||
myip=""
|
||||
else
|
||||
myip=${server_ip}
|
||||
fi
|
||||
|
||||
read -p "
|
||||
|
||||
Enter the public IP address of your server: (IMPORTANT! This IP is used to verify the certificate)
|
||||
[$myip]: " -r IP_subject
|
||||
IP_subject=${IP_subject:-$myip}
|
||||
|
||||
if [ "x${IP_subject}" = "x" ]; then
|
||||
echo "no server IP given. exiting."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ROLES="local vpn"
|
||||
EXTRA_VARS="server_ip=$server_ip server_user=$server_user IP_subject_alt_name=$IP_subject"
|
||||
SKIP_TAGS+=" cloud update-alternatives"
|
||||
|
||||
read -p "
|
||||
|
||||
Was this server deployed by Algo previously?
|
||||
[y/N]: " -r Deployed_By_Algo
|
||||
Deployed_By_Algo=${Deployed_By_Algo:-n}
|
||||
if [[ "$Deployed_By_Algo" =~ ^(y|Y)$ ]]; then EXTRA_VARS+=" Deployed_By_Algo=Y"; fi
|
||||
|
||||
}
|
||||
|
||||
algo_provisioning () {
|
||||
echo -n "
|
||||
What provider would you like to use?
|
||||
1. DigitalOcean
|
||||
2. Amazon EC2
|
||||
3. Microsoft Azure
|
||||
4. Google Compute Engine
|
||||
5. Scaleway
|
||||
6. OpenStack (DreamCompute optimised)
|
||||
7. Install to existing Ubuntu 16.04 server (Advanced)
|
||||
|
||||
Enter the number of your desired provider
|
||||
: "
|
||||
|
||||
read -r N
|
||||
|
||||
case "$N" in
|
||||
1) digitalocean; ;;
|
||||
2) ec2; ;;
|
||||
3) azure; ;;
|
||||
4) gce; ;;
|
||||
5) scaleway; ;;
|
||||
6) openstack; ;;
|
||||
7) non_cloud; ;;
|
||||
*) exit 1 ;;
|
||||
esac
|
||||
|
||||
additional_roles
|
||||
deploy
|
||||
}
|
||||
|
||||
user_management () {
|
||||
|
||||
read -p "
|
||||
Enter the IP address of your server: (or use localhost for local installation)
|
||||
: " -r server_ip
|
||||
|
||||
read -p "
|
||||
What user should we use to login on the server? (note: passwordless login required, or ignore if you're deploying to localhost)
|
||||
[root]: " -r server_user
|
||||
server_user=${server_user:-root}
|
||||
|
||||
read -p "
|
||||
Do you want each user to have their own account for SSH tunneling?
|
||||
[y/N]: " -r ssh_tunneling_enabled
|
||||
ssh_tunneling_enabled=${ssh_tunneling_enabled:-n}
|
||||
|
||||
if [ "x${server_ip}" = "xlocalhost" ]; then
|
||||
myip=""
|
||||
else
|
||||
myip=${server_ip}
|
||||
fi
|
||||
|
||||
read -p "
|
||||
|
||||
Enter the public IP address of your server: (IMPORTANT! This IP is used to verify the certificate)
|
||||
[$myip]: " -r IP_subject
|
||||
IP_subject=${IP_subject:-$myip}
|
||||
|
||||
if [ "x${IP_subject}" = "x" ]; then
|
||||
echo "no server IP given. exiting."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
read -p "
|
||||
Enter the password for the private CA key:
|
||||
$ADDITIONAL_PROMPT
|
||||
: " -rs easyrsa_CA_password
|
||||
|
||||
ansible-playbook users.yml -e "server_ip=$server_ip server_user=$server_user ssh_tunneling_enabled=$ssh_tunneling_enabled IP_subject_alt_name=$IP_subject easyrsa_CA_password=$easyrsa_CA_password" -t update-users --skip-tags common
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
update-users) user_management ;;
|
||||
*) algo_provisioning ;;
|
||||
update-users) PLAYBOOK=users.yml; ARGS="${@:2} -t update-users";;
|
||||
*) PLAYBOOK=main.yml; ARGS=${@} ;;
|
||||
esac
|
||||
|
||||
ansible-playbook ${PLAYBOOK} ${ARGS}
|
||||
|
|
@@ -4,6 +4,7 @@ pipelining = True
retry_files_enabled = False
host_key_checking = False
timeout = 60
stdout_callback = full_skip

[paramiko_connection]
record_host_keys = False
49 cloud.yml Normal file

@@ -0,0 +1,49 @@
---
- name: Provision the server
  hosts: localhost
  tags: algo
  vars_files:
    - config.cfg

  pre_tasks:
    - block:
        - name: Local pre-tasks
          import_tasks: playbooks/cloud-pre.yml
          tags: always
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

  roles:
    - role: cloud-digitalocean
      when: algo_provider == "digitalocean"
    - role: cloud-ec2
      when: algo_provider == "ec2"
    - role: cloud-vultr
      when: algo_provider == "vultr"
    - role: cloud-gce
      when: algo_provider == "gce"
    - role: cloud-azure
      when: algo_provider == "azure"
    - role: cloud-lightsail
      when: algo_provider == "lightsail"
    - role: cloud-scaleway
      when: algo_provider == "scaleway"
    - role: cloud-openstack
      when: algo_provider == "openstack"
    - role: local
      when: algo_provider == "local"

  post_tasks:
    - block:
        - name: Local post-tasks
          import_tasks: playbooks/cloud-post.yml
          become: false
          tags: cloud
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always
21 config.cfg

@@ -10,8 +10,8 @@ users:

### Advanced users only below this line ###

# If True re-init all existing certificates. (True or False)
easyrsa_reinit_existent: False
# If True re-init all existing certificates. Boolean
keys_clean_all: False

vpn_network: 10.19.48.0/24
vpn_network_ipv6: 'fd9d:bc11:4020::/48'

@@ -28,9 +28,6 @@ wireguard_port: 51820
# - https://serverfault.com/questions/601143/ssh-not-working-over-ipsec-tunnel-strongswan
#max_mss: 1316

server_name: "{{ ansible_ssh_host }}"
IP_subject_alt_name: "{{ ansible_ssh_host }}"

# StrongSwan log level
# https://wiki.strongswan.org/projects/strongswan/wiki/LoggerConfiguration
strongswan_log_level: 2

@@ -64,7 +61,7 @@ VPN_PayloadIdentifier: "{{ 800000 | random | to_uuid | upper }}"
CA_PayloadIdentifier: "{{ 700000 | random | to_uuid | upper }}"

# Block traffic between connected clients
BetweenClients_DROP: Y
BetweenClients_DROP: true

congrats:
  common: |

@@ -75,9 +72,9 @@ congrats:
    "# and ensure that all your traffic passes through the VPN. #"
    "# Local DNS resolver {{ local_service_ip }} #"
  p12_pass: |
    "# The p12 and SSH keys password for new users is {{ easyrsa_p12_export_password }} #"
    "# The p12 and SSH keys password for new users is {{ p12_export_password }} #"
  ca_key_pass: |
    "# The CA key password is {{ easyrsa_CA_password }} #"
    "# The CA key password is {{ CA_password }} #"
  ssh_access: |
    "# Shell access: ssh -i {{ ansible_ssh_private_key_file|default(omit) }} {{ ansible_ssh_user|default(omit) }}@{{ ansible_ssh_host|default(omit) }} #"

@@ -98,6 +95,7 @@ cloud_providers:
    size: s-1vcpu-1gb
    image: "ubuntu-18-04-x64"
  ec2:
    encrypted: false
    size: t2.micro
    image:
      name: "ubuntu-bionic-18.04"

@@ -115,9 +113,16 @@ cloud_providers:
  openstack:
    flavor_ram: ">=512"
    image: Ubuntu-18.04
  vultr:
    os: Ubuntu 18.04 x64
    size: 1024 MB RAM,25 GB SSD,1.00 TB BW
  local:

fail_hint:
  - Sorry, but something went wrong!
  - Please check the troubleshooting guide.
  - https://trailofbits.github.io/algo/troubleshooting.html

booleans_map:
  Y: true
  y: true
|
98 deploy.yml
|
@ -1,98 +0,0 @@
|
|||
- name: Configure the server
|
||||
hosts: localhost
|
||||
tags: algo
|
||||
vars_files:
|
||||
- config.cfg
|
||||
|
||||
pre_tasks:
|
||||
- block:
|
||||
- name: Local pre-tasks
|
||||
include_tasks: playbooks/local.yml
|
||||
tags: [ 'always' ]
|
||||
|
||||
- name: Local pre-tasks
|
||||
include_tasks: playbooks/local_ssh.yml
|
||||
become: false
|
||||
when: Deployed_By_Algo is defined and Deployed_By_Algo == "Y"
|
||||
tags: [ 'local' ]
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
|
||||
roles:
|
||||
- { role: cloud-digitalocean, tags: ['digitalocean'] }
|
||||
- { role: cloud-ec2, tags: ['ec2'] }
|
||||
- { role: cloud-gce, tags: ['gce'] }
|
||||
- { role: cloud-azure, tags: ['azure'] }
|
||||
- { role: cloud-scaleway, tags: ['scaleway'] }
|
||||
- { role: cloud-openstack, tags: ['openstack'] }
|
||||
- { role: local, tags: ['local'] }
|
||||
|
||||
post_tasks:
|
||||
- block:
|
||||
- name: Local post-tasks
|
||||
include_tasks: playbooks/post.yml
|
||||
become: false
|
||||
tags: [ 'cloud' ]
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
|
||||
- name: Configure the server and install required software
|
||||
hosts: vpn-host
|
||||
gather_facts: false
|
||||
tags: algo
|
||||
become: true
|
||||
vars_files:
|
||||
- config.cfg
|
||||
|
||||
pre_tasks:
|
||||
- block:
|
||||
- name: Common pre-tasks
|
||||
include_tasks: playbooks/common.yml
|
||||
tags: [ 'digitalocean', 'ec2', 'gce', 'azure', 'lightsail', 'scaleway', 'openstack', 'local', 'pre' ]
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
|
||||
roles:
|
||||
- { role: dns_adblocking, tags: [ 'dns', 'adblock' ] }
|
||||
- { role: ssh_tunneling, tags: [ 'ssh_tunneling' ] }
|
||||
- { role: wireguard, tags: [ 'vpn', 'wireguard' ], when: wireguard_enabled }
|
||||
- { role: vpn, tags: [ 'vpn' ] }
|
||||
|
||||
post_tasks:
|
||||
- block:
|
||||
- debug:
|
||||
msg:
|
||||
- "{{ congrats.common.split('\n') }}"
|
||||
- " {{ congrats.p12_pass }}"
|
||||
- " {% if Store_CAKEY is defined and Store_CAKEY == 'N' %}{% else %}{{ congrats.ca_key_pass }}{% endif %}"
|
||||
- " {% if cloud_deployment is defined %}{{ congrats.ssh_access }}{% endif %}"
|
||||
tags: always
|
||||
|
||||
- name: Save the CA key password
|
||||
local_action: >
|
||||
shell echo "{{ easyrsa_CA_password }}" > /tmp/ca_password
|
||||
become: no
|
||||
tags: tests
|
||||
|
||||
- name: Delete the CA key
|
||||
local_action:
|
||||
module: file
|
||||
path: "configs/{{ IP_subject_alt_name }}/pki/private/cakey.pem"
|
||||
state: absent
|
||||
become: no
|
||||
tags: always
|
||||
when: Store_CAKEY is defined and Store_CAKEY == "N"
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
|
@@ -78,10 +78,10 @@ You will then be asked the remainder of the setup questions.

## Using DigitalOcean with Algo (via Ansible)

If you are using Ansible to deploy to DigitalOcean, you will need to pass the API Token to Ansible as `do_access_token`.
If you are using Ansible to deploy to DigitalOcean, you will need to pass the API Token to Ansible as `do_token`.

For example,

    ansible-playbook deploy.yml -t digitalocean,vpn,cloud -e 'do_access_token=my_secret_token do_server_name=algo.local do_region=ams2
    ansible-playbook deploy.yml -e 'provider=digitalocean do_token=my_secret_token'

Where "my_secret_token" is your API Token.
Where "my_secret_token" is your API Token. For more information, see [deploy-from-ansible](deploy-from-ansible.md).
8 docs/cloud-vultr.md Normal file

@@ -0,0 +1,8 @@
### Configuration file

You need to create a configuration file in INI format with your API key (https://my.vultr.com/settings/#settingsapi):

```
[default]
key = <your api key>
```
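A minimal usage sketch, assuming the `vultr_config` variable described in the deploy-from-ansible notes below points Algo at this file (the file path, server name, and region value are placeholders; valid regions can be taken from https://api.vultr.com/v1/regions/list):

```shell
ansible-playbook main.yml -e "provider=vultr vultr_config=/path/to/vultr.ini server_name=algo region=Amsterdam"
```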
@@ -11,74 +11,81 @@ You can deploy Algo non-interactively by running the Ansible playbooks directly

Here is a full example for DigitalOcean:

```shell
ansible-playbook deploy.yml -t digitalocean,vpn,cloud -e 'do_access_token=my_secret_token do_server_name=algo.local do_region=ams2'
ansible-playbook main.yml -e "provider=digitalocean
                              server_name=algo
                              ondemand_cellular=false
                              ondemand_wifi=false
                              local_dns=true
                              ssh_tunneling=true
                              windows=false
                              store_cakey=true
                              region=ams3
                              do_token=token"
```

See below for more information about providers and extra variables.
### Variables

- `provider` - (Required) The provider to use. See possible values below
- `server_name` - (Required) Server name. Default: algo
- `ondemand_cellular` - (Optional) VPN On Demand when connected to cellular networks. Default: false
- `ondemand_wifi` - (Optional. See `ondemand_wifi_exclude`) VPN On Demand when connected to WiFi networks. Default: false
- `ondemand_wifi_exclude` - (Required if `ondemand_wifi` set) WiFi networks to exclude from using the VPN. Comma-separated values
- `local_dns` - (Optional) Enable a DNS resolver. Default: false
- `ssh_tunneling` - (Optional) Enable SSH tunneling for each user. Default: false
- `windows` - (Optional) Enables compatible ciphers and key exchange to support Windows clients, less secure. Default: false
- `store_cakey` - (Optional) Whether or not to keep the CA key (required to add users in the future, but less secure). Default: false

If any of these are unspecified, Ansible will prompt the user for input.
### Ansible roles

Required tags:

- cloud

Roles can be activated by specifying an extra variable `provider`

Cloud roles:

- role: cloud-digitalocean, tags: digitalocean
- role: cloud-ec2, tags: ec2
- role: cloud-gce, tags: gce
- role: cloud-digitalocean, provider: digitalocean
- role: cloud-ec2, provider: ec2
- role: cloud-vultr, provider: vultr
- role: cloud-gce, provider: gce
- role: cloud-azure, provider: azure
- role: cloud-scaleway, provider: scaleway
- role: cloud-openstack, provider: openstack

Server roles:

- role: vpn, tags: vpn
- role: dns_adblocking, tags: dns, adblock
- role: security, tags: security
- role: ssh_tunneling, tags: ssh_tunneling
- role: vpn
- role: dns_adblocking
- role: dns_encryption
- role: ssh_tunneling
- role: wireguard

Note: The `vpn` role generates Apple profiles with On-Demand Wifi and Cellular if you pass the following variables:

- OnDemandEnabled_WIFI=Y
- OnDemandEnabled_WIFI_EXCLUDE=HomeNet
- OnDemandEnabled_Cellular=Y
- ondemand_wifi: true
- ondemand_wifi_exclude: HomeNet,OfficeWifi
- ondemand_cellular: true
### Local Installation

Required tags:

- local
- role: local, provider: local

Required variables:

- server_ip
- server_user
- IP_subject_alt_name
- server - IP address of your server
- ca_password - Password for the private CA key

Note that by default, the iptables rules on your existing server will be overwritten. If you don't want to overwrite the iptables rules, you can use the `--skip-tags iptables` flag, for example:

```shell
ansible-playbook deploy.yml -t local,vpn --skip-tags iptables -e 'server_ip=172.217.2.238 server_user=algo IP_subject_alt_name=172.217.2.238'
```
Note that by default, the iptables rules on your existing server will be overwritten. If you don't want to overwrite the iptables rules, you can use the `--skip-tags iptables` flag.
### Digital Ocean

Required variables:

- do_access_token
- do_server_name
- do_region
- do_token
- region

Possible options for `do_region`:

- ams2
- ams3
- fra1
- lon1
- nyc1
- nyc2
- nyc3
- sfo1
- sfo2
- sgp1
- tor1
- blr1

Possible options can be gathered by calling https://api.digitalocean.com/v2/regions

### Amazon EC2
@@ -86,27 +93,13 @@ Required variables:

- aws_access_key
- aws_secret_key
- aws_server_name
- region

Possible options for `region`:
Possible options can be gathered via the CLI: `aws ec2 describe-regions`

- us-east-1
- us-east-2
- us-west-1
- us-west-2
- ap-south-1
- ap-northeast-2
- ap-southeast-1
- ap-southeast-2
- ap-northeast-1
- eu-central-1
- eu-west-1
- eu-west-2

Additional variables:
Additional tags:

- [encrypted](https://aws.amazon.com/blogs/aws/new-encrypted-ebs-boot-volumes/) (enabled by default)
- [encrypted](https://aws.amazon.com/blogs/aws/new-encrypted-ebs-boot-volumes/) - Encrypted EBS boot volume. Boolean (Default: false)
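For instance, a non-interactive EC2 run using the variable names listed above might look like the following sketch (the key values, server name, and region are placeholders, and the exact set of accepted variable names may differ slightly after this refactor):

```shell
ansible-playbook main.yml -e "provider=ec2 aws_access_key=AKIA... aws_secret_key=ABCD... aws_server_name=algo region=us-east-1"
```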
#### Minimum required IAM permissions for deployment:
@@ -178,46 +171,76 @@ Additional tags:

Required variables:

- credentials_file
- gce_server_name
- ssh_public_key
- zone
- gce_credentials_file
- [region](https://cloud.google.com/compute/docs/regions-zones/)

Possible options for `zone`:
### Vultr

- us-west1-a
- us-west1-b
- us-west1-c
- us-central1-a
- us-central1-b
- us-central1-c
- us-central1-f
- us-east4-a
- us-east4-b
- us-east4-c
- us-east1-b
- us-east1-c
- us-east1-d
- europe-north1-a
- europe-north1-b
- europe-north1-c
- europe-west1-b
- europe-west1-c
- europe-west1-d
- europe-west2-a
- europe-west2-b
- europe-west2-c
- europe-west3-a
- europe-west3-b
- europe-west3-c
- asia-southeast1-a
- asia-southeast1-b
- asia-east1-a
- asia-east1-b
- asia-east1-c
- asia-northeast1-a
- asia-northeast1-b
- asia-northeast1-c
- australia-southeast1-a
- australia-southeast1-b
- australia-southeast1-c

Required variables:

- [vultr_config](https://github.com/trailofbits/algo/docs/cloud-vultr.md)
- [region](https://api.vultr.com/v1/regions/list)
### Azure

Required variables:

- azure_secret
- azure_tenant
- azure_client_id
- azure_subscription_id
- [region](https://azure.microsoft.com/en-us/global-infrastructure/regions/)

### Lightsail

Required variables:

- aws_access_key
- aws_secret_key
- region

Possible options can be gathered via the CLI: `aws lightsail get-regions`

### Scaleway

Required variables:

- [scaleway_token](https://www.scaleway.com/docs/generate-an-api-token/)
- [scaleway_org](https://cloud.scaleway.com/#/billing)
- region

Possible regions:

- ams1
- par1

### OpenStack

You need to source the RC file before running Algo. Download it from the OpenStack dashboard (Compute -> API Access) and source it in your shell (e.g., source /tmp/dhc-openrc.sh).
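As a sketch, that typically amounts to two commands (the RC file path is the example one from the text above, and the server name is a placeholder):

```shell
source /tmp/dhc-openrc.sh
ansible-playbook main.yml -e "provider=openstack server_name=algo"
```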
### Local

Required variables:

- server - IP or hostname to access the server via SSH
- endpoint - Public IP address of your server
- ssh_user
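A minimal sketch with the variables above (the IP address and SSH user are placeholders):

```shell
ansible-playbook main.yml -e "provider=local server=172.217.2.238 endpoint=172.217.2.238 ssh_user=algo"
```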
### Update users

Playbook:

```
users.yml
```

Required variables:

- server - IP or hostname to access the server via SSH
- ca_password - Password to access the CA key

Tags required:

- update-users
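Putting that together, a user-update run might look like the following (the server address and CA password are placeholders):

```shell
ansible-playbook users.yml -e "server=172.217.2.238 ca_password=vpnpassword" -t update-users
```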
@@ -26,5 +26,7 @@ device crypto

## Installation

```shell
ansible-playbook deploy.yml -t local,vpn -e "server_ip=$server_ip server_user=$server_user IP_subject_alt_name=$server_ip Store_CAKEY=N" --skip-tags cloud
ansible-playbook main.yml -e "provider=local"
```

And follow the instructions.
@@ -12,6 +12,7 @@
* Cloud setup
  - Configure [Azure](cloud-azure.md)
  - Configure [DigitalOcean](cloud-do.md)
  - Configure [Vultr](cloud-vultr.md)
* Advanced Deployment
  - Deploy to your own [FreeBSD](deploy-to-freebsd.md) server
  - Deploy to your own [Ubuntu 18.04](deploy-to-ubuntu.md) server
137 input.yml Normal file
|
@ -0,0 +1,137 @@
|
|||
---
|
||||
- name: Ask user for the input
|
||||
hosts: localhost
|
||||
tags: algo
|
||||
vars:
|
||||
defaults:
|
||||
server_name: algo
|
||||
ondemand_cellular: false
|
||||
ondemand_wifi: false
|
||||
local_dns: false
|
||||
ssh_tunneling: false
|
||||
windows: false
|
||||
store_cakey: false
|
||||
providers_map:
|
||||
- { name: DigitalOcean, alias: digitalocean }
|
||||
- { name: Amazon EC2, alias: ec2 }
|
||||
- { name: Vultr, alias: vultr }
|
||||
- { name: Microsoft Azure, alias: azure }
|
||||
- { name: Google Compute Engine, alias: gce }
|
||||
- { name: Scaleway, alias: scaleway}
|
||||
- { name: OpenStack (DreamCompute optimised), alias: openstack }
|
||||
- { name: Install to existing Ubuntu 18.04 server (Advanced), alias: local }
|
||||
vars_files:
|
||||
- config.cfg
|
||||
|
||||
tasks:
|
||||
- pause:
|
||||
prompt: |
|
||||
What provider would you like to use?
|
||||
{% for p in providers_map %}
|
||||
{{ loop.index }}. {{ p['name']}}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired provider
|
||||
register: _algo_provider
|
||||
when: provider is undefined
|
||||
|
||||
- name: Set facts based on the input
|
||||
set_fact:
|
||||
algo_provider: "{{ provider | default(providers_map[_algo_provider.user_input|default(omit)|int - 1]['alias']) }}"
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Name the vpn server
|
||||
[algo]
|
||||
register: _algo_server_name
|
||||
when:
|
||||
- server_name is undefined
|
||||
- algo_provider != "local"
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Do you want macOS/iOS clients to enable "VPN On Demand" when connected to cellular networks?
|
||||
[y/N]
|
||||
register: _ondemand_cellular
|
||||
when: ondemand_cellular is undefined
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Do you want macOS/iOS clients to enable "VPN On Demand" when connected to Wi-Fi?
|
||||
[y/N]
|
||||
register: _ondemand_wifi
|
||||
when: ondemand_wifi is undefined
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
List the names of trusted Wi-Fi networks (if any) that macOS/iOS clients exclude from using the VPN
|
||||
(e.g., your home network. Comma-separated value, e.g., HomeNet,OfficeWifi,AlgoWiFi)
|
||||
register: _ondemand_wifi_exclude
|
||||
when:
|
||||
- ondemand_wifi_exclude is undefined
|
||||
- (ondemand_wifi|default(false)|bool) or
|
||||
(booleans_map[_ondemand_wifi.user_input|default(omit)]|default(false))
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Do you want to install a DNS resolver on this VPN server, to block ads while surfing?
|
||||
[y/N]
|
||||
register: _local_dns
|
||||
when: local_dns is undefined
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Do you want each user to have their own account for SSH tunneling?
|
||||
[y/N]
|
||||
register: _ssh_tunneling
|
||||
when: ssh_tunneling is undefined
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Do you want the VPN to support Windows 10 or Linux Desktop clients? (enables compatible ciphers and key exchange, less secure)
|
||||
[y/N]
|
||||
register: _windows
|
||||
when: windows is undefined
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Do you want to retain the CA key? (required to add users in the future, but less secure)
|
||||
[y/N]
|
||||
register: _store_cakey
|
||||
when: store_cakey is undefined
|
||||
|
||||
- name: Set facts based on the input
|
||||
set_fact:
|
||||
algo_server_name: >-
|
||||
{% if server_name is defined %}{% set _server = server_name %}
|
||||
{%- elif _algo_server_name.user_input is defined and _algo_server_name.user_input != "" %}{% set _server = _algo_server_name.user_input %}
|
||||
{%- else %}{% set _server = defaults['server_name'] %}{% endif -%}
|
||||
{{ _server | regex_replace('(?!\.)(\W|_)', '-') }}
|
||||
algo_ondemand_cellular: >-
|
||||
{% if ondemand_cellular is defined %}{{ ondemand_cellular | bool }}
|
||||
{%- elif _ondemand_cellular.user_input is defined and _ondemand_cellular.user_input != "" %}{{ booleans_map[_ondemand_cellular.user_input] | default(defaults['ondemand_cellular']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_ondemand_wifi: >-
|
||||
{% if ondemand_wifi is defined %}{{ ondemand_wifi | bool }}
|
||||
{%- elif _ondemand_wifi.user_input is defined and _ondemand_wifi.user_input != "" %}{{ booleans_map[_ondemand_wifi.user_input] | default(defaults['ondemand_wifi']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_ondemand_wifi_exclude: >-
|
||||
{% if ondemand_wifi_exclude is defined %}{{ ondemand_wifi_exclude }}
|
||||
{%- elif _ondemand_wifi_exclude.user_input is defined and _ondemand_wifi_exclude.user_input != "" %}{{ _ondemand_wifi_exclude.user_input }}
|
||||
{%- else %}_null{% endif %}
|
||||
algo_local_dns: >-
|
||||
{% if local_dns is defined %}{{ local_dns | bool }}
|
||||
{%- elif _local_dns.user_input is defined and _local_dns.user_input != "" %}{{ booleans_map[_local_dns.user_input] | default(defaults['local_dns']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_ssh_tunneling: >-
|
||||
{% if ssh_tunneling is defined %}{{ ssh_tunneling | bool }}
|
||||
{%- elif _ssh_tunneling.user_input is defined and _ssh_tunneling.user_input != "" %}{{ booleans_map[_ssh_tunneling.user_input] | default(defaults['ssh_tunneling']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_windows: >-
|
||||
{% if windows is defined %}{{ windows | bool }}
|
||||
{%- elif _windows.user_input is defined and _windows.user_input != "" %}{{ booleans_map[_windows.user_input] | default(defaults['windows']) }}
|
||||
{%- else %}false{% endif %}
|
||||
algo_store_cakey: >-
|
||||
{% if store_cakey is defined %}{{ store_cakey | bool }}
|
||||
{%- elif _store_cakey.user_input is defined and _store_cakey.user_input != "" %}{{ booleans_map[_store_cakey.user_input] | default(defaults['store_cakey']) }}
|
||||
{%- else %}false{% endif %}
|
|
@ -1,217 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: digital_ocean_tag
|
||||
short_description: Create and remove tag(s) to DigitalOcean resource.
|
||||
description:
|
||||
- Create and remove tag(s) to DigitalOcean resource.
|
||||
author: "Victor Volle (@kontrafiktion)"
|
||||
version_added: "2.2"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of the tag. The supported characters for names include
|
||||
alphanumeric characters, dashes, and underscores.
|
||||
required: true
|
||||
resource_id:
|
||||
description:
|
||||
- The ID of the resource to operate on.
|
||||
- The data type of resource_id is changed from integer to string, from version 2.5.
|
||||
aliases: ['droplet_id']
|
||||
resource_type:
|
||||
description:
|
||||
- The type of resource to operate on. Currently, only tagging of
|
||||
droplets is supported.
|
||||
default: droplet
|
||||
choices: ['droplet']
|
||||
state:
|
||||
description:
|
||||
- Whether the tag should be present or absent on the resource.
|
||||
default: present
|
||||
choices: ['present', 'absent']
|
||||
api_token:
|
||||
description:
|
||||
- DigitalOcean api token.
|
||||
|
||||
notes:
|
||||
- Two environment variables can be used, DO_API_KEY and DO_API_TOKEN.
|
||||
They both refer to the v2 token.
|
||||
- As of Ansible 2.0, Version 2 of the DigitalOcean API is used.
|
||||
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: create a tag
|
||||
digital_ocean_tag:
|
||||
name: production
|
||||
state: present
|
||||
|
||||
- name: tag a resource; creating the tag if it does not exists
|
||||
digital_ocean_tag:
|
||||
name: "{{ item }}"
|
||||
resource_id: "73333005"
|
||||
state: present
|
||||
with_items:
|
||||
- staging
|
||||
- dbserver
|
||||
|
||||
- name: untag a resource
|
||||
digital_ocean_tag:
|
||||
name: staging
|
||||
resource_id: "73333005"
|
||||
state: absent
|
||||
|
||||
# Deleting a tag also untags all the resources that have previously been
|
||||
# tagged with it
|
||||
- name: remove a tag
|
||||
digital_ocean_tag:
|
||||
name: dbserver
|
||||
state: absent
|
||||
'''
|
||||
|
||||
|
||||
RETURN = '''
|
||||
data:
|
||||
description: a DigitalOcean Tag resource
|
||||
returned: success and no resource constraint
|
||||
type: dict
|
||||
sample: {
|
||||
"tag": {
|
||||
"name": "awesome",
|
||||
"resources": {
|
||||
"droplets": {
|
||||
"count": 0,
|
||||
"last_tagged": null
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
from traceback import format_exc
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.digital_ocean import DigitalOceanHelper
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
def core(module):
|
||||
state = module.params['state']
|
||||
name = module.params['name']
|
||||
resource_id = module.params['resource_id']
|
||||
resource_type = module.params['resource_type']
|
||||
|
||||
rest = DigitalOceanHelper(module)
|
||||
|
||||
# Check if api_token is valid or not
|
||||
response = rest.get('account')
|
||||
if response.status_code == 401:
|
||||
module.fail_json(msg='Failed to login using api_token, please verify '
|
||||
'validity of api_token')
|
||||
if state == 'present':
|
||||
response = rest.get('tags/{0}'.format(name))
|
||||
status_code = response.status_code
|
||||
resp_json = response.json
|
||||
changed = False
|
||||
if status_code == 200 and resp_json['tag']['name'] == name:
|
||||
changed = False
|
||||
else:
|
||||
# Ensure Tag exists
|
||||
response = rest.post("tags", data={'name': name})
|
||||
status_code = response.status_code
|
||||
resp_json = response.json
|
||||
if status_code == 201:
|
||||
changed = True
|
||||
elif status_code == 422:
|
||||
changed = False
|
||||
else:
|
||||
module.exit_json(changed=False, data=resp_json)
|
||||
|
||||
if resource_id is None:
|
||||
# No resource defined, we're done.
|
||||
module.exit_json(changed=changed, data=resp_json)
|
||||
else:
|
||||
# Check if resource is already tagged or not
|
||||
found = False
|
||||
url = "{0}?tag_name={1}".format(resource_type, name)
|
||||
if resource_type == 'droplet':
|
||||
url = "droplets?tag_name={0}".format(name)
|
||||
response = rest.get(url)
|
||||
status_code = response.status_code
|
||||
resp_json = response.json
|
||||
if status_code == 200:
|
||||
for resource in resp_json['droplets']:
|
||||
if not found and resource['id'] == int(resource_id):
|
||||
found = True
|
||||
break
|
||||
if not found:
|
||||
# If resource is not tagged, tag a resource
|
||||
url = "tags/{0}/resources".format(name)
|
||||
payload = {
|
||||
'resources': [{
|
||||
'resource_id': resource_id,
|
||||
'resource_type': resource_type}]}
|
||||
response = rest.post(url, data=payload)
|
||||
if response.status_code == 204:
|
||||
module.exit_json(changed=True)
|
||||
else:
|
||||
module.fail_json(msg="error tagging resource '{0}': {1}".format(resource_id, response.json["message"]))
|
||||
else:
|
||||
# Already tagged resource
|
||||
module.exit_json(changed=False)
|
||||
else:
|
||||
# Unable to find resource specified by user
|
||||
module.fail_json(msg=resp_json['message'])
|
||||
|
||||
elif state == 'absent':
|
||||
if resource_id:
|
||||
url = "tags/{0}/resources".format(name)
|
||||
payload = {
|
||||
'resources': [{
|
||||
'resource_id': resource_id,
|
||||
'resource_type': resource_type}]}
|
||||
response = rest.delete(url, data=payload)
|
||||
else:
|
||||
url = "tags/{0}".format(name)
|
||||
response = rest.delete(url)
|
||||
if response.status_code == 204:
|
||||
module.exit_json(changed=True)
|
||||
else:
|
||||
module.exit_json(changed=False, data=response.json)
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
name=dict(type='str', required=True),
|
||||
resource_id=dict(aliases=['droplet_id'], type='str'),
|
||||
resource_type=dict(choices=['droplet'], default='droplet'),
|
||||
state=dict(choices=['present', 'absent'], default='present'),
|
||||
api_token=dict(aliases=['API_TOKEN'], no_log=True),
|
||||
)
|
||||
)
|
||||
|
||||
try:
|
||||
core(module)
|
||||
except Exception as e:
|
||||
module.fail_json(msg=to_native(e), exception=format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@ -1,216 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.1'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_ami_copy
|
||||
short_description: copies AMI between AWS regions, return new image id
|
||||
description:
|
||||
- Copies AMI from a source region to a destination region. This module has a dependency on python-boto >= 2.5
|
||||
version_added: "2.0"
|
||||
options:
|
||||
source_region:
|
||||
description:
|
||||
- the source region that AMI should be copied from
|
||||
required: true
|
||||
source_image_id:
|
||||
description:
|
||||
- the id of the image in source region that should be copied
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- The name of the new image to copy
|
||||
required: true
|
||||
default: null
|
||||
description:
|
||||
description:
|
||||
- An optional human-readable string describing the contents and purpose of the new AMI.
|
||||
required: false
|
||||
default: null
|
||||
encrypted:
|
||||
description:
|
||||
- Whether or not to encrypt the target image
|
||||
required: false
|
||||
default: null
|
||||
version_added: "2.2"
|
||||
kms_key_id:
|
||||
description:
|
||||
- KMS key id used to encrypt image. If not specified, uses default EBS Customer Master Key (CMK) for your account.
|
||||
required: false
|
||||
default: null
|
||||
version_added: "2.2"
|
||||
wait:
|
||||
description:
|
||||
- wait for the copied AMI to be in state 'available' before returning.
|
||||
required: false
|
||||
default: false
|
||||
tags:
|
||||
description:
|
||||
- a hash/dictionary of tags to add to the new copied AMI; '{"key":"value"}' and '{"key":"value","key":"value"}'
|
||||
required: false
|
||||
default: null
|
||||
|
||||
author: Amir Moulavi <amir.moulavi@gmail.com>, Tim C <defunct@defunct.io>
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Basic AMI Copy
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
|
||||
# AMI copy wait until available
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
wait: yes
|
||||
register: image_id
|
||||
|
||||
# Named AMI copy
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
name: My-Awesome-AMI
|
||||
description: latest patch
|
||||
|
||||
# Tagged AMI copy
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
tags:
|
||||
Name: My-Super-AMI
|
||||
Patch: 1.2.3
|
||||
|
||||
# Encrypted AMI copy
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
encrypted: yes
|
||||
|
||||
# Encrypted AMI copy with specified key
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
encrypted: yes
|
||||
kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import (boto3_conn, ec2_argument_spec, get_aws_connection_info)
|
||||
|
||||
try:
|
||||
import boto
|
||||
import boto.ec2
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
try:
|
||||
import boto3
|
||||
from botocore.exceptions import ClientError, NoCredentialsError, NoRegionError
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
|
||||
|
||||
def copy_image(ec2, module):
|
||||
"""
|
||||
Copies an AMI
|
||||
|
||||
module : AnsibleModule object
|
||||
ec2: ec2 connection object
|
||||
"""
|
||||
|
||||
tags = module.params.get('tags')
|
||||
|
||||
params = {'SourceRegion': module.params.get('source_region'),
|
||||
'SourceImageId': module.params.get('source_image_id'),
|
||||
'Name': module.params.get('name'),
|
||||
'Description': module.params.get('description'),
|
||||
'Encrypted': module.params.get('encrypted'),
|
||||
# 'KmsKeyId': module.params.get('kms_key_id')
|
||||
}
|
||||
if module.params.get('kms_key_id'):
|
||||
params['KmsKeyId'] = module.params.get('kms_key_id')
|
||||
|
||||
try:
|
||||
image_id = ec2.copy_image(**params)['ImageId']
|
||||
if module.params.get('wait'):
|
||||
ec2.get_waiter('image_available').wait(ImageIds=[image_id])
|
||||
if module.params.get('tags'):
|
||||
ec2.create_tags(
|
||||
Resources=[image_id],
|
||||
Tags=[{'Key' : k, 'Value': v} for k,v in module.params.get('tags').items()]
|
||||
)
|
||||
|
||||
module.exit_json(changed=True, image_id=image_id)
|
||||
except ClientError as ce:
|
||||
module.fail_json(msg=ce)
|
||||
except NoCredentialsError:
|
||||
module.fail_json(msg="Unable to locate AWS credentials")
|
||||
except Exception as e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
source_region=dict(required=True),
|
||||
source_image_id=dict(required=True),
|
||||
name=dict(required=True),
|
||||
description=dict(default=''),
|
||||
encrypted=dict(type='bool', required=False),
|
||||
kms_key_id=dict(type='str', required=False),
|
||||
wait=dict(type='bool', default=False, required=False),
|
||||
tags=dict(type='dict')))
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec)
|
||||
|
||||
if not HAS_BOTO:
|
||||
module.fail_json(msg='boto required for this module')
|
||||
# TODO: Check botocore version
|
||||
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
|
||||
|
||||
if HAS_BOTO3:
|
||||
|
||||
try:
|
||||
ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url,
|
||||
**aws_connect_params)
|
||||
except NoRegionError:
|
||||
module.fail_json(msg='AWS Region is required')
|
||||
else:
|
||||
module.fail_json(msg='boto3 required for this module')
|
||||
|
||||
copy_image(ec2, module)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
139 library/gce_region_facts.py Normal file
|
@ -0,0 +1,139 @@
|
|||
#!/usr/bin/python
|
||||
# Copyright 2013 Google Inc.
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: gce_region_facts
|
||||
version_added: "5.3"
|
||||
short_description: Gather facts about GCE regions.
|
||||
description:
|
||||
- Gather facts about GCE regions.
|
||||
options:
|
||||
service_account_email:
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- service account email
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
pem_file:
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- path to the pem file associated with the service account email
|
||||
This option is deprecated. Use 'credentials_file'.
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
credentials_file:
|
||||
version_added: "2.1.0"
|
||||
description:
|
||||
- path to the JSON file associated with the service account email
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
project_id:
|
||||
version_added: "1.6"
|
||||
description:
|
||||
- your GCE project ID
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
|
||||
author: "Jack Ivanov (@jackivanov)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Gather facts about all regions
|
||||
- gce_region_facts:
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
regions:
|
||||
returned: on success
|
||||
description: >
|
||||
Each element consists of a dict with all the information related
|
||||
to that region.
|
||||
type: list
|
||||
sample: "[{
|
||||
"name": "asia-east1",
|
||||
"status": "UP",
|
||||
"zones": [
|
||||
{
|
||||
"name": "asia-east1-a",
|
||||
"status": "UP"
|
||||
},
|
||||
{
|
||||
"name": "asia-east1-b",
|
||||
"status": "UP"
|
||||
},
|
||||
{
|
||||
"name": "asia-east1-c",
|
||||
"status": "UP"
|
||||
}
|
||||
]
|
||||
}]"
|
||||
'''
|
||||
try:
|
||||
from libcloud.compute.types import Provider
|
||||
from libcloud.compute.providers import get_driver
|
||||
from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError
|
||||
_ = Provider.GCE
|
||||
HAS_LIBCLOUD = True
|
||||
except ImportError:
|
||||
HAS_LIBCLOUD = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.gce import gce_connect, unexpected_error_msg
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
service_account_email=dict(),
|
||||
pem_file=dict(type='path'),
|
||||
credentials_file=dict(type='path'),
|
||||
project_id=dict(),
|
||||
)
|
||||
)
|
||||
|
||||
if not HAS_LIBCLOUD:
|
||||
module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
|
||||
|
||||
gce = gce_connect(module)
|
||||
|
||||
changed = False
|
||||
gce_regions = []
|
||||
|
||||
try:
|
||||
regions = gce.ex_list_regions()
|
||||
for r in regions:
|
||||
gce_region = {}
|
||||
gce_region['name'] = r.name
|
||||
gce_region['status'] = r.status
|
||||
gce_region['zones'] = []
|
||||
for z in r.zones:
|
||||
gce_zone = {}
|
||||
gce_zone['name'] = z.name
|
||||
gce_zone['status'] = z.status
|
||||
gce_region['zones'].append(gce_zone)
|
||||
gce_regions.append(gce_region)
|
||||
json_output = { 'regions': gce_regions }
|
||||
module.exit_json(changed=False, results=json_output)
|
||||
except ResourceNotFoundError:
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
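For reference, a minimal sketch of how the new gce_region_facts output can be consumed in a play; the debug task and the filter chain are illustrative and not part of this commit:

- name: Gather facts about GCE regions
  gce_region_facts:
    credentials_file: "{{ credentials_file_path }}"
    project_id: "{{ project_id }}"
  register: _gce_regions

# The module returns {'regions': [...]} under 'results'
- name: Show the zones that are currently UP in each region
  debug:
    msg: "{{ item.zones | selectattr('status', 'equalto', 'UP') | map(attribute='name') | list }}"
  with_items: "{{ _gce_regions.results.regions }}"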
102 library/lightsail_region_facts.py Normal file
|
@ -0,0 +1,102 @@
|
|||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: lightsail_region_facts
|
||||
short_description: Gather facts about AWS Lightsail regions.
|
||||
description:
|
||||
- Gather facts about AWS Lightsail regions.
|
||||
version_added: "2.5.3"
|
||||
author: "Jack Ivanov (@jackivanov)"
|
||||
options:
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- boto3
|
||||
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
# Gather facts about all regions
|
||||
- lightsail_region_facts:
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
regions:
|
||||
returned: on success
|
||||
description: >
|
||||
Each element consists of a dict with all the information related
|
||||
to that region.
|
||||
type: list
|
||||
sample: "[{
|
||||
"availabilityZones": [],
|
||||
"continentCode": "NA",
|
||||
"description": "This region is recommended to serve users in the eastern United States",
|
||||
"displayName": "Virginia",
|
||||
"name": "us-east-1"
|
||||
}]"
|
||||
'''
|
||||
|
||||
import time
|
||||
import traceback
|
||||
|
||||
try:
|
||||
import botocore
|
||||
HAS_BOTOCORE = True
|
||||
except ImportError:
|
||||
HAS_BOTOCORE = False
|
||||
|
||||
try:
|
||||
import boto3
|
||||
except ImportError:
|
||||
# will be caught by imported HAS_BOTO3
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
|
||||
HAS_BOTO3, camel_dict_to_snake_dict)
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
module = AnsibleModule(argument_spec=argument_spec)
|
||||
|
||||
if not HAS_BOTO3:
|
||||
module.fail_json(msg='Python module "boto3" is missing, please install it')
|
||||
|
||||
if not HAS_BOTOCORE:
|
||||
module.fail_json(msg='Python module "botocore" is missing, please install it')
|
||||
|
||||
try:
|
||||
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
|
||||
|
||||
client = None
|
||||
try:
|
||||
client = boto3_conn(module, conn_type='client', resource='lightsail',
|
||||
region=region, endpoint=ec2_url, **aws_connect_kwargs)
|
||||
except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
|
||||
module.fail_json(msg='Failed while connecting to the lightsail service: %s' % e, exception=traceback.format_exc())
|
||||
|
||||
response = client.get_regions(
|
||||
includeAvailabilityZones=False
|
||||
)
|
||||
module.exit_json(changed=False, results=response)
|
||||
except (botocore.exceptions.ClientError, Exception) as e:
|
||||
module.fail_json(msg=str(e), exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
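A minimal usage sketch for the new lightsail_region_facts module, assuming AWS credentials are already available through the usual environment variables; the debug task is illustrative:

- name: Gather facts about Lightsail regions
  lightsail_region_facts:
  register: _lightsail_regions

# get_regions() is returned as-is, so the region list lives under results.regions
- name: Show the available region names
  debug:
    msg: "{{ _lightsail_regions.results.regions | map(attribute='name') | list }}"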
9 main.yml Normal file
|
@@ -0,0 +1,9 @@
---
- name: Include prompts playbook
  import_playbook: input.yml

- name: Include cloud provisioning playbook
  import_playbook: cloud.yml

- name: Include server configuration playbook
  import_playbook: server.yml
|
45 playbooks/cloud-post.yml Normal file
|
@@ -0,0 +1,45 @@
---
- name: Set subjectAltName as a fact
  set_fact:
    IP_subject_alt_name: "{% if algo_provider == 'local' %}{{ IP_subject_alt_name }}{% else %}{{ cloud_instance_ip }}{% endif %}"

- name: Add the server to an inventory group
  add_host:
    name: "{% if cloud_instance_ip == 'localhost' %}localhost{% else %}{{ cloud_instance_ip }}{% endif %}"
    groups: vpn-host
    ansible_connection: "{% if cloud_instance_ip == 'localhost' %}local{% else %}ssh{% endif %}"
    ansible_ssh_user: "{{ ansible_ssh_user }}"
    ansible_python_interpreter: "/usr/bin/python2.7"
    algo_provider: "{{ algo_provider }}"
    algo_server_name: "{{ algo_server_name }}"
    algo_ondemand_cellular: "{{ algo_ondemand_cellular }}"
    algo_ondemand_wifi: "{{ algo_ondemand_wifi }}"
    algo_ondemand_wifi_exclude: "{{ algo_ondemand_wifi_exclude }}"
    algo_local_dns: "{{ algo_local_dns }}"
    algo_ssh_tunneling: "{{ algo_ssh_tunneling }}"
    algo_windows: "{{ algo_windows }}"
    algo_store_cakey: "{{ algo_store_cakey }}"
    IP_subject_alt_name: "{{ IP_subject_alt_name }}"

- name: Additional variables for the server
  add_host:
    name: "{% if cloud_instance_ip == 'localhost' %}localhost{% else %}{{ cloud_instance_ip }}{% endif %}"
    ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
  when: algo_provider != 'local'

- name: Wait until SSH becomes ready...
  wait_for:
    port: 22
    host: "{{ cloud_instance_ip }}"
    search_regex: "OpenSSH"
    delay: 10
    timeout: 320
    state: present
  when: cloud_instance_ip != "localhost"

- debug:
    var: IP_subject_alt_name

- name: A short pause, in order to be sure the instance is ready
  pause:
    seconds: 20
|
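The server.yml play that main.yml imports is not part of this hunk; a hypothetical consumer of the vpn-host group assembled above might look like the following (the role list is illustrative):

- hosts: vpn-host
  gather_facts: true
  become: true
  roles:
    - common   # illustrative; the real play defines its own role list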
13 playbooks/cloud-pre.yml Normal file
|
@@ -0,0 +1,13 @@
---
- name: Generate the SSH private key
  openssl_privatekey:
    path: "{{ SSH_keys.private }}"
    size: 2048
    mode: "0600"
    type: RSA

- name: Generate the SSH public key
  openssl_publickey:
    path: "{{ SSH_keys.public }}"
    privatekey_path: "{{ SSH_keys.private }}"
    format: OpenSSH
|
|
@ -1,15 +0,0 @@
|
|||
---
|
||||
|
||||
- name: Check the system
|
||||
raw: uname -a
|
||||
register: OS
|
||||
|
||||
- name: Ubuntu pre-tasks
|
||||
include_tasks: ubuntu.yml
|
||||
when: '"Ubuntu" in OS.stdout or "Linux" in OS.stdout'
|
||||
|
||||
- name: FreeBSD pre-tasks
|
||||
include_tasks: freebsd.yml
|
||||
when: '"FreeBSD" in OS.stdout'
|
||||
|
||||
- include_tasks: facts/main.yml
|
|
@ -1,10 +0,0 @@
|
|||
---
|
||||
|
||||
- set_fact:
|
||||
config_prefix: "/usr/local/"
|
||||
root_group: wheel
|
||||
ssh_service_name: sshd
|
||||
apparmor_enabled: false
|
||||
strongswan_additional_plugins:
|
||||
- kernel-pfroute
|
||||
- kernel-pfkey
|
|
@ -1,44 +0,0 @@
|
|||
---
|
||||
|
||||
- name: Gather Facts
|
||||
setup:
|
||||
|
||||
- name: Ensure the algo ssh key exists on the server
|
||||
authorized_key:
|
||||
user: "{{ ansible_ssh_user }}"
|
||||
state: present
|
||||
key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
tags: [ 'cloud' ]
|
||||
|
||||
- name: Check if IPv6 configured
|
||||
set_fact:
|
||||
ipv6_support: "{% if ansible_default_ipv6['gateway'] is defined %}true{% else %}false{% endif %}"
|
||||
|
||||
- name: Set facts if the deployment in a cloud
|
||||
set_fact:
|
||||
cloud_deployment: true
|
||||
tags: ['cloud']
|
||||
|
||||
- name: Generate password for the CA key
|
||||
local_action:
|
||||
module: shell
|
||||
openssl rand -hex 16
|
||||
become: no
|
||||
register: CA_password
|
||||
|
||||
- name: Generate p12 export password
|
||||
local_action:
|
||||
module: shell
|
||||
openssl rand 8 | python -c 'import sys,string; chars=string.ascii_letters + string.digits + "_@"; print "".join([chars[ord(c) % 64] for c in list(sys.stdin.read())])'
|
||||
become: no
|
||||
register: p12_export_password_generated
|
||||
when: p12_export_password is not defined
|
||||
|
||||
- name: Define password facts
|
||||
set_fact:
|
||||
easyrsa_p12_export_password: "{{ p12_export_password|default(p12_export_password_generated.stdout) }}"
|
||||
easyrsa_CA_password: "{{ CA_password.stdout }}"
|
||||
|
||||
- name: Define the commonName
|
||||
set_fact:
|
||||
IP_subject_alt_name: "{{ IP_subject_alt_name }}"
|
|
@ -1,9 +0,0 @@
|
|||
---
|
||||
|
||||
- name: FreeBSD / HardenedBSD | Install prerequisites
|
||||
raw: sleep 10 && env ASSUME_ALWAYS_YES=YES sudo pkg install -y python27
|
||||
|
||||
- name: FreeBSD / HardenedBSD | Configure defaults
|
||||
raw: sudo ln -sf /usr/local/bin/python2.7 /usr/bin/python2.7
|
||||
|
||||
- include_tasks: facts/FreeBSD.yml
|
|
@ -1,31 +0,0 @@
|
|||
---
|
||||
|
||||
- name: Generate the SSH private key
|
||||
shell: >
|
||||
echo -e 'n' |
|
||||
ssh-keygen -b 2048 -C {{ SSH_keys.comment }}
|
||||
-t rsa -f {{ SSH_keys.private }} -q -N ""
|
||||
args:
|
||||
creates: "{{ SSH_keys.private }}"
|
||||
|
||||
- name: Generate the SSH public key
|
||||
shell: >
|
||||
echo `ssh-keygen -y -f {{ SSH_keys.private }}` {{ SSH_keys.comment }}
|
||||
> {{ SSH_keys.public }}
|
||||
changed_when: false
|
||||
|
||||
- name: Change mode for the SSH private key
|
||||
file:
|
||||
path: "{{ SSH_keys.private }}"
|
||||
mode: 0600
|
||||
|
||||
- name: Ensure the dynamic inventory exists
|
||||
blockinfile:
|
||||
dest: configs/inventory.dynamic
|
||||
marker: "# {mark} ALGO MANAGED BLOCK"
|
||||
create: true
|
||||
block: |
|
||||
[algo:children]
|
||||
{% for group in cloud_providers.keys() %}
|
||||
{{ group }}
|
||||
{% endfor %}
|
|
@ -1,12 +0,0 @@
|
|||
---
|
||||
|
||||
- name: Ensure the local ssh directory exists
|
||||
file:
|
||||
path: ~/.ssh/
|
||||
state: directory
|
||||
|
||||
- name: Copy the algo ssh key to the local ssh directory
|
||||
copy:
|
||||
src: "{{ SSH_keys.private }}"
|
||||
dest: ~/.ssh/algo.pem
|
||||
mode: '0600'
|
|
@ -1,16 +0,0 @@
|
|||
---
|
||||
|
||||
- name: Wait until SSH becomes ready...
|
||||
wait_for:
|
||||
port: 22
|
||||
host: "{{ cloud_instance_ip }}"
|
||||
search_regex: "OpenSSH"
|
||||
delay: 10
|
||||
timeout: 320
|
||||
state: present
|
||||
|
||||
- name: A short pause, in order to be sure the instance is ready
|
||||
pause:
|
||||
seconds: 20
|
||||
|
||||
- include_tasks: local_ssh.yml
|
|
@ -1,14 +0,0 @@
|
|||
---
|
||||
|
||||
- name: Ubuntu | Install prerequisites
|
||||
raw: "{{ item }}"
|
||||
with_items:
|
||||
- sleep 10
|
||||
- apt-get update -qq
|
||||
- apt-get install -qq -y python2.7 sudo
|
||||
become: true
|
||||
|
||||
- name: Ubuntu | Configure defaults
|
||||
raw: sudo update-alternatives --install /usr/bin/python python /usr/bin/python2.7 1
|
||||
tags:
|
||||
- update-alternatives
|
|
@ -1,6 +1,6 @@
|
|||
setuptools>=11.3
|
||||
SecretStorage < 3
|
||||
ansible[azure]==2.4.3
|
||||
ansible[azure]==2.5.2
|
||||
dopy==0.3.5
|
||||
boto>=2.5
|
||||
boto3
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
setup:
|
||||
|
||||
- name: Include system based facts and tasks
|
||||
include_tasks: systems/main.yml
|
||||
import_tasks: systems/main.yml
|
||||
|
||||
- name: Install prerequisites
|
||||
package: name="{{ item }}" state=present
|
||||
|
|
214 roles/cloud-azure/defaults/main.yml Normal file
|
@ -0,0 +1,214 @@
|
|||
---
|
||||
azure_regions: >
|
||||
[
|
||||
{
|
||||
"displayName": "East Asia",
|
||||
"latitude": "22.267",
|
||||
"longitude": "114.188",
|
||||
"name": "eastasia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Southeast Asia",
|
||||
"latitude": "1.283",
|
||||
"longitude": "103.833",
|
||||
"name": "southeastasia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Central US",
|
||||
"latitude": "41.5908",
|
||||
"longitude": "-93.6208",
|
||||
"name": "centralus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "East US",
|
||||
"latitude": "37.3719",
|
||||
"longitude": "-79.8164",
|
||||
"name": "eastus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "East US 2",
|
||||
"latitude": "36.6681",
|
||||
"longitude": "-78.3889",
|
||||
"name": "eastus2",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West US",
|
||||
"latitude": "37.783",
|
||||
"longitude": "-122.417",
|
||||
"name": "westus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "North Central US",
|
||||
"latitude": "41.8819",
|
||||
"longitude": "-87.6278",
|
||||
"name": "northcentralus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "South Central US",
|
||||
"latitude": "29.4167",
|
||||
"longitude": "-98.5",
|
||||
"name": "southcentralus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "North Europe",
|
||||
"latitude": "53.3478",
|
||||
"longitude": "-6.2597",
|
||||
"name": "northeurope",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West Europe",
|
||||
"latitude": "52.3667",
|
||||
"longitude": "4.9",
|
||||
"name": "westeurope",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Japan West",
|
||||
"latitude": "34.6939",
|
||||
"longitude": "135.5022",
|
||||
"name": "japanwest",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Japan East",
|
||||
"latitude": "35.68",
|
||||
"longitude": "139.77",
|
||||
"name": "japaneast",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Brazil South",
|
||||
"latitude": "-23.55",
|
||||
"longitude": "-46.633",
|
||||
"name": "brazilsouth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Australia East",
|
||||
"latitude": "-33.86",
|
||||
"longitude": "151.2094",
|
||||
"name": "australiaeast",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Australia Southeast",
|
||||
"latitude": "-37.8136",
|
||||
"longitude": "144.9631",
|
||||
"name": "australiasoutheast",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "South India",
|
||||
"latitude": "12.9822",
|
||||
"longitude": "80.1636",
|
||||
"name": "southindia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Central India",
|
||||
"latitude": "18.5822",
|
||||
"longitude": "73.9197",
|
||||
"name": "centralindia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West India",
|
||||
"latitude": "19.088",
|
||||
"longitude": "72.868",
|
||||
"name": "westindia",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Canada Central",
|
||||
"latitude": "43.653",
|
||||
"longitude": "-79.383",
|
||||
"name": "canadacentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Canada East",
|
||||
"latitude": "46.817",
|
||||
"longitude": "-71.217",
|
||||
"name": "canadaeast",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "UK South",
|
||||
"latitude": "50.941",
|
||||
"longitude": "-0.799",
|
||||
"name": "uksouth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "UK West",
|
||||
"latitude": "53.427",
|
||||
"longitude": "-3.084",
|
||||
"name": "ukwest",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West Central US",
|
||||
"latitude": "40.890",
|
||||
"longitude": "-110.234",
|
||||
"name": "westcentralus",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "West US 2",
|
||||
"latitude": "47.233",
|
||||
"longitude": "-119.852",
|
||||
"name": "westus2",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Korea Central",
|
||||
"latitude": "37.5665",
|
||||
"longitude": "126.9780",
|
||||
"name": "koreacentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Korea South",
|
||||
"latitude": "35.1796",
|
||||
"longitude": "129.0756",
|
||||
"name": "koreasouth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "France Central",
|
||||
"latitude": "46.3772",
|
||||
"longitude": "2.3730",
|
||||
"name": "francecentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "France South",
|
||||
"latitude": "43.8345",
|
||||
"longitude": "2.1972",
|
||||
"name": "francesouth",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Australia Central",
|
||||
"latitude": "-35.3075",
|
||||
"longitude": "149.1244",
|
||||
"name": "australiacentral",
|
||||
"subscriptionId": null
|
||||
},
|
||||
{
|
||||
"displayName": "Australia Central 2",
|
||||
"latitude": "-35.3075",
|
||||
"longitude": "149.1244",
|
||||
"name": "australiacentral2",
|
||||
"subscriptionId": null
|
||||
}
|
||||
]
|
|
@ -1,5 +1,8 @@
|
|||
---
|
||||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- set_fact:
|
||||
resource_group: "Algo_{{ region }}"
|
||||
secret: "{{ azure_secret | default(lookup('env','AZURE_SECRET'), true) }}"
|
||||
|
@ -116,31 +119,10 @@
|
|||
subnet_name: algo_subnet
|
||||
security_group_name: AlgoSecGroup
|
||||
|
||||
- name: Add the instance to an inventory group
|
||||
add_host:
|
||||
name: "{{ ip_address }}"
|
||||
groups: vpn-host
|
||||
ansible_ssh_user: ubuntu
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||
cloud_provider: azure
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ ip_address }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
|
||||
- name: Ensure the group azure exists in the dynamic inventory file
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
line: '[azure]'
|
||||
|
||||
- name: Populate the dynamic inventory
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
insertafter: '\[azure\]'
|
||||
regexp: "^{{ cloud_instance_ip }}.*"
|
||||
line: "{{ cloud_instance_ip }}"
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
|
|
70 roles/cloud-azure/tasks/prompts.yml Normal file
|
@ -0,0 +1,70 @@
|
|||
---
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your azure secret id (https://github.com/trailofbits/algo/blob/master/docs/cloud-azure.md)
|
||||
You can skip this step if you want to use your default credentials from ~/.azure/credentials
|
||||
echo: false
|
||||
register: _azure_secret
|
||||
when:
|
||||
- azure_secret is undefined
|
||||
- lookup('env','AZURE_SECRET')|length <= 0
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your azure tenant id (https://github.com/trailofbits/algo/blob/master/docs/cloud-azure.md)
|
||||
You can skip this step if you want to use your default credentials from ~/.azure/credentials
|
||||
echo: false
|
||||
register: _azure_tenant
|
||||
when:
|
||||
- azure_tenant is undefined
|
||||
- lookup('env','AZURE_TENANT')|length <= 0
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your azure client id (application id) (https://github.com/trailofbits/algo/blob/master/docs/cloud-azure.md)
|
||||
You can skip this step if you want to use your default credentials from ~/.azure/credentials
|
||||
echo: false
|
||||
register: _azure_client_id
|
||||
when:
|
||||
- azure_client_id is undefined
|
||||
- lookup('env','AZURE_CLIENT_ID')|length <= 0
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your azure subscription id (https://github.com/trailofbits/algo/blob/master/docs/cloud-azure.md)
|
||||
You can skip this step if you want to use your default credentials from ~/.azure/credentials
|
||||
echo: false
|
||||
register: _azure_subscription_id
|
||||
when:
|
||||
- azure_subscription_id is undefined
|
||||
- lookup('env','AZURE_SUBSCRIPTION_ID')|length <= 0
|
||||
|
||||
- set_fact:
|
||||
secret: "{{ azure_secret | default(_azure_secret.user_input|default(None)) | default(lookup('env','AZURE_SECRET'), true) }}"
|
||||
tenant: "{{ azure_tenant | default(_azure_tenant.user_input|default(None)) | default(lookup('env','AZURE_TENANT'), true) }}"
|
||||
client_id: "{{ azure_client_id | default(_azure_client_id.user_input|default(None)) | default(lookup('env','AZURE_CLIENT_ID'), true) }}"
|
||||
subscription_id: "{{ azure_subscription_id | default(_azure_subscription_id.user_input|default(None)) | default(lookup('env','AZURE_SUBSCRIPTION_ID'), true) }}"
|
||||
|
||||
- block:
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
aws_regions: "{{ azure_regions | sort(attribute='region_name') }}"
|
||||
|
||||
- name: Set the default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for r in aws_regions %}
|
||||
{%- if r['name'] == "eastus" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
{% for r in aws_regions %}
|
||||
{{ loop.index }}. {{ r['name'] }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
[{{ default_region }}]
|
||||
register: _algo_region
|
||||
when: region is undefined
|
|
@ -1,7 +1,13 @@
|
|||
- block:
|
||||
- name: Set the DigitalOcean Access Token fact
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- name: Set additional facts
|
||||
set_fact:
|
||||
do_token: "{{ do_access_token | default(lookup('env','DO_API_TOKEN'), true) }}"
|
||||
algo_do_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ do_regions[_algo_region.user_input | int -1 ]['slug'] }}
|
||||
{%- else %}{{ do_regions[default_region | int - 1]['slug'] }}{% endif %}
|
||||
public_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
|
||||
- block:
|
||||
|
@ -9,7 +15,7 @@
|
|||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ do_token }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
until: ssh_keys.changed != true
|
||||
|
@ -21,7 +27,7 @@
|
|||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ do_token }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
ignore_errors: yes
|
||||
|
@ -36,7 +42,7 @@
|
|||
state: present
|
||||
command: ssh
|
||||
ssh_pub_key: "{{ public_key }}"
|
||||
api_token: "{{ do_token }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: do_ssh_key
|
||||
|
||||
|
@ -44,69 +50,33 @@
|
|||
digital_ocean:
|
||||
state: present
|
||||
command: droplet
|
||||
name: "{{ do_server_name }}"
|
||||
region_id: "{{ do_region }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
region_id: "{{ algo_do_region }}"
|
||||
size_id: "{{ cloud_providers.digitalocean.size }}"
|
||||
image_id: "{{ cloud_providers.digitalocean.image }}"
|
||||
ssh_key_ids: "{{ do_ssh_key.ssh_key.id }}"
|
||||
unique_name: yes
|
||||
api_token: "{{ do_token }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
ipv6: yes
|
||||
register: do
|
||||
|
||||
- name: Add the droplet to an inventory group
|
||||
add_host:
|
||||
name: "{{ do.droplet.ip_address }}"
|
||||
groups: vpn-host
|
||||
ansible_ssh_user: root
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||
do_access_token: "{{ do_token }}"
|
||||
do_droplet_id: "{{ do.droplet.id }}"
|
||||
cloud_provider: digitalocean
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ do.droplet.ip_address }}"
|
||||
ansible_ssh_user: root
|
||||
|
||||
- name: Tag the droplet
|
||||
digital_ocean_tag:
|
||||
name: "Environment:Algo"
|
||||
resource_id: "{{ do.droplet.id }}"
|
||||
api_token: "{{ do_token }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
state: present
|
||||
|
||||
- name: Get droplets
|
||||
uri:
|
||||
url: "https://api.digitalocean.com/v2/droplets?tag_name=Environment:Algo"
|
||||
method: GET
|
||||
status_code: 200
|
||||
headers:
|
||||
Content-Type: "application/json"
|
||||
Authorization: "Bearer {{ do_token }}"
|
||||
register: do_droplets
|
||||
|
||||
- name: Ensure the group digitalocean exists in the dynamic inventory file
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
line: '[digitalocean]'
|
||||
|
||||
- name: Populate the dynamic inventory
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
insertafter: '\[digitalocean\]'
|
||||
regexp: "^{{ item.networks.v4[0].ip_address }}.*"
|
||||
line: "{{ item.networks.v4[0].ip_address }}"
|
||||
with_items:
|
||||
- "{{ do_droplets.json.droplets }}"
|
||||
|
||||
- block:
|
||||
- name: "Delete the new Algo SSH key"
|
||||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ do_token }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
until: ssh_keys.changed != true
|
||||
|
@ -118,7 +88,7 @@
|
|||
digital_ocean:
|
||||
state: absent
|
||||
command: ssh
|
||||
api_token: "{{ do_token }}"
|
||||
api_token: "{{ algo_do_token }}"
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
register: ssh_keys
|
||||
ignore_errors: yes
|
||||
|
|
46 roles/cloud-digitalocean/tasks/prompts.yml Normal file
|
@ -0,0 +1,46 @@
|
|||
---
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your API token. The token must have read and write permissions (https://cloud.digitalocean.com/settings/api/tokens):
|
||||
echo: false
|
||||
register: _do_token
|
||||
when:
|
||||
- do_token is undefined
|
||||
- lookup('env','DO_API_TOKEN')|length <= 0
|
||||
|
||||
- name: Set the token as a fact
|
||||
set_fact:
|
||||
algo_do_token: "{{ do_token | default(_do_token.user_input|default(None)) | default(lookup('env','DO_API_TOKEN'), true) }}"
|
||||
|
||||
- name: Get regions
|
||||
uri:
|
||||
url: https://api.digitalocean.com/v2/regions
|
||||
method: GET
|
||||
status_code: 200
|
||||
headers:
|
||||
Content-Type: "application/json"
|
||||
Authorization: "Bearer {{ algo_do_token }}"
|
||||
register: _do_regions
|
||||
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
do_regions: "{{ _do_regions.json.regions | sort(attribute='slug') }}"
|
||||
|
||||
- name: Set default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for r in do_regions %}
|
||||
{%- if r['slug'] == "nyc3" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
{% for r in do_regions %}
|
||||
{{ loop.index }}. {{ r['slug'] }} {{ r['name'] }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
[{{ default_region }}]
|
||||
register: _algo_region
|
||||
when: region is undefined
|
|
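Every new prompts.yml uses the same fallback chain: an explicit variable wins, otherwise the interactive answer, otherwise an environment variable. A distilled sketch of the pattern, with illustrative variable and environment names:

- pause:
    prompt: Enter your API token
    echo: false
  register: _token
  when:
    - api_token is undefined
    - lookup('env','API_TOKEN')|length <= 0

# explicit var -> prompted value -> environment variable
- set_fact:
    algo_api_token: "{{ api_token | default(_token.user_input|default(None)) | default(lookup('env','API_TOKEN'), true) }}"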
@ -1,6 +0,0 @@
|
|||
iface eth0 inet6 static
|
||||
address {{ item.ip_address }}
|
||||
netmask {{ item.netmask }}
|
||||
gateway {{ item.gateway }}
|
||||
autoconf 0
|
||||
dns-nameservers 2001:4860:4860::8844 2001:4860:4860::8888
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
|
||||
ami_search_encrypted: omit
|
||||
encrypted: "{{ cloud_providers.ec2.encrypted }}"
|
||||
ec2_vpc_nets:
|
||||
cidr_block: 172.16.0.0/16
|
||||
subnet_cidr: 172.16.254.0/23
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
---
|
||||
- name: Deploy the template
|
||||
cloudformation:
|
||||
aws_access_key: "{{ aws_access_key | default(lookup('env','AWS_ACCESS_KEY_ID'), true)}}"
|
||||
aws_secret_key: "{{ aws_secret_key | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true)}}"
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
stack_name: "{{ stack_name }}"
|
||||
state: "present"
|
||||
region: "{{ region }}"
|
||||
region: "{{ algo_region }}"
|
||||
template: roles/cloud-ec2/files/stack.yml
|
||||
template_parameters:
|
||||
InstanceTypeParameter: "{{ cloud_providers.ec2.size }}"
|
||||
|
|
|
@ -1,37 +1,27 @@
|
|||
---
|
||||
- name: Check if the encrypted image already exist
|
||||
ec2_ami_find:
|
||||
aws_access_key: "{{ aws_access_key | default(lookup('env','AWS_ACCESS_KEY_ID'), true)}}"
|
||||
aws_secret_key: "{{ aws_secret_key | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true)}}"
|
||||
owner: self
|
||||
sort: creationDate
|
||||
sort_order: descending
|
||||
sort_end: 1
|
||||
state: available
|
||||
ami_tags:
|
||||
Algo: "encrypted"
|
||||
region: "{{ region }}"
|
||||
ec2_ami_facts:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
owners: self
|
||||
region: "{{ algo_region }}"
|
||||
filters:
|
||||
state: available
|
||||
"tag:Algo": encrypted
|
||||
register: search_crypt
|
||||
|
||||
- set_fact:
|
||||
ami_image: "{{ search_crypt.results[0].ami_id }}"
|
||||
when: search_crypt.results
|
||||
|
||||
- name: Copy to an encrypted image
|
||||
ec2_ami_copy:
|
||||
aws_access_key: "{{ aws_access_key | default(lookup('env','AWS_ACCESS_KEY_ID'), true)}}"
|
||||
aws_secret_key: "{{ aws_secret_key | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true)}}"
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
encrypted: yes
|
||||
name: algo
|
||||
kms_key_id: "{{ kms_key_id | default(omit) }}"
|
||||
region: "{{ region }}"
|
||||
source_image_id: "{{ ami_image }}"
|
||||
source_region: "{{ region }}"
|
||||
region: "{{ algo_region }}"
|
||||
source_image_id: "{{ (ami_search.images | sort(attribute='creation_date') | last)['image_id'] }}"
|
||||
source_region: "{{ algo_region }}"
|
||||
wait: true
|
||||
tags:
|
||||
Algo: "encrypted"
|
||||
wait: true
|
||||
register: enc_image
|
||||
when: not search_crypt.results
|
||||
|
||||
- set_fact:
|
||||
ami_image: "{{ enc_image.image_id }}"
|
||||
when: not search_crypt.results
|
||||
register: ami_search_encrypted
|
||||
when: search_crypt.images|length|int == 0
|
||||
|
|
|
@ -1,66 +1,40 @@
|
|||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- set_fact:
|
||||
access_key: "{{ aws_access_key | default(lookup('env','AWS_ACCESS_KEY_ID'), true) }}"
|
||||
secret_key: "{{ aws_secret_key | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}"
|
||||
stack_name: "{{ aws_server_name | replace('.', '-') }}"
|
||||
algo_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ aws_regions[_algo_region.user_input | int -1 ]['region_name'] }}
|
||||
{%- else %}{{ aws_regions[default_region | int - 1]['region_name'] }}{% endif %}
|
||||
stack_name: "{{ algo_server_name | replace('.', '-') }}"
|
||||
|
||||
- name: Locate official AMI for region
|
||||
ec2_ami_find:
|
||||
ec2_ami_facts:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
name: "ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-amd64-server-*"
|
||||
owner: "{{ cloud_providers.ec2.image.owner }}"
|
||||
sort: creationDate
|
||||
sort_order: descending
|
||||
sort_end: 1
|
||||
region: "{{ region }}"
|
||||
owners: "{{ cloud_providers.ec2.image.owner }}"
|
||||
region: "{{ algo_region }}"
|
||||
filters:
|
||||
name: "ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-amd64-server-*"
|
||||
register: ami_search
|
||||
|
||||
- set_fact:
|
||||
ami_image: "{{ ami_search.results[0].ami_id }}"
|
||||
- import_tasks: encrypt_image.yml
|
||||
when: encrypted
|
||||
|
||||
- include_tasks: encrypt_image.yml
|
||||
tags: [encrypted]
|
||||
- name: Set the ami id as a fact
|
||||
set_fact:
|
||||
ami_image: >-
|
||||
{% if ami_search_encrypted.image_id is defined %}{{ ami_search_encrypted.image_id }}
|
||||
{%- elif search_crypt.images is defined and search_crypt.images|length >= 1 %}{{ (search_crypt.images | sort(attribute='creation_date') | last)['image_id'] }}
|
||||
{%- else %}{{ (ami_search.images | sort(attribute='creation_date') | last)['image_id'] }}{% endif %}
|
||||
|
||||
- include_tasks: cloudformation.yml
|
||||
|
||||
- name: Add new instance to host group
|
||||
add_host:
|
||||
hostname: "{{ stack.stack_outputs.ElasticIP }}"
|
||||
groupname: vpn-host
|
||||
ansible_ssh_user: ubuntu
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||
cloud_provider: ec2
|
||||
- name: Deploy the stack
|
||||
import_tasks: cloudformation.yml
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ stack.stack_outputs.ElasticIP }}"
|
||||
|
||||
- name: Get EC2 instances
|
||||
ec2_instance_facts:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
region: "{{ region }}"
|
||||
filters:
|
||||
instance-state-name: running
|
||||
"tag:Environment": Algo
|
||||
register: algo_instances
|
||||
|
||||
- name: Ensure the group ec2 exists in the dynamic inventory file
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
line: '[ec2]'
|
||||
|
||||
- name: Populate the dynamic inventory
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
insertafter: '\[ec2\]'
|
||||
regexp: "^{{ item.public_ip_address }}.*"
|
||||
line: "{{ item.public_ip_address }}"
|
||||
with_items:
|
||||
- "{{ algo_instances.instances }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
|
|
55 roles/cloud-ec2/tasks/prompts.yml Normal file
|
@ -0,0 +1,55 @@
|
|||
---
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your aws_access_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
|
||||
Note: Make sure to use an IAM user with an acceptable policy attached (see https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md)
|
||||
echo: false
|
||||
register: _aws_access_key
|
||||
when:
|
||||
- aws_access_key is undefined
|
||||
- lookup('env','AWS_ACCESS_KEY_ID')|length <= 0
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your aws_secret_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
|
||||
echo: false
|
||||
register: _aws_secret_key
|
||||
when:
|
||||
- aws_secret_key is undefined
|
||||
- lookup('env','AWS_SECRET_ACCESS_KEY')|length <= 0
|
||||
|
||||
- set_fact:
|
||||
access_key: "{{ aws_access_key | default(_aws_access_key.user_input|default(None)) | default(lookup('env','AWS_ACCESS_KEY_ID'), true) }}"
|
||||
secret_key: "{{ aws_secret_key | default(_aws_secret_key.user_input|default(None)) | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}"
|
||||
|
||||
- block:
|
||||
- name: Get regions
|
||||
aws_region_facts:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
region: us-east-1
|
||||
register: _aws_regions
|
||||
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
aws_regions: "{{ _aws_regions.regions | sort(attribute='region_name') }}"
|
||||
|
||||
- name: Set the default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for r in aws_regions %}
|
||||
{%- if r['region_name'] == "us-east-1" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
(https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
|
||||
{% for r in aws_regions %}
|
||||
{{ loop.index }}. {{ r['region_name'] }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
[{{ default_region }}]
|
||||
register: _algo_region
|
||||
when: region is undefined
|
|
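The region menus above are numbered with Jinja's 1-based loop.index, so the roles subtract one when they map the answer back onto the 0-based region list. A distilled sketch of that mapping, reusing the EC2 variable names from this commit:

- set_fact:
    algo_region: >-
      {% if region is defined %}{{ region }}
      {%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ aws_regions[_algo_region.user_input | int - 1]['region_name'] }}
      {%- else %}{{ aws_regions[default_region | int - 1]['region_name'] }}{% endif %}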
@ -1,68 +1,37 @@
|
|||
- block:
|
||||
- set_fact:
|
||||
credentials_file_path: "{{ credentials_file | default(lookup('env','GCE_CREDENTIALS_FILE_PATH'), true) }}"
|
||||
ssh_public_key_lookup: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
|
||||
- set_fact:
|
||||
credentials_file_lookup: "{{ lookup('file', '{{ credentials_file_path }}') }}"
|
||||
|
||||
- set_fact:
|
||||
service_account_email: "{{ credentials_file_lookup.client_email | default(lookup('env','GCE_EMAIL')) }}"
|
||||
project_id: "{{ credentials_file_lookup.project_id | default(lookup('env','GCE_PROJECT')) }}"
|
||||
server_name: "{{ gce_server_name | replace('_', '-') }}"
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- name: Network configured
|
||||
gce_net:
|
||||
name: "algo-net-{{ server_name }}"
|
||||
fwname: "algo-net-{{ server_name }}-fw"
|
||||
name: "algo-net-{{ algo_server_name }}"
|
||||
fwname: "algo-net-{{ algo_server_name }}-fw"
|
||||
allowed: "udp:500,4500,{{ wireguard_port }};tcp:22"
|
||||
state: "present"
|
||||
mode: auto
|
||||
src_range: 0.0.0.0/0
|
||||
service_account_email: "{{ credentials_file_lookup.client_email }}"
|
||||
credentials_file: "{{ credentials_file }}"
|
||||
project_id: "{{ credentials_file_lookup.project_id }}"
|
||||
service_account_email: "{{ service_account_email }}"
|
||||
credentials_file: "{{ credentials_file_path }}"
|
||||
project_id: "{{ project_id }}"
|
||||
|
||||
- name: "Creating a new instance..."
|
||||
gce:
|
||||
instance_names: "{{ server_name }}"
|
||||
zone: "{{ zone }}"
|
||||
instance_names: "{{ algo_server_name }}"
|
||||
zone: "{{ algo_region }}"
|
||||
machine_type: "{{ cloud_providers.gce.size }}"
|
||||
image: "{{ cloud_providers.gce.image }}"
|
||||
service_account_email: "{{ service_account_email }}"
|
||||
credentials_file: "{{ credentials_file_path }}"
|
||||
project_id: "{{ project_id }}"
|
||||
metadata: '{"ssh-keys":"ubuntu:{{ ssh_public_key_lookup }}"}'
|
||||
network: "algo-net-{{ server_name }}"
|
||||
network: "algo-net-{{ algo_server_name }}"
|
||||
tags:
|
||||
- "environment-algo"
|
||||
register: google_vm
|
||||
|
||||
- name: Add the instance to an inventory group
|
||||
add_host:
|
||||
name: "{{ google_vm.instance_data[0].public_ip }}"
|
||||
groups: vpn-host
|
||||
ansible_ssh_user: ubuntu
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||
cloud_provider: gce
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ google_vm.instance_data[0].public_ip }}"
|
||||
|
||||
- name: Ensure the group gce exists in the dynamic inventory file
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
line: '[gce]'
|
||||
|
||||
- name: Populate the dynamic inventory
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
insertafter: '\[gce\]'
|
||||
regexp: "^{{ google_vm.instance_data[0].public_ip }}.*"
|
||||
line: "{{ google_vm.instance_data[0].public_ip }}"
|
||||
ansible_ssh_user: ubuntu
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
|
|
67 roles/cloud-gce/tasks/prompts.yml Normal file
|
@ -0,0 +1,67 @@
|
|||
---
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter the local path to your credentials JSON file
|
||||
(https://support.google.com/cloud/answer/6158849?hl=en&ref_topic=6262490#serviceaccounts)
|
||||
register: _gce_credentials_file
|
||||
when:
|
||||
- gce_credentials_file is undefined
|
||||
- lookup('env','GCE_CREDENTIALS_FILE_PATH')|length <= 0
|
||||
|
||||
- set_fact:
|
||||
credentials_file_path: "{{ gce_credentials_file | default(_gce_credentials_file.user_input|default(None)) | default(lookup('env','GCE_CREDENTIALS_FILE_PATH'), true) }}"
|
||||
ssh_public_key_lookup: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
|
||||
- set_fact:
|
||||
credentials_file_lookup: "{{ lookup('file', '{{ credentials_file_path }}') }}"
|
||||
|
||||
- set_fact:
|
||||
service_account_email: "{{ credentials_file_lookup.client_email | default(lookup('env','GCE_EMAIL')) }}"
|
||||
project_id: "{{ credentials_file_lookup.project_id | default(lookup('env','GCE_PROJECT')) }}"
|
||||
|
||||
- block:
|
||||
- name: Get regions
|
||||
gce_region_facts:
|
||||
service_account_email: "{{ credentials_file_lookup.client_email }}"
|
||||
credentials_file: "{{ credentials_file_path }}"
|
||||
project_id: "{{ credentials_file_lookup.project_id }}"
|
||||
register: _gce_regions
|
||||
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
gce_regions: >-
|
||||
[{%- for region in _gce_regions.results.regions | sort(attribute='name') -%}
|
||||
{% if region.status == "UP" %}
|
||||
{% for zone in region.zones | sort(attribute='name') %}
|
||||
{% if zone.status == "UP" %}
|
||||
'{{ zone.name }}'
|
||||
{% endif %}{% if not loop.last %},{% endif %}
|
||||
{% endfor %}
|
||||
{% endif %}{% if not loop.last %},{% endif %}
|
||||
{%- endfor -%}]
|
||||
|
||||
- name: Set facts about the default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for region in gce_regions %}
|
||||
{%- if region == "us-east1-b" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
(https://cloud.google.com/compute/docs/regions-zones/)
|
||||
{% for r in gce_regions %}
|
||||
{{ loop.index }}. {{ r }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
[{{ default_region }}]
|
||||
register: _gce_region
|
||||
when: region is undefined
|
||||
|
||||
- set_fact:
|
||||
algo_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _gce_region.user_input is defined and _gce_region.user_input != "" %}{{ gce_regions[_gce_region.user_input | int -1 ] }}
|
||||
{%- else %}{{ gce_regions[default_region | int - 1] }}{% endif %}
|
|
@ -1,8 +1,6 @@
|
|||
- block:
|
||||
- set_fact:
|
||||
access_key: "{{ aws_access_key | default(lookup('env','AWS_ACCESS_KEY_ID'), true) }}"
|
||||
secret_key: "{{ aws_secret_key | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}"
|
||||
region: "{{ algo_region | default(lookup('env','AWS_DEFAULT_REGION'), true) }}"
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- name: Create an instance
|
||||
lightsail:
|
||||
|
@ -10,8 +8,8 @@
|
|||
aws_secret_key: "{{ secret_key }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
state: present
|
||||
region: "{{ region }}"
|
||||
zone: "{{ region }}a"
|
||||
region: "{{ algo_region }}"
|
||||
zone: "{{ algo_region }}a"
|
||||
blueprint_id: "{{ cloud_providers.lightsail.image }}"
|
||||
bundle_id: "{{ cloud_providers.lightsail.size }}"
|
||||
wait_timeout: 300
|
||||
|
@ -37,15 +35,7 @@
|
|||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ algo_instance['instance']['public_ip_address'] }}"
|
||||
|
||||
- name: Add new instance to host group
|
||||
add_host:
|
||||
hostname: "{{ cloud_instance_ip }}"
|
||||
groupname: vpn-host
|
||||
ansible_ssh_user: ubuntu
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||
cloud_provider: lightsail
|
||||
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
|
|
60 roles/cloud-lightsail/tasks/prompts.yml Normal file
|
@ -0,0 +1,60 @@
|
|||
---
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your aws_access_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
|
||||
Note: Make sure to use an IAM user with an acceptable policy attached (see https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md)
|
||||
echo: false
|
||||
register: _aws_access_key
|
||||
when:
|
||||
- aws_access_key is undefined
|
||||
- lookup('env','AWS_ACCESS_KEY_ID')|length <= 0
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your aws_secret_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
|
||||
echo: false
|
||||
register: _aws_secret_key
|
||||
when:
|
||||
- aws_secret_key is undefined
|
||||
- lookup('env','AWS_SECRET_ACCESS_KEY')|length <= 0
|
||||
|
||||
- set_fact:
|
||||
access_key: "{{ aws_access_key | default(_aws_access_key.user_input|default(None)) | default(lookup('env','AWS_ACCESS_KEY_ID'), true) }}"
|
||||
secret_key: "{{ aws_secret_key | default(_aws_secret_key.user_input|default(None)) | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}"
|
||||
|
||||
- block:
|
||||
- name: Get regions
|
||||
lightsail_region_facts:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
register: _lightsail_regions
|
||||
|
||||
- name: Set facts about the regions
|
||||
set_fact:
|
||||
lightsail_regions: "{{ _lightsail_regions.results.regions | sort(attribute='name') }}"
|
||||
|
||||
- name: Set the default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for r in lightsail_regions %}
|
||||
{%- if r['name'] == "eu-west-1" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
(https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/)
|
||||
{% for r in lightsail_regions %}
|
||||
{{ loop.index }}. {{ r['name'] }} {{ r['displayName'] }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
[{{ default_region }}]
|
||||
register: _algo_region
|
||||
when: region is undefined
|
||||
|
||||
- set_fact:
|
||||
algo_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ lightsail_regions[_algo_region.user_input | int -1 ]['name'] }}
|
||||
{%- else %}{{ lightsail_regions[default_region | int - 1]['name'] }}{% endif %}
|
|
@ -1,4 +1,8 @@
|
|||
---
|
||||
- fail:
|
||||
msg: "OpenStack credentials are not set. Download it from the OpenStack dashboard->Compute->API Access and source it in the shell (eg: source /tmp/dhc-openrc.sh)"
|
||||
when: lookup('env', 'OS_AUTH_URL') == ""
|
||||
|
||||
- block:
|
||||
- name: Security group created
|
||||
os_security_group:
|
||||
|
@ -70,15 +74,7 @@
|
|||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ os_server['openstack']['public_v4'] }}"
|
||||
|
||||
- name: Add new instance to host group
|
||||
add_host:
|
||||
hostname: "{{ cloud_instance_ip }}"
|
||||
groupname: vpn-host
|
||||
ansible_ssh_user: ubuntu
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||
cloud_provider: openstack
|
||||
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
|
|
4 roles/cloud-scaleway/defaults/main.yml Normal file
|
@@ -0,0 +1,4 @@
---
scaleway_regions:
  - alias: par1
  - alias: ams1
|
|
@ -1,11 +1,14 @@
|
|||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- name: Check if server exists
|
||||
uri:
|
||||
url: "https://cp-{{ algo_region }}.scaleway.com/servers"
|
||||
method: GET
|
||||
headers:
|
||||
Content-Type: 'application/json'
|
||||
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
status_code: 200
|
||||
register: scaleway_servers
|
||||
|
||||
|
@ -24,7 +27,7 @@
|
|||
method: GET
|
||||
headers:
|
||||
Content-Type: 'application/json'
|
||||
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
status_code: 200
|
||||
register: scaleway_organizations
|
||||
|
||||
|
@ -32,7 +35,7 @@
|
|||
set_fact:
|
||||
organization_id: "{{ item.id }}"
|
||||
no_log: true
|
||||
when: scaleway_organization == item.name
|
||||
when: algo_scaleway_org == item.name
|
||||
with_items: "{{ scaleway_organizations.json.organizations }}"
|
||||
|
||||
- name: Get total count of images
|
||||
|
@ -41,7 +44,7 @@
|
|||
method: GET
|
||||
headers:
|
||||
Content-Type: 'application/json'
|
||||
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
status_code: 200
|
||||
register: scaleway_pages
|
||||
|
||||
|
@ -68,7 +71,7 @@
|
|||
method: POST
|
||||
headers:
|
||||
Content-Type: 'application/json'
|
||||
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
body:
|
||||
organization: "{{ organization_id }}"
|
||||
name: "{{ algo_server_name }}"
|
||||
|
@ -94,7 +97,7 @@
|
|||
method: POST
|
||||
headers:
|
||||
Content-Type: application/json
|
||||
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
body:
|
||||
action: poweron
|
||||
status_code: 202
|
||||
|
@ -108,7 +111,7 @@
|
|||
method: GET
|
||||
headers:
|
||||
Content-Type: 'application/json'
|
||||
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||
X-Auth-Token: "{{ algo_scaleway_token }}"
|
||||
status_code: 200
|
||||
until:
|
||||
- algo_instance.json.server.state is defined
|
||||
|
@ -119,15 +122,7 @@
|
|||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ algo_instance['json']['server']['public_ip']['address'] }}"
|
||||
|
||||
- name: Add new instance to host group
|
||||
add_host:
|
||||
hostname: "{{ cloud_instance_ip }}"
|
||||
groupname: vpn-host
|
||||
ansible_ssh_user: root
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||
cloud_provider: scaleway
|
||||
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
|
|
roles/cloud-scaleway/tasks/prompts.yml (new file, 34 lines)
|
@ -0,0 +1,34 @@
|
|||
---
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your auth token (https://www.scaleway.com/docs/generate-an-api-token/)
|
||||
echo: false
|
||||
register: _scaleway_token
|
||||
when: scaleway_token is undefined
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your organization name (https://cloud.scaleway.com/#/billing)
|
||||
register: _scaleway_org
|
||||
when: scaleway_org is undefined
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
{% for r in scaleway_regions %}
|
||||
{{ loop.index }}. {{ r['alias'] }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
[{{ scaleway_regions.0.alias }}]
|
||||
register: _algo_region
|
||||
when: region is undefined
|
||||
|
||||
- name: Set scaleway facts
|
||||
set_fact:
|
||||
algo_scaleway_token: "{{ scaleway_token | default(_scaleway_token.user_input) }}"
|
||||
algo_scaleway_org: "{{ scaleway_org | default(_scaleway_org.user_input|default(omit)) }}"
|
||||
algo_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ scaleway_regions[_algo_region.user_input | int -1 ]['alias'] }}
|
||||
{%- else %}{{ scaleway_regions.0.alias }}{% endif %}
|
roles/cloud-vultr/tasks/main.yml (new file, 36 lines)
|
@ -0,0 +1,36 @@
|
|||
- block:
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- name: Upload the SSH key
|
||||
vr_ssh_key:
|
||||
name: "{{ SSH_keys.comment }}"
|
||||
ssh_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
register: ssh_key
|
||||
|
||||
- name: Creating a server
|
||||
vr_server:
|
||||
name: "{{ algo_server_name }}"
|
||||
hostname: "{{ algo_server_name }}"
|
||||
os: "{{ cloud_providers.vultr.os }}"
|
||||
plan: "{{ cloud_providers.vultr.size }}"
|
||||
region: "{{ algo_vultr_region }}"
|
||||
state: started
|
||||
tag: Environment:Algo
|
||||
ssh_key: "{{ ssh_key.vultr_ssh_key.name }}"
|
||||
ipv6_enabled: true
|
||||
auto_backup_enabled: false
|
||||
notify_activate: false
|
||||
register: vultr_server
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ vultr_server.vultr_server.v4_main_ip }}"
|
||||
ansible_ssh_user: root
|
||||
|
||||
environment:
|
||||
VULTR_API_CONFIG: "{{ algo_vultr_config }}"
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
roles/cloud-vultr/tasks/prompts.yml (new file, 56 lines)
|
@ -0,0 +1,56 @@
|
|||
---
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter the local path to your configuration INI file
|
||||
(https://github.com/trailofbits/algo/docs/cloud-vultr.md):
|
||||
register: _vultr_config
|
||||
when: vultr_config is undefined
|
||||
|
||||
- name: Set the token as a fact
|
||||
set_fact:
|
||||
algo_vultr_config: "{{ vultr_config | default(_vultr_config.user_input) | default(lookup('env','VULTR_API_CONFIG'), true) }}"
|
||||
|
||||
- name: Get regions
|
||||
uri:
|
||||
url: https://api.vultr.com/v1/regions/list
|
||||
method: GET
|
||||
status_code: 200
|
||||
register: _vultr_regions
|
||||
|
||||
- name: Format regions
|
||||
set_fact:
|
||||
regions: >-
|
||||
[ {% for k, v in _vultr_regions.json.items() %}
|
||||
{{ v }}{% if not loop.last %},{% endif %}
|
||||
{% endfor %} ]
|
||||
|
||||
- name: Set regions as a fact
|
||||
set_fact:
|
||||
vultr_regions: "{{ regions | sort(attribute='country') }}"
|
||||
|
||||
- name: Set default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for r in vultr_regions %}
|
||||
{%- if r['DCID'] == "1" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
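The two set_fact tasks above flatten the Vultr API response into a list sorted by country and then find the menu position of the region whose DCID is "1". A rough Python equivalent, using an invented two-entry API response:

    # Hypothetical, trimmed-down response from https://api.vultr.com/v1/regions/list
    api_response = {
        "1": {"DCID": "1", "name": "New Jersey", "country": "US"},
        "7": {"DCID": "7", "name": "Amsterdam", "country": "NL"},
    }

    # "Format regions" + "Set regions as a fact": values sorted by country
    vultr_regions = sorted(api_response.values(), key=lambda r: r["country"])

    # "Set default region": 1-based menu index of the region with DCID == "1"
    default_region = next(i for i, r in enumerate(vultr_regions, start=1)
                          if r["DCID"] == "1")

    print(default_region, vultr_regions[default_region - 1]["name"])  # 2 New Jersey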
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
(https://www.vultr.com/locations/):
|
||||
{% for r in vultr_regions %}
|
||||
{{ loop.index }}. {{ r['name'] }}
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
[{{ default_region }}]
|
||||
register: _algo_region
|
||||
when: region is undefined
|
||||
|
||||
- name: Set the desired region as a fact
|
||||
set_fact:
|
||||
algo_vultr_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input is defined and _algo_region.user_input != "" %}{{ vultr_regions[_algo_region.user_input | int -1 ]['name'] }}
|
||||
{%- else %}{{ vultr_regions[default_region | int - 1]['name'] }}{% endif %}
|
roles/common/tasks/facts.yml (new file, 26 lines)
|
@ -0,0 +1,26 @@
|
|||
---
|
||||
- block:
|
||||
- name: Generate password for the CA key
|
||||
local_action:
|
||||
module: shell
|
||||
openssl rand -hex 16
|
||||
register: CA_password
|
||||
|
||||
- name: Generate p12 export password
|
||||
local_action:
|
||||
module: shell
|
||||
openssl rand 8 | python -c 'import sys,string; chars=string.ascii_letters + string.digits + "_@"; print "".join([chars[ord(c) % 64] for c in list(sys.stdin.read())])'
|
||||
register: p12_password_generated
|
||||
when: p12_password is not defined
|
||||
tags: update-users
|
||||
become: false
|
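The p12 password one-liner above is Python 2 (it uses the print statement), which matches the python2.7 interpreter the playbooks still target. For reference, a hedged Python 3 sketch of the same derivation, self-contained instead of piping from openssl: 8 random bytes mapped onto a 64-character alphabet of letters, digits, "_" and "@".

    import os
    import string

    # Same idea as `openssl rand 8 | python -c ...` above: each random byte,
    # reduced modulo 64, indexes into a 64-character alphabet.
    chars = string.ascii_letters + string.digits + "_@"   # 52 + 10 + 2 = 64
    p12_password = "".join(chars[b % 64] for b in os.urandom(8))
    print(p12_password)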
||||
|
||||
- name: Define facts
|
||||
set_fact:
|
||||
p12_export_password: "{{ p12_password|default(p12_password_generated.stdout) }}"
|
||||
tags: update-users
|
||||
|
||||
- set_fact:
|
||||
CA_password: "{{ CA_password.stdout }}"
|
||||
IP_subject_alt_name: "{{ IP_subject_alt_name }}"
|
||||
ipv6_support: "{% if ansible_default_ipv6['gateway'] is defined %}true{% else %}false{% endif %}"
|
|
@ -1,6 +1,13 @@
|
|||
---
|
||||
|
||||
- set_fact:
|
||||
config_prefix: "/usr/local/"
|
||||
root_group: wheel
|
||||
ssh_service_name: sshd
|
||||
apparmor_enabled: false
|
||||
strongswan_additional_plugins:
|
||||
- kernel-pfroute
|
||||
- kernel-pfkey
|
||||
ansible_python_interpreter: /usr/local/bin/python2.7
|
||||
tools:
|
||||
- git
|
||||
- subversion
|
||||
|
@ -17,6 +24,15 @@
|
|||
tags:
|
||||
- always
|
||||
|
||||
- setup:
|
||||
|
||||
- name: Install tools
|
||||
package: name="{{ item }}" state=present
|
||||
with_items:
|
||||
- "{{ tools|default([]) }}"
|
||||
tags:
|
||||
- always
|
||||
|
||||
- name: Loopback included into the rc config
|
||||
blockinfile:
|
||||
dest: /etc/rc.conf
|
||||
|
|
|
@ -1,26 +1,26 @@
|
|||
---
|
||||
- block:
|
||||
- include_tasks: ubuntu.yml
|
||||
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
|
||||
- name: Check the system
|
||||
raw: uname -a
|
||||
register: OS
|
||||
|
||||
- include_tasks: freebsd.yml
|
||||
when: ansible_distribution == 'FreeBSD'
|
||||
- include_tasks: ubuntu.yml
|
||||
when: '"Ubuntu" in OS.stdout or "Linux" in OS.stdout'
|
||||
|
||||
- name: Install tools
|
||||
package: name="{{ item }}" state=present
|
||||
with_items:
|
||||
- "{{ tools|default([]) }}"
|
||||
tags:
|
||||
- always
|
||||
- include_tasks: freebsd.yml
|
||||
when: '"FreeBSD" in OS.stdout'
|
||||
|
||||
- name: Sysctl tuning
|
||||
sysctl: name="{{ item.item }}" value="{{ item.value }}"
|
||||
with_items:
|
||||
- "{{ sysctl|default([]) }}"
|
||||
tags:
|
||||
- always
|
||||
- name: Gather additional facts
|
||||
import_tasks: facts.yml
|
||||
|
||||
- meta: flush_handlers
|
||||
- name: Sysctl tuning
|
||||
sysctl: name="{{ item.item }}" value="{{ item.value }}"
|
||||
with_items:
|
||||
- "{{ sysctl|default([]) }}"
|
||||
tags:
|
||||
- always
|
||||
|
||||
- meta: flush_handlers
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
|
|
|
@ -1,52 +1,69 @@
|
|||
---
|
||||
- block:
|
||||
- name: Ubuntu | Install prerequisites
|
||||
apt:
|
||||
name: "{{ item }}"
|
||||
update_cache: true
|
||||
with_items:
|
||||
- python2.7
|
||||
- sudo
|
||||
|
||||
- name: Ubuntu | Configure defaults
|
||||
alternatives:
|
||||
name: python
|
||||
link: /usr/bin/python
|
||||
path: /usr/bin/python2.7
|
||||
priority: 1
|
||||
tags:
|
||||
- update-alternatives
|
||||
vars:
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
|
||||
- name: Gather facts
|
||||
setup:
|
||||
|
||||
- name: Cloud only tasks
|
||||
block:
|
||||
- name: Install software updates
|
||||
apt:
|
||||
update_cache: true
|
||||
install_recommends: true
|
||||
upgrade: dist
|
||||
- name: Install software updates
|
||||
apt:
|
||||
update_cache: true
|
||||
install_recommends: true
|
||||
upgrade: dist
|
||||
|
||||
- name: Upgrade the ca certificates
|
||||
apt:
|
||||
name: ca-certificates
|
||||
state: latest
|
||||
- name: Check if reboot is required
|
||||
shell: >
|
||||
if [[ -e /var/run/reboot-required ]]; then echo "required"; else echo "no"; fi
|
||||
args:
|
||||
executable: /bin/bash
|
||||
register: reboot_required
|
||||
|
||||
- name: Check if reboot is required
|
||||
shell: >
|
||||
if [[ -e /var/run/reboot-required ]]; then echo "required"; else echo "no"; fi
|
||||
args:
|
||||
executable: /bin/bash
|
||||
register: reboot_required
|
||||
- name: Reboot
|
||||
shell: sleep 2 && shutdown -r now "Ansible updates triggered"
|
||||
async: 1
|
||||
poll: 0
|
||||
when: reboot_required is defined and reboot_required.stdout == 'required'
|
||||
ignore_errors: true
|
||||
|
||||
- name: Reboot
|
||||
shell: sleep 2 && shutdown -r now "Ansible updates triggered"
|
||||
async: 1
|
||||
poll: 0
|
||||
when: reboot_required is defined and reboot_required.stdout == 'required'
|
||||
ignore_errors: true
|
||||
- name: Wait until SSH becomes ready...
|
||||
local_action:
|
||||
module: wait_for
|
||||
port: 22
|
||||
host: "{{ inventory_hostname }}"
|
||||
search_regex: OpenSSH
|
||||
delay: 10
|
||||
timeout: 320
|
||||
when: reboot_required is defined and reboot_required.stdout == 'required'
|
||||
become: false
|
||||
when: algo_provider != "local"
|
||||
|
||||
- name: Wait until SSH becomes ready...
|
||||
local_action:
|
||||
module: wait_for
|
||||
port: 22
|
||||
host: "{{ inventory_hostname }}"
|
||||
search_regex: OpenSSH
|
||||
delay: 10
|
||||
timeout: 320
|
||||
when: reboot_required is defined and reboot_required.stdout == 'required'
|
||||
become: false
|
||||
- name: Include unattended upgrades configuration
|
||||
import_tasks: unattended-upgrades.yml
|
||||
|
||||
- name: Include unattended upgrades configuration
|
||||
include_tasks: unattended-upgrades.yml
|
||||
|
||||
- name: Disable MOTD on login and SSHD
|
||||
replace: dest="{{ item.file }}" regexp="{{ item.regexp }}" replace="{{ item.line }}"
|
||||
with_items:
|
||||
- { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/login' }
|
||||
- { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/sshd' }
|
||||
tags:
|
||||
- cloud
|
||||
- name: Disable MOTD on login and SSHD
|
||||
replace: dest="{{ item.file }}" regexp="{{ item.regexp }}" replace="{{ item.line }}"
|
||||
with_items:
|
||||
- { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/login' }
|
||||
- { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/sshd' }
|
||||
|
||||
- name: Loopback for services configured
|
||||
template:
|
||||
|
@ -101,3 +118,10 @@
|
|||
value: 1
|
||||
tags:
|
||||
- always
|
||||
|
||||
- name: Install tools
|
||||
package: name="{{ item }}" state=present
|
||||
with_items:
|
||||
- "{{ tools|default([]) }}"
|
||||
tags:
|
||||
- always
|
||||
|
|
|
@ -1,7 +0,0 @@
|
|||
---
|
||||
|
||||
dependencies:
|
||||
- { role: common, tags: common }
|
||||
- role: dns_encryption
|
||||
tags: dns_encryption
|
||||
when: dns_encryption == true
|
|
@ -1,10 +1,5 @@
|
|||
---
|
||||
- block:
|
||||
|
||||
- name: The DNS tag is defined
|
||||
set_fact:
|
||||
local_dns: true
|
||||
|
||||
- name: Dnsmasq installed
|
||||
package: name=dnsmasq
|
||||
|
||||
|
|
|
@ -88,7 +88,7 @@ no-resolv
|
|||
# You can control how dnsmasq talks to a server: this forces
|
||||
# queries to 10.1.2.3 to be routed via eth1
|
||||
# server=10.1.2.3@eth1
|
||||
{% if dns_encryption|default(false)|bool == true %}
|
||||
{% if dns_encryption %}
|
||||
server={{ local_service_ip }}#5353
|
||||
{% else %}
|
||||
{% for host in dns_servers.ipv4 %}
|
||||
|
|
|
@ -1,7 +1,9 @@
|
|||
---
|
||||
listen_port: "{% if local_dns|d(false)|bool == true %}5353{% else %}53{% endif %}"
|
||||
algo_local_dns: false
|
||||
listen_port: "{% if algo_local_dns %}5353{% else %}53{% endif %}"
|
||||
# the version used if the latest unavailable (in case of Github API rate limited)
|
||||
dnscrypt_proxy_version: 2.0.10
|
||||
apparmor_enabled: true
|
||||
dns_encryption: true
|
||||
dns_encryption_provider: "*"
|
||||
ipv6_support: false
|
||||
|
|
|
@ -8,3 +8,10 @@
|
|||
name: dnscrypt-proxy
|
||||
state: restarted
|
||||
daemon_reload: true
|
||||
when: ansible_distribution == 'Ubuntu'
|
||||
|
||||
- name: restart dnscrypt-proxy
|
||||
service:
|
||||
name: dnscrypt-proxy
|
||||
state: restarted
|
||||
when: ansible_distribution == 'FreeBSD'
|
||||
|
|
|
@ -1,4 +0,0 @@
|
|||
---
|
||||
dependencies:
|
||||
- role: common
|
||||
tags: common
|
|
@ -5,7 +5,7 @@
|
|||
codename: bionic
|
||||
repo: ppa:shevchuk/dnscrypt-proxy
|
||||
register: result
|
||||
until: result|succeeded
|
||||
until: result is succeeded
|
||||
retries: 10
|
||||
delay: 3
|
||||
|
||||
|
|
|
@ -134,7 +134,7 @@ tls_disable_session_tickets = true
|
|||
## Keep tls_cipher_suite empty if you have issues fetching sources or
|
||||
## connecting to some DoH servers. Google and Cloudflare are fine with it.
|
||||
|
||||
tls_cipher_suite = [49195]
|
||||
# tls_cipher_suite = [49195]
|
||||
|
||||
|
||||
## Fallback resolver
|
||||
|
|
|
@ -1,40 +1,8 @@
|
|||
---
|
||||
- block:
|
||||
- name: Add the instance to an inventory group
|
||||
add_host:
|
||||
name: "{{ server_ip }}"
|
||||
groups: vpn-host
|
||||
ansible_ssh_user: "{{ server_user }}"
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
cloud_provider: local
|
||||
when: server_ip != "localhost"
|
||||
- name: Include prompts
|
||||
import_tasks: prompts.yml
|
||||
|
||||
- name: Add the instance to an inventory group
|
||||
add_host:
|
||||
name: "{{ server_ip }}"
|
||||
groups: vpn-host
|
||||
ansible_ssh_user: "{{ server_user }}"
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
ansible_connection: local
|
||||
cloud_provider: local
|
||||
when: server_ip == "localhost"
|
||||
|
||||
- set_fact:
|
||||
cloud_instance_ip: "{{ server_ip }}"
|
||||
|
||||
- name: Ensure the group local exists in the dynamic inventory file
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
line: '[local]'
|
||||
|
||||
- name: Populate the dynamic inventory
|
||||
lineinfile:
|
||||
state: present
|
||||
dest: configs/inventory.dynamic
|
||||
insertafter: '\[local\]'
|
||||
regexp: "^{{ server_ip }}.*"
|
||||
line: "{{ server_ip }}"
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
|
|
roles/local/tasks/prompts.yml (new file, 44 lines)
|
@ -0,0 +1,44 @@
|
|||
---
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter the IP address of your server: (or use localhost for local installation):
|
||||
[localhost]
|
||||
register: _algo_server
|
||||
when: server is undefined
|
||||
|
||||
- name: Set the facts
|
||||
set_fact:
|
||||
cloud_instance_ip: >-
|
||||
{% if server is defined %}{{ server }}
|
||||
{%- elif _algo_server.user_input is defined and _algo_server.user_input != "" %}{{ _algo_server.user_input }}
|
||||
{%- else %}localhost{% endif %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What user should we use to login on the server? (note: passwordless login required, or ignore if you're deploying to localhost)
|
||||
[root]
|
||||
register: _algo_ssh_user
|
||||
when:
|
||||
- ssh_user is undefined
|
||||
- cloud_instance_ip != "localhost"
|
||||
|
||||
- name: Set the facts
|
||||
set_fact:
|
||||
ansible_ssh_user: >-
|
||||
{% if ssh_user is defined %}{{ ssh_user }}
|
||||
{%- elif _algo_ssh_user.user_input is defined and _algo_ssh_user.user_input != "" %}{{ _algo_ssh_user.user_input }}
|
||||
{%- else %}root{% endif %}
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter the public IP address of your server: (IMPORTANT! This IP is used to verify the certificate)
|
||||
[{{ cloud_instance_ip }}]
|
||||
register: _endpoint
|
||||
when: endpoint is undefined
|
||||
|
||||
- name: Set the facts
|
||||
set_fact:
|
||||
IP_subject_alt_name: >-
|
||||
{% if endpoint is defined %}{{ endpoint }}
|
||||
{%- elif _endpoint.user_input is defined and _endpoint.user_input != "" %}{{ _endpoint.user_input }}
|
||||
{%- else %}{{ cloud_instance_ip }}{% endif %}
|
|
@ -1,4 +0,0 @@
|
|||
---
|
||||
|
||||
dependencies:
|
||||
- { role: common, tags: common }
|
|
@ -36,11 +36,12 @@
|
|||
ssh_key_type: ecdsa
|
||||
ssh_key_bits: 256
|
||||
ssh_key_comment: '{{ item }}@{{ IP_subject_alt_name }}'
|
||||
ssh_key_passphrase: "{{ easyrsa_p12_export_password }}"
|
||||
ssh_key_passphrase: "{{ p12_export_password }}"
|
||||
update_password: on_create
|
||||
state: present
|
||||
append: yes
|
||||
with_items: "{{ users }}"
|
||||
tags: update-users
|
||||
|
||||
- name: The authorized keys file created
|
||||
file:
|
||||
|
@ -50,6 +51,7 @@
|
|||
group: "{{ item }}"
|
||||
state: link
|
||||
with_items: "{{ users }}"
|
||||
tags: update-users
|
||||
|
||||
- name: Generate SSH fingerprints
|
||||
shell: ssh-keyscan {{ IP_subject_alt_name }} 2>/dev/null
|
||||
|
@ -60,12 +62,9 @@
|
|||
src: '/var/jail/{{ item }}/.ssh/id_ecdsa'
|
||||
dest: configs/{{ IP_subject_alt_name }}/{{ item }}.ssh.pem
|
||||
flat: yes
|
||||
mode: "0600"
|
||||
with_items: "{{ users }}"
|
||||
|
||||
- name: Change mode for SSH private keys
|
||||
local_action: file path=configs/{{ IP_subject_alt_name }}/{{ item }}.ssh.pem mode=0600
|
||||
with_items: "{{ users }}"
|
||||
become: false
|
||||
tags: update-users
|
||||
|
||||
- name: Fetch the known_hosts file
|
||||
local_action:
|
||||
|
@ -80,15 +79,15 @@
|
|||
src: ssh_config.j2
|
||||
dest: configs/{{ IP_subject_alt_name }}/{{ item }}.ssh_config
|
||||
mode: 0600
|
||||
become: no
|
||||
with_items:
|
||||
- "{{ users }}"
|
||||
become: false
|
||||
tags: update-users
|
||||
with_items: "{{ users }}"
|
||||
|
||||
- name: SSH | Get active system users
|
||||
shell: >
|
||||
getent group algo | cut -f4 -d: | sed "s/,/\n/g"
|
||||
register: valid_users
|
||||
when: ssh_tunneling_enabled is defined and ssh_tunneling_enabled == "y"
|
||||
tags: update-users
|
||||
|
||||
- name: SSH | Delete non-existing users
|
||||
user:
|
||||
|
@ -96,8 +95,9 @@
|
|||
state: absent
|
||||
remove: yes
|
||||
force: yes
|
||||
when: item not in users and ssh_tunneling_enabled is defined and ssh_tunneling_enabled == "y"
|
||||
when: item not in users
|
||||
with_items: "{{ valid_users.stdout_lines | default('null') }}"
|
||||
tags: update-users
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
|
|
|
@ -1,5 +1,37 @@
|
|||
---
|
||||
BetweenClients_DROP: true
|
||||
wireguard_config_path: "configs/{{ IP_subject_alt_name }}/wireguard/"
|
||||
wireguard_interface: wg0
|
||||
wireguard_network_ipv4:
|
||||
subnet: 10.19.49.0
|
||||
prefix: 24
|
||||
gateway: 10.19.49.1
|
||||
clients_range: 10.19.49
|
||||
clients_start: 100
|
||||
wireguard_network_ipv6:
|
||||
subnet: 'fd9d:bc11:4021::'
|
||||
prefix: 48
|
||||
gateway: 'fd9d:bc11:4021::1'
|
||||
clients_range: 'fd9d:bc11:4021::'
|
||||
clients_start: 100
|
||||
wireguard_vpn_network: "{{ wireguard_network_ipv4['subnet'] }}/{{ wireguard_network_ipv4['prefix'] }}"
|
||||
wireguard_vpn_network_ipv6: "{{ wireguard_network_ipv6['subnet'] }}/{{ wireguard_network_ipv6['prefix'] }}"
|
||||
keys_clean_all: false
|
||||
wireguard_dns_servers: >-
|
||||
{% if local_dns|default(false)|bool or dns_encryption|default(false)|bool == true %}
|
||||
{{ local_service_ip }}
|
||||
{% else %}
|
||||
{% for host in dns_servers.ipv4 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% if ipv6_support %},{% for host in dns_servers.ipv6 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
|
||||
{% endif %}
|
||||
|
||||
algo_ondemand_cellular: false
|
||||
algo_ondemand_wifi: false
|
||||
algo_ondemand_wifi_exclude: '_null'
|
||||
algo_windows: false
|
||||
algo_store_cakey: false
|
||||
algo_local_dns: false
|
||||
ipv6_support: false
|
||||
dns_encryption: true
|
||||
domain: false
|
||||
subjectAltName_IP: "IP:{{ IP_subject_alt_name }}"
|
||||
openssl_bin: openssl
|
||||
|
|
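The wireguard_dns_servers template in these defaults renders to either the local resolver address (when local DNS or DNS encryption is on) or a flat comma-separated list of the upstream servers, with IPv6 servers appended only when ipv6_support is set. A small Python sketch of that decision, with invented server lists standing in for config.cfg values:

    # Hypothetical values standing in for config.cfg entries.
    dns_servers = {"ipv4": ["1.1.1.1", "1.0.0.1"], "ipv6": ["2606:4700:4700::1111"]}
    local_service_ip = "172.16.0.1"   # example only

    def wireguard_dns(local_dns=False, dns_encryption=True, ipv6_support=False):
        if local_dns or dns_encryption:
            return local_service_ip
        servers = list(dns_servers["ipv4"])
        if ipv6_support:
            servers += dns_servers["ipv6"]
        return ",".join(servers)

    print(wireguard_dns())                                        # 172.16.0.1
    print(wireguard_dns(dns_encryption=False, ipv6_support=True))
    # 1.1.1.1,1.0.0.1,2606:4700:4700::1111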
|
@ -1,7 +1,6 @@
|
|||
---
|
||||
|
||||
dependencies:
|
||||
- { role: common, tags: common }
|
||||
- role: dns_encryption
|
||||
tags: dns_encryption
|
||||
when: dns_encryption == true
|
||||
when: dns_encryption
|
||||
|
|
|
@ -37,23 +37,12 @@
|
|||
with_items:
|
||||
- "{{ users }}"
|
||||
|
||||
- name: Create the windows check file
|
||||
file:
|
||||
state: touch
|
||||
path: configs/{{ IP_subject_alt_name }}/.supports_windows
|
||||
when: Win10_Enabled is defined and Win10_Enabled == "Y"
|
||||
|
||||
- name: Check if the windows check file exists
|
||||
stat:
|
||||
path: configs/{{ IP_subject_alt_name }}/.supports_windows
|
||||
register: supports_windows
|
||||
|
||||
- name: Build the windows client powershell script
|
||||
template:
|
||||
src: client_windows.ps1.j2
|
||||
dest: configs/{{ IP_subject_alt_name }}/windows_{{ item.0 }}.ps1
|
||||
mode: 0600
|
||||
when: Win10_Enabled is defined and Win10_Enabled == "Y" or supports_windows.stat.exists == true
|
||||
when: algo_windows
|
||||
with_together:
|
||||
- "{{ users }}"
|
||||
- "{{ PayloadContent.results }}"
|
||||
|
|
|
@ -1,114 +0,0 @@
|
|||
---
|
||||
|
||||
- name: FreeBSD / HardenedBSD | Get the existing kernel parameters
|
||||
command: sysctl -b kern.conftxt
|
||||
register: kern_conftxt
|
||||
when: rebuild_kernel is defined and rebuild_kernel == "true"
|
||||
|
||||
- name: FreeBSD / HardenedBSD | Set the rebuild_needed fact
|
||||
set_fact:
|
||||
rebuild_needed: true
|
||||
when: item not in kern_conftxt.stdout and rebuild_kernel is defined and rebuild_kernel == "true"
|
||||
with_items:
|
||||
- "IPSEC"
|
||||
- "IPSEC_NAT_T"
|
||||
- "crypto"
|
||||
|
||||
- name: FreeBSD / HardenedBSD | Make the kernel config
|
||||
shell: sysctl -b kern.conftxt > /tmp/IPSEC
|
||||
when: rebuild_needed is defined and rebuild_needed == true
|
||||
|
||||
- name: FreeBSD / HardenedBSD | Ensure the all options are enabled
|
||||
lineinfile:
|
||||
dest: /tmp/IPSEC
|
||||
line: "{{ item }}"
|
||||
insertbefore: BOF
|
||||
with_items:
|
||||
- "options IPSEC"
|
||||
- "options IPSEC_NAT_T"
|
||||
- "device crypto"
|
||||
when: rebuild_needed is defined and rebuild_needed == true
|
||||
|
||||
- name: HardenedBSD | Determine the sources
|
||||
set_fact:
|
||||
sources_repo: https://github.com/HardenedBSD/hardenedBSD.git
|
||||
sources_version: "hardened/{{ ansible_distribution_release.split('.')[0] }}-stable/master"
|
||||
when: "'Hardened' in ansible_distribution_version"
|
||||
|
||||
- name: FreeBSD | Determine the sources
|
||||
set_fact:
|
||||
sources_repo: https://github.com/freebsd/freebsd.git
|
||||
sources_version: "stable/{{ ansible_distribution_major_version }}"
|
||||
when: "'Hardened' not in ansible_distribution_version"
|
||||
|
||||
- name: FreeBSD / HardenedBSD | Increase the git postBuffer size
|
||||
git_config:
|
||||
name: http.postBuffer
|
||||
scope: global
|
||||
value: 1048576000
|
||||
|
||||
- block:
|
||||
- name: FreeBSD / HardenedBSD | Fetching the sources...
|
||||
git:
|
||||
repo: "{{ sources_repo }}"
|
||||
dest: /usr/krnl_src
|
||||
version: "{{ sources_version }}"
|
||||
accept_hostkey: true
|
||||
async: 1000
|
||||
poll: 0
|
||||
register: fetching_sources
|
||||
|
||||
- name: FreeBSD / HardenedBSD | Fetching the sources...
|
||||
async_status: jid={{ fetching_sources.ansible_job_id }}
|
||||
when: rebuild_needed is defined and rebuild_needed == true
|
||||
register: result
|
||||
until: result.finished
|
||||
retries: 600
|
||||
delay: 30
|
||||
rescue:
|
||||
- debug: var=fetching_sources
|
||||
|
||||
- fail:
|
||||
msg: "Something went wrong. Check the debug output above."
|
||||
|
||||
- block:
|
||||
- name: FreeBSD / HardenedBSD | The kernel is being built...
|
||||
shell: >
|
||||
mv /tmp/IPSEC /usr/krnl_src/sys/{{ ansible_architecture }}/conf &&
|
||||
make buildkernel KERNCONF=IPSEC &&
|
||||
make installkernel KERNCONF=IPSEC
|
||||
args:
|
||||
chdir: /usr/krnl_src
|
||||
executable: /usr/local/bin/bash
|
||||
when: rebuild_needed is defined and rebuild_needed == true
|
||||
async: 1000
|
||||
poll: 0
|
||||
register: building_kernel
|
||||
|
||||
- name: FreeBSD / HardenedBSD | The kernel is being built...
|
||||
async_status: jid={{ building_kernel.ansible_job_id }}
|
||||
when: rebuild_needed is defined and rebuild_needed == true
|
||||
register: result
|
||||
until: result.finished
|
||||
retries: 600
|
||||
delay: 30
|
||||
rescue:
|
||||
- debug: var=building_kernel
|
||||
|
||||
- fail:
|
||||
msg: "Something went wrong. Check the debug output above."
|
||||
|
||||
- name: FreeBSD / HardenedBSD | Reboot
|
||||
shell: sleep 2 && shutdown -r now
|
||||
args:
|
||||
executable: /usr/local/bin/bash
|
||||
when: rebuild_needed is defined and rebuild_needed == true
|
||||
async: 1
|
||||
poll: 0
|
||||
ignore_errors: true
|
||||
|
||||
- name: FreeBSD / HardenedBSD | Enable strongswan
|
||||
lineinfile:
|
||||
dest: /etc/rc.conf
|
||||
regexp: ^strongswan_enable=
|
||||
line: 'strongswan_enable="YES"'
|
|
@ -1,5 +1,11 @@
|
|||
---
|
||||
- block:
|
||||
- name: Include WireGuard role
|
||||
include_role:
|
||||
name: wireguard
|
||||
tags: wireguard
|
||||
when: wireguard_enabled and ansible_distribution == 'Ubuntu'
|
||||
|
||||
- name: Ensure that the strongswan group exist
|
||||
group: name=strongswan state=present
|
||||
|
||||
|
@ -9,25 +15,25 @@
|
|||
- include_tasks: ubuntu.yml
|
||||
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
|
||||
|
||||
- include_tasks: freebsd.yml
|
||||
when: ansible_distribution == 'FreeBSD'
|
||||
|
||||
- name: Install strongSwan
|
||||
package: name=strongswan state=present
|
||||
|
||||
- include_tasks: ipsec_configuration.yml
|
||||
- include_tasks: openssl.yml
|
||||
- import_tasks: ipsec_configuration.yml
|
||||
- import_tasks: openssl.yml
|
||||
tags: update-users
|
||||
- include_tasks: distribute_keys.yml
|
||||
- include_tasks: client_configs.yml
|
||||
- import_tasks: distribute_keys.yml
|
||||
- import_tasks: client_configs.yml
|
||||
delegate_to: localhost
|
||||
become: no
|
||||
tags: update-users
|
||||
|
||||
- meta: flush_handlers
|
||||
|
||||
- name: strongSwan started
|
||||
service: name=strongswan state=started
|
||||
service:
|
||||
name: strongswan
|
||||
state: started
|
||||
enabled: true
|
||||
|
||||
- meta: flush_handlers
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
file:
|
||||
dest: configs/{{ IP_subject_alt_name }}/pki
|
||||
state: absent
|
||||
when: easyrsa_reinit_existent|bool == True
|
||||
when: keys_clean_all|bool == True
|
||||
|
||||
- name: Ensure the pki directories exist
|
||||
file:
|
||||
|
@ -49,7 +49,7 @@
|
|||
-keyout private/cakey.pem
|
||||
-out cacert.pem -x509 -days 3650
|
||||
-batch
|
||||
-passout pass:"{{ easyrsa_CA_password }}" &&
|
||||
-passout pass:"{{ CA_password }}" &&
|
||||
touch {{ IP_subject_alt_name }}_ca_generated
|
||||
args:
|
||||
chdir: "configs/{{ IP_subject_alt_name }}/pki/"
|
||||
|
@ -75,14 +75,14 @@
|
|||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName }}"))
|
||||
-keyout private/{{ IP_subject_alt_name }}.key
|
||||
-out reqs/{{ IP_subject_alt_name }}.req -nodes
|
||||
-passin pass:"{{ easyrsa_CA_password }}"
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-subj "/CN={{ IP_subject_alt_name }}" -batch &&
|
||||
{{ openssl_bin }} ca -utf8
|
||||
-in reqs/{{ IP_subject_alt_name }}.req
|
||||
-out certs/{{ IP_subject_alt_name }}.crt
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName }}"))
|
||||
-days 3650 -batch
|
||||
-passin pass:"{{ easyrsa_CA_password }}"
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-subj "/CN={{ IP_subject_alt_name }}" &&
|
||||
touch certs/{{ IP_subject_alt_name }}_crt_generated
|
||||
args:
|
||||
|
@ -97,14 +97,14 @@
|
|||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName=DNS:{{ item }}"))
|
||||
-keyout private/{{ item }}.key
|
||||
-out reqs/{{ item }}.req -nodes
|
||||
-passin pass:"{{ easyrsa_CA_password }}"
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-subj "/CN={{ item }}" -batch &&
|
||||
{{ openssl_bin }} ca -utf8
|
||||
-in reqs/{{ item }}.req
|
||||
-out certs/{{ item }}.crt
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName=DNS:{{ item }}"))
|
||||
-days 3650 -batch
|
||||
-passin pass:"{{ easyrsa_CA_password }}"
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-subj "/CN={{ item }}" &&
|
||||
touch certs/{{ item }}_crt_generated
|
||||
args:
|
||||
|
@ -121,7 +121,7 @@
|
|||
-export
|
||||
-name {{ item }}
|
||||
-out private/{{ item }}.p12
|
||||
-passout pass:"{{ easyrsa_p12_export_password }}"
|
||||
-passout pass:"{{ p12_export_password }}"
|
||||
args:
|
||||
chdir: "configs/{{ IP_subject_alt_name }}/pki/"
|
||||
executable: bash
|
||||
|
@ -150,7 +150,7 @@
|
|||
shell: >
|
||||
{{ openssl_bin }} ca -gencrl
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName=DNS:{{ item }}"))
|
||||
-passin pass:"{{ easyrsa_CA_password }}"
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-revoke certs/{{ item }}.crt
|
||||
-out crl/{{ item }}.crt
|
||||
register: gencrl
|
||||
|
@ -165,7 +165,7 @@
|
|||
shell: >
|
||||
{{ openssl_bin }} ca -gencrl
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName=DNS:{{ IP_subject_alt_name }}"))
|
||||
-passin pass:"{{ easyrsa_CA_password }}"
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-out crl/algo.root.pem
|
||||
when:
|
||||
- gencrl is defined
|
||||
|
|
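With the CA passphrase renamed from easyrsa_CA_password to CA_password throughout these openssl tasks, a quick sanity check after a run is to confirm the issued server certificate still carries the expected IP subjectAltName. A hedged sketch of such a check; the path layout follows the configs/<IP>/pki directory used above, and the IP is illustrative:

    import subprocess

    cert = "configs/203.0.113.10/pki/certs/203.0.113.10.crt"   # hypothetical server IP
    text = subprocess.run(
        ["openssl", "x509", "-in", cert, "-noout", "-text"],
        capture_output=True, text=True, check=True,
    ).stdout

    # The server cert is issued with subjectAltName=IP:{{ IP_subject_alt_name }}
    assert "IP Address:203.0.113.10" in text, "server cert is missing the IP SAN"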
|
@ -6,7 +6,7 @@ conn ikev2-{{ IP_subject_alt_name }}
|
|||
compress=no
|
||||
dpddelay=35s
|
||||
|
||||
{% if Win10_Enabled is defined and Win10_Enabled == "Y" %}
|
||||
{% if algo_windows %}
|
||||
ike={{ ciphers.compat.ike }}
|
||||
esp={{ ciphers.compat.esp }}
|
||||
{% else %}
|
||||
|
|
|
@ -10,7 +10,7 @@ conn %default
|
|||
compress=yes
|
||||
dpddelay=35s
|
||||
|
||||
{% if Win10_Enabled is defined and Win10_Enabled == "Y" %}
|
||||
{% if algo_windows %}
|
||||
ike={{ ciphers.compat.ike }}
|
||||
esp={{ ciphers.compat.esp }}
|
||||
{% else %}
|
||||
|
@ -28,7 +28,7 @@ conn %default
|
|||
right=%any
|
||||
rightauth=pubkey
|
||||
rightsourceip={{ vpn_network }},{{ vpn_network_ipv6 }}
|
||||
{% if local_dns|d(false)|bool == true or dns_encryption|d(false)|bool == true %}
|
||||
{% if algo_local_dns or dns_encryption %}
|
||||
rightdns={{ local_service_ip }}
|
||||
{% else %}
|
||||
rightdns={% for host in dns_servers.ipv4 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% if ipv6_support %},{% for host in dns_servers.ipv6 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
|
||||
|
|
|
@ -7,13 +7,13 @@
|
|||
<dict>
|
||||
<key>IKEv2</key>
|
||||
<dict>
|
||||
{% if (OnDemandEnabled_WIFI is defined and OnDemandEnabled_WIFI == 'Y') or (OnDemandEnabled_Cellular is defined and OnDemandEnabled_Cellular == 'Y') %}
|
||||
{% if algo_ondemand_wifi or algo_ondemand_cellular %}
|
||||
<key>OnDemandEnabled</key>
|
||||
<integer>1</integer>
|
||||
<key>OnDemandRules</key>
|
||||
<array>
|
||||
{% if OnDemandEnabled_WIFI_EXCLUDE is defined and OnDemandEnabled_WIFI_EXCLUDE != '_null' %}
|
||||
{% set WIFI_EXCLUDE_LIST = OnDemandEnabled_WIFI_EXCLUDE.split(',') %}
|
||||
{% if algo_ondemand_wifi_exclude != '_null' %}
|
||||
{% set WIFI_EXCLUDE_LIST = (algo_ondemand_wifi_exclude|string).split(',') %}
|
||||
<dict>
|
||||
<key>Action</key>
|
||||
<string>Disconnect</string>
|
||||
|
@ -30,7 +30,7 @@
|
|||
{% endif %}
|
||||
<dict>
|
||||
<key>Action</key>
|
||||
{% if OnDemandEnabled_WIFI is defined and OnDemandEnabled_WIFI == 'Y' %}
|
||||
{% if algo_ondemand_wifi %}
|
||||
<string>Connect</string>
|
||||
{% else %}
|
||||
<string>Disconnect</string>
|
||||
|
@ -42,7 +42,7 @@
|
|||
</dict>
|
||||
<dict>
|
||||
<key>Action</key>
|
||||
{% if OnDemandEnabled_Cellular is defined and OnDemandEnabled_Cellular == 'Y' %}
|
||||
{% if algo_ondemand_cellular %}
|
||||
<string>Connect</string>
|
||||
{% else %}
|
||||
<string>Disconnect</string>
|
||||
|
|
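In the mobileconfig template above, the comma-separated exclude value becomes a list of trusted SSIDs that trigger a Disconnect rule, while Wi-Fi and cellular each get their own Connect-or-Disconnect rule based on algo_ondemand_wifi and algo_ondemand_cellular. A trivial sketch of the split, with example SSIDs:

    # Mirrors {% set WIFI_EXCLUDE_LIST = (algo_ondemand_wifi_exclude|string).split(',') %}
    algo_ondemand_wifi_exclude = "HomeNet,OfficeWifi"   # example; "_null" means no exclusions

    if algo_ondemand_wifi_exclude != "_null":
        wifi_exclude_list = str(algo_ondemand_wifi_exclude).split(",")
    else:
        wifi_exclude_list = []

    print(wifi_exclude_list)  # ['HomeNet', 'OfficeWifi']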
|
@ -70,7 +70,7 @@ COMMIT
|
|||
-A INPUT -d {{ local_service_ip }} -p udp --dport 53 -j ACCEPT
|
||||
|
||||
# Drop traffic between VPN clients
|
||||
{% if BetweenClients_DROP is defined and BetweenClients_DROP == "Y" %}
|
||||
{% if BetweenClients_DROP %}
|
||||
{% set BetweenClientsPolicy = "DROP" %}
|
||||
{% endif %}
|
||||
-A FORWARD -s {{ vpn_network }}{% if wireguard_enabled %},{{ wireguard_vpn_network }}{% endif %} -d {{ vpn_network }}{% if wireguard_enabled %},{{ wireguard_vpn_network }}{% endif %} -j {{ BetweenClientsPolicy | default("ACCEPT") }}
|
||||
|
|
|
@ -85,7 +85,7 @@ COMMIT
|
|||
-A INPUT -d fcaa::1 -p udp --dport 53 -j ACCEPT
|
||||
|
||||
# Drop traffic between VPN clients
|
||||
{% if BetweenClients_DROP is defined and BetweenClients_DROP == "Y" %}
|
||||
{% if BetweenClients_DROP %}
|
||||
{% set BetweenClientsPolicy = "DROP" %}
|
||||
{% endif %}
|
||||
-A FORWARD -s {{ vpn_network_ipv6 }}{% if wireguard_enabled %},{{ wireguard_vpn_network_ipv6 }}{% endif %} -d {{ vpn_network_ipv6 }}{% if wireguard_enabled %},{{ wireguard_vpn_network_ipv6 }}{% endif %} -j {{ BetweenClientsPolicy | default("ACCEPT") }}
|
||||
|
|
|
@ -1,24 +0,0 @@
|
|||
---
|
||||
wireguard_config_path: "configs/{{ IP_subject_alt_name }}/wireguard/"
|
||||
wireguard_interface: wg0
|
||||
wireguard_network_ipv4:
|
||||
subnet: 10.19.49.0
|
||||
prefix: 24
|
||||
gateway: 10.19.49.1
|
||||
clients_range: 10.19.49
|
||||
clients_start: 100
|
||||
wireguard_network_ipv6:
|
||||
subnet: 'fd9d:bc11:4021::'
|
||||
prefix: 48
|
||||
gateway: 'fd9d:bc11:4021::1'
|
||||
clients_range: 'fd9d:bc11:4021::'
|
||||
clients_start: 100
|
||||
wireguard_vpn_network: "{{ wireguard_network_ipv4['subnet'] }}/{{ wireguard_network_ipv4['prefix'] }}"
|
||||
wireguard_vpn_network_ipv6: "{{ wireguard_network_ipv6['subnet'] }}/{{ wireguard_network_ipv6['prefix'] }}"
|
||||
easyrsa_reinit_existent: false
|
||||
wireguard_dns_servers: >-
|
||||
{% if local_dns|default(false)|bool or dns_encryption|default(false)|bool == true %}
|
||||
{{ local_service_ip }}
|
||||
{% else %}
|
||||
{% for host in dns_servers.ipv4 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% if ipv6_support %},{% for host in dns_servers.ipv6 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
|
||||
{% endif %}
|
|
@ -1,3 +0,0 @@
|
|||
---
|
||||
dependencies:
|
||||
- { role: common, tags: common }
|
|
@ -3,7 +3,7 @@
|
|||
file:
|
||||
dest: "/etc/wireguard/private_{{ item }}.lock"
|
||||
state: absent
|
||||
when: easyrsa_reinit_existent|bool == True
|
||||
when: keys_clean_all|bool == True
|
||||
with_items:
|
||||
- "{{ users }}"
|
||||
- "{{ IP_subject_alt_name }}"
|
||||
|
@ -13,7 +13,6 @@
|
|||
register: wg_genkey
|
||||
args:
|
||||
creates: "/etc/wireguard/private_{{ item }}.lock"
|
||||
executable: bash
|
||||
with_items:
|
||||
- "{{ users }}"
|
||||
- "{{ IP_subject_alt_name }}"
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
repo: ppa:wireguard/wireguard
|
||||
state: present
|
||||
register: result
|
||||
until: result|succeeded
|
||||
until: result is succeeded
|
||||
retries: 10
|
||||
delay: 3
|
||||
|
||||
|
|
server.yml (new file, 65 lines)
|
@ -0,0 +1,65 @@
|
|||
---
|
||||
- name: Configure the server and install required software
|
||||
hosts: vpn-host
|
||||
gather_facts: false
|
||||
tags: algo
|
||||
become: true
|
||||
vars_files:
|
||||
- config.cfg
|
||||
|
||||
roles:
|
||||
- role: common
|
||||
- role: dns_adblocking
|
||||
when: algo_local_dns
|
||||
tags: dns_adblocking
|
||||
- role: ssh_tunneling
|
||||
when: algo_ssh_tunneling
|
||||
tags: ssh_tunneling
|
||||
- role: vpn
|
||||
tags: vpn
|
||||
|
||||
post_tasks:
|
||||
- block:
|
||||
- name: Delete the CA key
|
||||
local_action:
|
||||
module: file
|
||||
path: "configs/{{ IP_subject_alt_name }}/pki/private/cakey.pem"
|
||||
state: absent
|
||||
become: false
|
||||
when: not algo_store_cakey
|
||||
|
||||
- name: Dump the configuration
|
||||
local_action:
|
||||
module: copy
|
||||
dest: "configs/{{ IP_subject_alt_name }}/config.yml"
|
||||
content: |
|
||||
server: {{ 'localhost' if inventory_hostname == 'localhost' else inventory_hostname }}
|
||||
server_user: {{ ansible_ssh_user }}
|
||||
{% if algo_provider != "local" %}
|
||||
ansible_ssh_private_key_file: {{ ansible_ssh_private_key_file|default(SSH_keys.private) }}
|
||||
{% endif %}
|
||||
algo_provider: {{ algo_provider }}
|
||||
algo_server_name: {{ algo_server_name }}
|
||||
algo_ondemand_cellular: {{ algo_ondemand_cellular }}
|
||||
algo_ondemand_wifi: {{ algo_ondemand_wifi }}
|
||||
algo_ondemand_wifi_exclude: {{ algo_ondemand_wifi_exclude }}
|
||||
algo_local_dns: {{ algo_local_dns }}
|
||||
algo_ssh_tunneling: {{ algo_ssh_tunneling }}
|
||||
algo_windows: {{ algo_windows }}
|
||||
algo_store_cakey: {{ algo_store_cakey }}
|
||||
IP_subject_alt_name: {{ IP_subject_alt_name }}
|
||||
{% if tests|default(false)|bool %}ca_password: {{ CA_password }}{% endif %}
|
||||
become: false
|
||||
|
||||
- debug:
|
||||
msg:
|
||||
- "{{ congrats.common.split('\n') }}"
|
||||
- " {{ congrats.p12_pass }}"
|
||||
- " {% if algo_store_cakey %}{{ congrats.ca_key_pass }}{% endif %}"
|
||||
- " {% if algo_provider != 'local' %}{{ congrats.ssh_access }}{% endif %}"
|
||||
tags: always
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
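server.yml now dumps a per-server configs/<IP>/config.yml that users.yml later loads with include_vars before adding the host. A minimal sketch of reading such a file, assuming PyYAML is available; the embedded document uses made-up values and only the keys written by the dump task above:

    import yaml  # PyYAML, assumed available

    # Hypothetical example of what server.yml writes to configs/<IP>/config.yml
    dumped = """
    server: 203.0.113.10
    server_user: root
    algo_provider: ec2
    algo_server_name: algo
    algo_ondemand_cellular: false
    algo_ondemand_wifi: false
    algo_ondemand_wifi_exclude: _null
    algo_local_dns: false
    algo_ssh_tunneling: false
    algo_windows: false
    algo_store_cakey: false
    IP_subject_alt_name: 203.0.113.10
    """

    config = yaml.safe_load(dumped)
    print(config["server"], config["algo_provider"])   # 203.0.113.10 ec2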
|
@ -2,12 +2,11 @@
|
|||
|
||||
set -ex
|
||||
|
||||
DEPLOY_ARGS="server_ip=$LXC_IP server_user=ubuntu IP_subject_alt_name=$LXC_IP local_dns=true dns_over_https=true apparmor_enabled=false install_headers=false"
|
||||
touch /tmp/ca_password
|
||||
DEPLOY_ARGS="provider=local server=$LXC_IP ssh_user=ubuntu endpoint=$LXC_IP apparmor_enabled=false ondemand_cellular=true ondemand_wifi=true ondemand_wifi_exclude=test local_dns=true ssh_tunneling=true windows=true store_cakey=true install_headers=false tests=true"
|
||||
|
||||
if [ "${LXC_NAME}" == "docker" ]
|
||||
then
|
||||
docker run -it -v /tmp/ca_password:/tmp/ca_password -v $(pwd)/config.cfg:/algo/config.cfg -v ~/.ssh:/root/.ssh -v $(pwd)/configs:/algo/configs -e "DEPLOY_ARGS=${DEPLOY_ARGS}" travis/algo /bin/sh -c "chown -R 0:0 /root/.ssh && source env/bin/activate && ansible-playbook deploy.yml -t cloud,local,vpn,dns,ssh_tunneling,security,tests,dns_over_https -e \"${DEPLOY_ARGS}\" --skip-tags apparmor"
|
||||
docker run -it -v $(pwd)/config.cfg:/algo/config.cfg -v ~/.ssh:/root/.ssh -v $(pwd)/configs:/algo/configs -e "DEPLOY_ARGS=${DEPLOY_ARGS}" travis/algo /bin/sh -c "chown -R 0:0 /root/.ssh && source env/bin/activate && ansible-playbook main.yml -e \"${DEPLOY_ARGS}\" --skip-tags apparmor"
|
||||
else
|
||||
ansible-playbook deploy.yml -t cloud,local,vpn,dns,dns_over_https,ssh_tunneling,tests -e "${DEPLOY_ARGS}" --skip-tags apparmor
|
||||
ansible-playbook main.yml -e "${DEPLOY_ARGS}" --skip-tags apparmor
|
||||
fi
|
||||
|
|
|
@ -2,16 +2,13 @@
|
|||
|
||||
set -ex
|
||||
|
||||
CAPW=`cat /tmp/ca_password`
|
||||
USER_ARGS="server_ip=$LXC_IP server_user=ubuntu ssh_tunneling_enabled=y IP_subject=$LXC_IP easyrsa_CA_password=$CAPW apparmor_enabled=false install_headers=false"
|
||||
|
||||
sed -i 's/- jack$/- jack_test/' config.cfg
|
||||
USER_ARGS="{ 'server': '$LXC_IP', 'users': ['user1', 'user2'] }"
|
||||
|
||||
if [ "${LXC_NAME}" == "docker" ]
|
||||
then
|
||||
docker run -it -v $(pwd)/config.cfg:/algo/config.cfg -v ~/.ssh:/root/.ssh -v $(pwd)/configs:/algo/configs -e "USER_ARGS=${USER_ARGS}" travis/algo /bin/sh -c "chown -R 0:0 /root/.ssh && source env/bin/activate && ansible-playbook users.yml -e \"${USER_ARGS}\" -t update-users --skip-tags common"
|
||||
docker run -it -v $(pwd)/config.cfg:/algo/config.cfg -v ~/.ssh:/root/.ssh -v $(pwd)/configs:/algo/configs -e "USER_ARGS=${USER_ARGS}" travis/algo /bin/sh -c "chown -R 0:0 /root/.ssh && source env/bin/activate && ansible-playbook users.yml -e \"${USER_ARGS}\" -t update-users"
|
||||
else
|
||||
ansible-playbook users.yml -e "${USER_ARGS}" -t update-users --skip-tags common
|
||||
ansible-playbook users.yml -e "${USER_ARGS}" -t update-users
|
||||
fi
|
||||
|
||||
if sudo openssl crl -inform pem -noout -text -in configs/$LXC_IP/pki/crl/jack.crt | grep CRL
|
||||
|
@ -22,7 +19,7 @@ if sudo openssl crl -inform pem -noout -text -in configs/$LXC_IP/pki/crl/jack.cr
|
|||
exit 1
|
||||
fi
|
||||
|
||||
if sudo openssl x509 -inform pem -noout -text -in configs/$LXC_IP/pki/certs/jack_test.crt | grep CN=jack_test
|
||||
if sudo openssl x509 -inform pem -noout -text -in configs/$LXC_IP/pki/certs/user1.crt | grep CN=user1
|
||||
then
|
||||
echo "The new user exists"
|
||||
else
|
||||
|
|
users.yml (76 lines changed)
|
@ -1,5 +1,4 @@
|
|||
---
|
||||
|
||||
- hosts: localhost
|
||||
gather_facts: False
|
||||
tags: always
|
||||
|
@ -8,27 +7,43 @@
|
|||
|
||||
tasks:
|
||||
- block:
|
||||
- pause:
|
||||
prompt: "Enter the IP address of your server: (or use localhost for local installation)"
|
||||
register: _server
|
||||
when: server is undefined
|
||||
|
||||
- name: Set facts based on the input
|
||||
set_fact:
|
||||
algo_server: >-
|
||||
{% if server is defined %}{{ server }}
|
||||
{%- elif _server.user_input is defined and _server.user_input != "" %}{{ _server.user_input }}
|
||||
{%- else %}omit{% endif %}
|
||||
|
||||
- name: Import host specific variables
|
||||
include_vars:
|
||||
file: "configs/{{ algo_server }}/config.yml"
|
||||
|
||||
- pause:
|
||||
prompt: Enter the password for the private CA key
|
||||
echo: false
|
||||
register: _ca_password
|
||||
when: ca_password is undefined
|
||||
|
||||
- name: Set facts based on the input
|
||||
set_fact:
|
||||
CA_password: >-
|
||||
{% if ca_password is defined %}{{ ca_password }}
|
||||
{%- elif _ca_password.user_input is defined and _ca_password.user_input != "" %}{{ _ca_password.user_input }}
|
||||
{%- else %}omit{% endif %}
|
||||
|
||||
- name: Add the server to the vpn-host group
|
||||
add_host:
|
||||
hostname: "{{ server_ip }}"
|
||||
groupname: vpn-host
|
||||
ansible_ssh_user: "{{ server_user }}"
|
||||
name: "{{ algo_server }}"
|
||||
groups: vpn-host
|
||||
ansible_ssh_user: "{{ server_user|default('root') }}"
|
||||
ansible_connection: "{% if algo_server == 'localhost' %}local{% else %}ssh{% endif %}"
|
||||
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||
ssh_tunneling_enabled: "{{ ssh_tunneling_enabled }}"
|
||||
easyrsa_CA_password: "{{ easyrsa_CA_password }}"
|
||||
IP_subject: "{{ IP_subject_alt_name }}"
|
||||
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||
|
||||
- name: Wait until SSH becomes ready...
|
||||
local_action:
|
||||
module: wait_for
|
||||
port: 22
|
||||
host: "{{ server_ip }}"
|
||||
search_regex: "OpenSSH"
|
||||
delay: 10
|
||||
timeout: 320
|
||||
state: present
|
||||
become: false
|
||||
CA_password: "{{ CA_password }}"
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
|
@ -41,22 +56,17 @@
|
|||
become: true
|
||||
vars_files:
|
||||
- config.cfg
|
||||
|
||||
pre_tasks:
|
||||
- block:
|
||||
- name: Common pre-tasks
|
||||
include_tasks: playbooks/common.yml
|
||||
tags: always
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
- "configs/{{ inventory_hostname }}/config.yml"
|
||||
|
||||
roles:
|
||||
- { role: ssh_tunneling, tags: always, when: ssh_tunneling_enabled is defined and ssh_tunneling_enabled == "y" }
|
||||
- { role: wireguard, tags: [ 'vpn', 'wireguard' ], when: wireguard_enabled }
|
||||
- { role: vpn }
|
||||
- role: common
|
||||
- role: ssh_tunneling
|
||||
when: algo_ssh_tunneling
|
||||
- role: wireguard
|
||||
tags: [ 'vpn', 'wireguard' ]
|
||||
when: wireguard_enabled
|
||||
- role: vpn
|
||||
tags: vpn
|
||||
|
||||
post_tasks:
|
||||
- block:
|
||||
|
|