mirror of https://github.com/trailofbits/algo.git (synced 2025-06-05 22:54:01 +02:00)
change the troubleshooting url

parent 2f5c050fd2, commit bd348af9c2
13 changed files with 877 additions and 775 deletions
@@ -78,3 +78,8 @@ cloud_providers:
    size: f1-micro
    image: ubuntu-1604  # ubuntu-1604 / ubuntu-1704
  local:

fail_hint:
  - Sorry, but something went wrong!
  - Please check the troubleshooting guide.
  - https://trailofbits.github.io/algo/troubleshooting.html
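The fail_hint list added above is what the rest of this diff consumes: each play and role below is wrapped in a block/rescue pair whose rescue prints the hint (pointing at the new troubleshooting URL) and then fails. A minimal standalone sketch of that pattern follows; the play, the "Provision something" task and its command are hypothetical placeholders, only the rescue stanza and fail_hint mirror the diff itself.

---
# Sketch only: illustrates the block/rescue + fail_hint pattern applied throughout this commit.
- hosts: localhost
  vars_files:
    - config.cfg            # defines fail_hint (see the hunk above)
  tasks:
    - block:
        - name: Provision something     # placeholder task, not from this commit
          command: /bin/false           # simulate a failing step
      rescue:
        - debug: var=fail_hint          # print the troubleshooting hint
          tags: always
        - fail:                         # then stop the play
          tags: always

Running such a play with a failing task would print the three fail_hint lines before the play aborts, which is the behavior the deploy.yml and role changes below appear to be after.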
94  deploy.yml
@@ -5,15 +5,21 @@
    - config.cfg

  pre_tasks:
    - block:
        - name: Local pre-tasks
          include: playbooks/local.yml
          tags: [ 'always' ]

        - name: Local pre-tasks
          include: playbooks/local_ssh.yml
          become: false
          when: Deployed_By_Algo is defined and Deployed_By_Algo == "Y"
          tags: [ 'local' ]
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

  roles:
    - { role: cloud-digitalocean, tags: ['digitalocean'] }
@@ -23,10 +29,16 @@
    - { role: local, tags: ['local'] }

  post_tasks:
    - block:
        - name: Local post-tasks
          include: playbooks/post.yml
          become: false
          tags: [ 'cloud' ]
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

- name: Configure the server and install required software
  hosts: vpn-host
@@ -37,9 +49,15 @@
    - config.cfg

  pre_tasks:
    - block:
        - name: Common pre-tasks
          include: playbooks/common.yml
          tags: [ 'digitalocean', 'ec2', 'gce', 'azure', 'local', 'pre' ]
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

  roles:
    - { role: security, tags: [ 'security' ] }
@@ -48,25 +66,31 @@
    - { role: vpn, tags: [ 'vpn' ] }

  post_tasks:
    - block:
        - debug:
            msg:
              - "{{ congrats.common.split('\n') }}"
              - " {{ congrats.p12_pass }}"
              - " {% if Store_CAKEY is defined and Store_CAKEY == 'N' %}{% else %}{{ congrats.ca_key_pass }}{% endif %}"
              - " {% if cloud_deployment is defined %}{{ congrats.ssh_access }}{% endif %}"
          tags: always

        - name: Save the CA key password
          local_action: >
            shell echo "{{ easyrsa_CA_password }}" > /tmp/ca_password
          become: no
          tags: tests

        - name: Delete the CA key
          local_action:
            module: file
            path: "configs/{{ IP_subject_alt_name }}/pki/private/cakey.pem"
            state: absent
          become: no
          tags: always
          when: Store_CAKEY is defined and Store_CAKEY == "N"
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always
@@ -1,138 +1,143 @@
---
- block:
    - set_fact:
        resource_group: "Algo_{{ region }}"
        secret: "{{ azure_secret | default(lookup('env','AZURE_SECRET'), true) }}"
        tenant: "{{ azure_tenant | default(lookup('env','AZURE_TENANT'), true) }}"
        client_id: "{{ azure_client_id | default(lookup('env','AZURE_CLIENT_ID'), true) }}"
        subscription_id: "{{ azure_subscription_id | default(lookup('env','AZURE_SUBSCRIPTION_ID'), true) }}"

    - name: Create a resource group
      azure_rm_resourcegroup:
        secret: "{{ secret }}"
        tenant: "{{ tenant }}"
        client_id: "{{ client_id }}"
        subscription_id: "{{ subscription_id }}"
        name: "{{ resource_group }}"
        location: "{{ region }}"
        tags:
          Environment: Algo

    - name: Create a virtual network
      azure_rm_virtualnetwork:
        secret: "{{ secret }}"
        tenant: "{{ tenant }}"
        client_id: "{{ client_id }}"
        subscription_id: "{{ subscription_id }}"
        resource_group: "{{ resource_group }}"
        name: algo_net
        address_prefixes: "10.10.0.0/16"
        tags:
          Environment: Algo

    - name: Create a security group
      azure_rm_securitygroup:
        secret: "{{ secret }}"
        tenant: "{{ tenant }}"
        client_id: "{{ client_id }}"
        subscription_id: "{{ subscription_id }}"
        resource_group: "{{ resource_group }}"
        name: AlgoSecGroup
        purge_rules: yes
        rules:
          - name: AllowSSH
            protocol: Tcp
            destination_port_range: 22
            access: Allow
            priority: 100
            direction: Inbound
          - name: AllowIPSEC500
            protocol: Udp
            destination_port_range: 500
            access: Allow
            priority: 110
            direction: Inbound
          - name: AllowIPSEC4500
            protocol: Udp
            destination_port_range: 4500
            access: Allow
            priority: 120
            direction: Inbound

    - name: Create a subnet
      azure_rm_subnet:
        secret: "{{ secret }}"
        tenant: "{{ tenant }}"
        client_id: "{{ client_id }}"
        subscription_id: "{{ subscription_id }}"
        resource_group: "{{ resource_group }}"
        name: algo_subnet
        address_prefix: "10.10.0.0/24"
        virtual_network: algo_net
        security_group_name: AlgoSecGroup
        tags:
          Environment: Algo

    - name: Create an instance
      azure_rm_virtualmachine:
        secret: "{{ secret }}"
        tenant: "{{ tenant }}"
        client_id: "{{ client_id }}"
        subscription_id: "{{ subscription_id }}"
        resource_group: "{{ resource_group }}"
        admin_username: ubuntu
        virtual_network: algo_net
        name: "{{ azure_server_name }}"
        ssh_password_enabled: false
        vm_size: "{{ cloud_providers.azure.size }}"
        tags:
          Environment: Algo
        ssh_public_keys:
          - { path: "/home/ubuntu/.ssh/authorized_keys", key_data: "{{ lookup('file', '{{ SSH_keys.public }}') }}" }
        image: "{{ cloud_providers.azure.image }}"
      register: azure_rm_virtualmachine
    # To-do: Add error handling - if vm_size requested is not available, can we fall back to another, ideally with a prompt?

    - set_fact:
        ip_address: "{{ azure_rm_virtualmachine.ansible_facts.azure_vm.properties.networkProfile.networkInterfaces[0].properties.ipConfigurations[0].properties.publicIPAddress.properties.ipAddress }}"
        networkinterface_name: "{{ azure_rm_virtualmachine.ansible_facts.azure_vm.properties.networkProfile.networkInterfaces[0].name }}"

    - name: Ensure the network interface includes all required parameters
      azure_rm_networkinterface:
        secret: "{{ secret }}"
        tenant: "{{ tenant }}"
        client_id: "{{ client_id }}"
        subscription_id: "{{ subscription_id }}"
        name: "{{ networkinterface_name }}"
        resource_group: "{{ resource_group }}"
        virtual_network_name: algo_net
        subnet_name: algo_subnet
        security_group_name: AlgoSecGroup

    - name: Add the instance to an inventory group
      add_host:
        name: "{{ ip_address }}"
        groups: vpn-host
        ansible_ssh_user: ubuntu
        ansible_python_interpreter: "/usr/bin/python2.7"
        ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
        cloud_provider: azure
        ipv6_support: no

    - set_fact:
        cloud_instance_ip: "{{ ip_address }}"

    - name: Ensure the group azure exists in the dynamic inventory file
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        line: '[azure]'

    - name: Populate the dynamic inventory
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        insertafter: '\[azure\]'
        regexp: "^{{ cloud_instance_ip }}.*"
        line: "{{ cloud_instance_ip }}"
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always
@@ -1,102 +1,108 @@
- name: Set the DigitalOcean Access Token fact
  set_fact:
    do_token: "{{ do_access_token | default(lookup('env','DO_API_TOKEN'), true) }}"
    public_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"

- block:
    - name: "Delete the existing Algo SSH keys"
      digital_ocean:
        state: absent
        command: ssh
        api_token: "{{ do_token }}"
        name: "{{ SSH_keys.comment }}"
      register: ssh_keys
      until: ssh_keys.changed != true
      retries: 10
      delay: 1

    - name: "Upload the SSH key"
      digital_ocean:
        state: present
        command: ssh
        ssh_pub_key: "{{ public_key }}"
        api_token: "{{ do_token }}"
        name: "{{ SSH_keys.comment }}"
      register: do_ssh_key

    - name: "Creating a droplet..."
      digital_ocean:
        state: present
        command: droplet
        name: "{{ do_server_name }}"
        region_id: "{{ do_region }}"
        size_id: "{{ cloud_providers.digitalocean.size }}"
        image_id: "{{ cloud_providers.digitalocean.image }}"
        ssh_key_ids: "{{ do_ssh_key.ssh_key.id }}"
        unique_name: yes
        api_token: "{{ do_token }}"
        ipv6: yes
      register: do

    - name: Add the droplet to an inventory group
      add_host:
        name: "{{ do.droplet.ip_address }}"
        groups: vpn-host
        ansible_ssh_user: root
        ansible_python_interpreter: "/usr/bin/python2.7"
        ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
        do_access_token: "{{ do_token }}"
        do_droplet_id: "{{ do.droplet.id }}"
        cloud_provider: digitalocean
        ipv6_support: true

    - set_fact:
        cloud_instance_ip: "{{ do.droplet.ip_address }}"

    - name: Tag the droplet
      digital_ocean_tag:
        name: "Environment:Algo"
        resource_id: "{{ do.droplet.id }}"
        api_token: "{{ do_token }}"
        state: present

    - name: Get droplets
      uri:
        url: "https://api.digitalocean.com/v2/droplets?tag_name=Environment:Algo"
        method: GET
        status_code: 200
        headers:
          Content-Type: "application/json"
          Authorization: "Bearer {{ do_token }}"
      register: do_droplets

    - name: Ensure the group digitalocean exists in the dynamic inventory file
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        line: '[digitalocean]'

    - name: Populate the dynamic inventory
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        insertafter: '\[digitalocean\]'
        regexp: "^{{ item.networks.v4[0].ip_address }}.*"
        line: "{{ item.networks.v4[0].ip_address }}"
      with_items:
        - "{{ do_droplets.json.droplets }}"
  rescue:
    - name: Collect the fail error
      digital_ocean:
        state: absent
        command: ssh
        api_token: "{{ do_token }}"
        name: "{{ SSH_keys.comment }}"
      register: ssh_keys
      ignore_errors: yes

    - debug: var=ssh_keys

    - debug: var=fail_hint
      tags: always

    - fail:
        msg: "Please, ensure that your API token is not read-only."
      tags: always
@@ -1,63 +1,69 @@
- block:
    - set_fact:
        access_key: "{{ aws_access_key | default(lookup('env','AWS_ACCESS_KEY_ID'), true) }}"
        secret_key: "{{ aws_secret_key | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}"
        stack_name: "{{ aws_server_name | replace('.', '-') }}"

    - name: Locate official AMI for region
      ec2_ami_find:
        aws_access_key: "{{ access_key }}"
        aws_secret_key: "{{ secret_key }}"
        name: "ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-amd64-server-*"
        owner: "{{ cloud_providers.ec2.image.owner }}"
        sort: creationDate
        sort_order: descending
        sort_end: 1
        region: "{{ region }}"
      register: ami_search

    - set_fact:
        ami_image: "{{ ami_search.results[0].ami_id }}"

    - include: encrypt_image.yml
      tags: [encrypted]

    - include: cloudformation.yml

    - name: Add new instance to host group
      add_host:
        hostname: "{{ stack.stack_outputs.PublicIP }}"
        groupname: vpn-host
        ansible_ssh_user: ubuntu
        ansible_python_interpreter: "/usr/bin/python2.7"
        ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
        cloud_provider: ec2
        ipv6_support: yes

    - set_fact:
        cloud_instance_ip: "{{ stack.stack_outputs.PublicIP }}"

    - name: Get EC2 instances
      ec2_remote_facts:
        aws_access_key: "{{ access_key }}"
        aws_secret_key: "{{ secret_key }}"
        region: "{{ region }}"
        filters:
          instance-state-name: running
          "tag:Environment": Algo
      register: algo_instances

    - name: Ensure the group ec2 exists in the dynamic inventory file
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        line: '[ec2]'

    - name: Populate the dynamic inventory
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        insertafter: '\[ec2\]'
        regexp: "^{{ item.public_ip_address }}.*"
        line: "{{ item.public_ip_address }}"
      with_items:
        - "{{ algo_instances.instances }}"
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always
@@ -1,64 +1,70 @@
- block:
    - set_fact:
        credentials_file_path: "{{ credentials_file | default(lookup('env','GCE_CREDENTIALS_FILE_PATH'), true) }}"
        ssh_public_key_lookup: "{{ lookup('file', '{{ SSH_keys.public }}') }}"

    - set_fact:
        credentials_file_lookup: "{{ lookup('file', '{{ credentials_file_path }}') }}"

    - set_fact:
        service_account_email: "{{ credentials_file_lookup.client_email | default(lookup('env','GCE_EMAIL')) }}"
        project_id: "{{ credentials_file_lookup.project_id | default(lookup('env','GCE_PROJECT')) }}"

    - name: "Creating a new instance..."
      gce:
        instance_names: "{{ server_name }}"
        zone: "{{ zone }}"
        machine_type: "{{ cloud_providers.gce.size }}"
        image: "{{ cloud_providers.gce.image }}"
        service_account_email: "{{ service_account_email }}"
        credentials_file: "{{ credentials_file_path }}"
        project_id: "{{ project_id }}"
        metadata: '{"ssh-keys":"ubuntu:{{ ssh_public_key_lookup }}"}'
        # ip_forward: true
        tags:
          - "environment-algo"
      register: google_vm

    - name: Add the instance to an inventory group
      add_host:
        name: "{{ google_vm.instance_data[0].public_ip }}"
        groups: vpn-host
        ansible_ssh_user: ubuntu
        ansible_python_interpreter: "/usr/bin/python2.7"
        ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
        cloud_provider: gce
        ipv6_support: no

    - name: Firewall configured
      local_action:
        module: gce_net
        name: "{{ google_vm.instance_data[0].network }}"
        fwname: "algo-ikev2"
        allowed: "udp:500,4500;tcp:22"
        state: "present"
        src_range: 0.0.0.0/0
        service_account_email: "{{ credentials_file_lookup.client_email }}"
        credentials_file: "{{ credentials_file }}"
        project_id: "{{ credentials_file_lookup.project_id }}"

    - set_fact:
        cloud_instance_ip: "{{ google_vm.instance_data[0].public_ip }}"

    - name: Ensure the group gce exists in the dynamic inventory file
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        line: '[gce]'

    - name: Populate the dynamic inventory
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        insertafter: '\[gce\]'
        regexp: "^{{ google_vm.instance_data[0].public_ip }}.*"
        line: "{{ google_vm.instance_data[0].public_ip }}"
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always
@@ -1,28 +1,28 @@
---
- block:
    - include: ubuntu.yml
      when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'

    - include: freebsd.yml
      when: ansible_distribution == 'FreeBSD'

    - name: Install tools
      package: name="{{ item }}" state=present
      with_items:
        - "{{ tools|default([]) }}"
      tags:
        - always

    - name: Sysctl tuning
      sysctl: name="{{ item.item }}" value="{{ item.value }}"
      with_items:
        - "{{ sysctl|default([]) }}"
      tags:
        - always

    - meta: flush_handlers
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always
@@ -1,41 +1,46 @@
---
- block:
    - name: Dnsmasq installed
      package: name=dnsmasq

    - name: Ensure that the dnsmasq user exist
      user: name=dnsmasq groups=nogroup append=yes state=present

    - name: The dnsmasq directory created
      file: dest=/var/lib/dnsmasq state=directory mode=0755 owner=dnsmasq group=nogroup

    - include: ubuntu.yml
      when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'

    - include: freebsd.yml
      when: ansible_distribution == 'FreeBSD'

    - name: Dnsmasq configured
      template: src=dnsmasq.conf.j2 dest="{{ config_prefix|default('/') }}etc/dnsmasq.conf"
      notify:
        - restart dnsmasq

    - name: Adblock script created
      template: src=adblock.sh dest=/usr/local/sbin/adblock.sh owner=root group="{{ root_group|default('root') }}" mode=0755

    - name: Adblock script added to cron
      cron:
        name: Adblock hosts update
        minute: 10
        hour: 2
        job: /usr/local/sbin/adblock.sh
        user: dnsmasq

    - name: Update adblock hosts
      shell: >
        sudo -u dnsmasq "/usr/local/sbin/adblock.sh"

    - meta: flush_handlers

    - name: Dnsmasq enabled and started
      service: name=dnsmasq state=started enabled=yes
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always
@@ -1,35 +1,42 @@
---
- block:
    - name: Add the instance to an inventory group
      add_host:
        name: "{{ server_ip }}"
        groups: vpn-host
        ansible_ssh_user: "{{ server_user }}"
        ansible_python_interpreter: "/usr/bin/python2.7"
        cloud_provider: local
      when: server_ip != "localhost"

    - name: Add the instance to an inventory group
      add_host:
        name: "{{ server_ip }}"
        groups: vpn-host
        ansible_ssh_user: "{{ server_user }}"
        ansible_python_interpreter: "/usr/bin/python2.7"
        ansible_connection: local
        cloud_provider: local
      when: server_ip == "localhost"

    - set_fact:
        cloud_instance_ip: "{{ server_ip }}"

    - name: Ensure the group local exists in the dynamic inventory file
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        line: '[local]'

    - name: Populate the dynamic inventory
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        insertafter: '\[local\]'
        regexp: "^{{ server_ip }}.*"
        line: "{{ server_ip }}"
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always
@@ -1,96 +1,101 @@
---
- block:
    - name: Install tools
      apt: name="{{ item }}" state=latest
      with_items:
        - unattended-upgrades

    - name: Configure unattended-upgrades
      template: src=50unattended-upgrades.j2 dest=/etc/apt/apt.conf.d/50unattended-upgrades owner=root group=root mode=0644

    - name: Periodic upgrades configured
      template: src=10periodic.j2 dest=/etc/apt/apt.conf.d/10periodic owner=root group=root mode=0644

    - name: Find directories for minimizing access
      stat:
        path: "{{ item }}"
      register: minimize_access_directories
      with_items:
        - '/usr/local/sbin'
        - '/usr/local/bin'
        - '/usr/sbin'
        - '/usr/bin'
        - '/sbin'
        - '/bin'

    - name: Minimize access
      file: path='{{ item.stat.path }}' mode='go-w' recurse=yes
      when: item.stat.isdir
      with_items: "{{ minimize_access_directories.results }}"
      no_log: True

    - name: Change shadow ownership to root and mode to 0600
      file: dest='/etc/shadow' owner=root group=root mode=0600

    - name: change su-binary to only be accessible to user and group root
      file: dest='/bin/su' owner=root group=root mode=0750

    - name: Collect Use of privileged commands
      shell: >
        /usr/bin/find {/usr/local/sbin,/usr/local/bin,/sbin,/bin,/usr/sbin,/usr/bin} -xdev \( -perm -4000 -o -perm -2000 \) -type f | awk '{print "-a always,exit -F path=" $1 " -F perm=x -F auid>=500 -F auid!=4294967295 -k privileged" }'
      args:
        executable: /bin/bash
      register: privileged_programs

    # Core dumps
    - name: Restrict core dumps (with PAM)
      lineinfile: dest=/etc/security/limits.conf line="* hard core 0" state=present

    - name: Restrict core dumps (with sysctl)
      sysctl: name=fs.suid_dumpable value=0 ignoreerrors=yes sysctl_set=yes reload=yes state=present

    # Kernel fixes
    - name: Disable Source Routed Packet Acceptance
      sysctl: name="{{item}}" value=0 ignoreerrors=yes sysctl_set=yes reload=yes state=present
      with_items:
        - net.ipv4.conf.all.accept_source_route
        - net.ipv4.conf.default.accept_source_route
      notify:
        - flush routing cache

    - name: Disable ICMP Redirect Acceptance
      sysctl: name="{{item}}" value=0 ignoreerrors=yes sysctl_set=yes reload=yes state=present
      with_items:
        - net.ipv4.conf.all.accept_redirects
        - net.ipv4.conf.default.accept_redirects

    - name: Disable Secure ICMP Redirect Acceptance
      sysctl: name="{{item}}" value=0 ignoreerrors=yes sysctl_set=yes reload=yes state=present
      with_items:
        - net.ipv4.conf.all.secure_redirects
        - net.ipv4.conf.default.secure_redirects
      notify:
        - flush routing cache

    - name: Enable Bad Error Message Protection
      sysctl: name=net.ipv4.icmp_ignore_bogus_error_responses value=1 ignoreerrors=yes sysctl_set=yes reload=yes state=present
      notify:
        - flush routing cache

    - name: Enable RFC-recommended Source Route Validation
      sysctl: name="{{item}}" value=1 ignoreerrors=yes sysctl_set=yes reload=yes state=present
      with_items:
        - net.ipv4.conf.all.rp_filter
        - net.ipv4.conf.default.rp_filter
      notify:
        - flush routing cache

    - name: Do not send ICMP redirects (we are not a router)
      sysctl: name=net.ipv4.conf.all.send_redirects value=0

    - name: SSH config
      template: src=sshd_config.j2 dest=/etc/ssh/sshd_config owner=root group=root mode=0644
      notify:
        - restart ssh
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always
@@ -1,77 +1,82 @@
---
- block:
    - name: Ensure that the sshd_config file has desired options
      blockinfile:
        dest: /etc/ssh/sshd_config
        marker: '# {mark} ANSIBLE MANAGED BLOCK ssh_tunneling_role'
        block: |
          Match Group algo
            AllowTcpForwarding local
            AllowAgentForwarding no
            AllowStreamLocalForwarding no
            PermitTunnel no
            X11Forwarding no
      notify:
        - restart ssh

    - name: Ensure that the algo group exist
      group: name=algo state=present

    - name: Ensure that the jail directory exist
      file: path=/var/jail/ state=directory mode=0755 owner=root group="{{ root_group|default('root') }}"

    - name: Ensure that the SSH users exist
      user:
        name: "{{ item }}"
        groups: algo
        home: '/var/jail/{{ item }}'
        createhome: yes
        generate_ssh_key: yes
        shell: /bin/false
        ssh_key_type: ecdsa
        ssh_key_bits: 256
        ssh_key_comment: '{{ item }}@{{ IP_subject_alt_name }}'
        ssh_key_passphrase: "{{ easyrsa_p12_export_password }}"
        state: present
        append: yes
      with_items: "{{ users }}"

    - name: The authorized keys file created
      file:
        src: '/var/jail/{{ item }}/.ssh/id_ecdsa.pub'
        dest: '/var/jail/{{ item }}/.ssh/authorized_keys'
        owner: "{{ item }}"
        group: "{{ item }}"
        state: link
      with_items: "{{ users }}"

    - name: Generate SSH fingerprints
      shell: >
        ssh-keyscan {{ IP_subject_alt_name }} 2>/dev/null
      register: ssh_fingerprints

    - name: Fetch users SSH private keys
      fetch: src='/var/jail/{{ item }}/.ssh/id_ecdsa' dest=configs/{{ IP_subject_alt_name }}/{{ item }}.ssh.pem flat=yes
      with_items: "{{ users }}"

    - name: Change mode for SSH private keys
      local_action: file path=configs/{{ IP_subject_alt_name }}/{{ item }}.ssh.pem mode=0600
      with_items: "{{ users }}"
      become: false

    - name: Fetch the known_hosts file
      local_action:
        module: template
        src: known_hosts.j2
        dest: configs/{{ IP_subject_alt_name }}/known_hosts
      become: no

    - name: Build the client ssh config
      local_action:
        module: template
        src: ssh_config.j2
        dest: configs/{{ IP_subject_alt_name }}/{{ item }}.ssh_config
        mode: 0600
      become: no
      with_items:
        - "{{ users }}"
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always
@@ -1,31 +1,36 @@
---
- block:
    - name: Ensure that the strongswan group exist
      group: name=strongswan state=present

    - name: Ensure that the strongswan user exist
      user: name=strongswan group=strongswan state=present

    - include: ubuntu.yml
      when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'

    - include: freebsd.yml
      when: ansible_distribution == 'FreeBSD'

    - name: Install strongSwan
      package: name=strongswan state=present

    - name: Get StrongSwan versions
      shell: >
        ipsec --versioncode | grep -oE "^U([0-9]*|\.)*" | sed "s/^U\|\.//g"
      register: strongswan_version

    - include: ipec_configuration.yml
    - include: openssl.yml
    - include: distribute_keys.yml
    - include: client_configs.yml

    - meta: flush_handlers

    - name: strongSwan started
      service: name=strongswan state=started
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always
345  users.yml
@@ -6,27 +6,33 @@
    - config.cfg

  tasks:
    - block:
        - name: Add the server to the vpn-host group
          add_host:
            hostname: "{{ server_ip }}"
            groupname: vpn-host
            ansible_ssh_user: "{{ server_user }}"
            ansible_python_interpreter: "/usr/bin/python2.7"
            ssh_tunneling_enabled: "{{ ssh_tunneling_enabled }}"
            easyrsa_CA_password: "{{ easyrsa_CA_password }}"
            IP_subject: "{{ IP_subject }}"
            ansible_ssh_private_key_file: "{{ SSH_keys.private }}"

        - name: Wait until SSH becomes ready...
          local_action:
            module: wait_for
            port: 22
            host: "{{ server_ip }}"
            search_regex: "OpenSSH"
            delay: 10
            timeout: 320
            state: present
          become: false
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

- name: User management
  hosts: vpn-host
@ -37,171 +43,188 @@
|
|||
- roles/vpn/defaults/main.yml
|
||||
|
||||
pre_tasks:
|
||||
- name: Common pre-tasks
|
||||
include: playbooks/common.yml
|
||||
- block:
|
||||
- name: Common pre-tasks
|
||||
include: playbooks/common.yml
|
||||
rescue:
|
||||
- debug: var=fail_hint
|
||||
tags: always
|
||||
- fail:
|
||||
tags: always
|
||||
|
||||
roles:
|
||||
- { role: ssh_tunneling, tags: [ 'ssh_tunneling' ], when: ssh_tunneling_enabled is defined and ssh_tunneling_enabled == "y" }
|
||||
|
||||
tasks:
|
||||
- block:
|
||||
- name: Gather Facts
|
||||
setup:
|
||||
|
||||
- name: Gather Facts
|
||||
setup:
|
||||
- name: Checking the signature algorithm
|
||||
local_action: >
|
||||
shell openssl x509 -text -in certs/{{ IP_subject_alt_name }}.crt | grep 'Signature Algorithm' | head -n1
|
||||
become: no
|
||||
register: sig_algo
|
||||
args:
|
||||
chdir: "configs/{{ IP_subject_alt_name }}/pki/"
|
||||
|
||||
- name: Checking the signature algorithm
|
||||
local_action: >
|
||||
shell openssl x509 -text -in certs/{{ IP_subject_alt_name }}.crt | grep 'Signature Algorithm' | head -n1
|
||||
become: no
|
||||
register: sig_algo
|
||||
args:
|
||||
chdir: "configs/{{ IP_subject_alt_name }}/pki/"
|
||||
- name: Change the algorithm to RSA
|
||||
set_fact:
|
||||
algo_params: "rsa:2048"
|
||||
when: '"ecdsa" not in sig_algo.stdout'
|
||||
|
||||
- name: Change the algorithm to RSA
|
||||
set_fact:
|
||||
algo_params: "rsa:2048"
|
||||
when: '"ecdsa" not in sig_algo.stdout'
|
||||
- name: Build the client's pair
|
||||
local_action: >
|
||||
shell openssl req -utf8 -new -newkey {{ algo_params | default('ec:ecparams/prime256v1.pem') }} -config openssl.cnf -keyout private/{{ item }}.key -out reqs/{{ item }}.req -nodes -passin pass:"{{ easyrsa_CA_password }}" -subj "/CN={{ item }}" -batch &&
|
||||
openssl ca -utf8 -in reqs/{{ item }}.req -out certs/{{ item }}.crt -config openssl.cnf -days 3650 -batch -passin pass:"{{ easyrsa_CA_password }}" -subj "/CN={{ item }}" &&
|
||||
touch certs/{{ item }}_crt_generated
|
||||
become: no
|
||||
args:
|
||||
chdir: "configs/{{ IP_subject_alt_name }}/pki/"
|
||||
creates: certs/{{ item }}_crt_generated
|
||||
environment:
|
||||
subjectAltName: "DNS:{{ item }}"
|
||||
with_items: "{{ users }}"
|
||||
|
||||
- name: Build the client's pair
|
||||
local_action: >
|
||||
shell openssl req -utf8 -new -newkey {{ algo_params | default('ec:ecparams/prime256v1.pem') }} -config openssl.cnf -keyout private/{{ item }}.key -out reqs/{{ item }}.req -nodes -passin pass:"{{ easyrsa_CA_password }}" -subj "/CN={{ item }}" -batch &&
|
||||
openssl ca -utf8 -in reqs/{{ item }}.req -out certs/{{ item }}.crt -config openssl.cnf -days 3650 -batch -passin pass:"{{ easyrsa_CA_password }}" -subj "/CN={{ item }}" &&
|
||||
touch certs/{{ item }}_crt_generated
|
||||
become: no
|
||||
args:
|
||||
chdir: "configs/{{ IP_subject_alt_name }}/pki/"
|
||||
creates: certs/{{ item }}_crt_generated
|
||||
environment:
|
||||
subjectAltName: "DNS:{{ item }}"
|
||||
with_items: "{{ users }}"
|
||||
- name: Build the client's p12
|
||||
local_action: >
|
||||
shell openssl pkcs12 -in certs/{{ item }}.crt -inkey private/{{ item }}.key -export -name {{ item }} -out private/{{ item }}.p12 -certfile cacert.pem -passout pass:"{{ easyrsa_p12_export_password }}"
|
||||
become: no
|
||||
args:
|
||||
chdir: "configs/{{ IP_subject_alt_name }}/pki/"
|
||||
with_items: "{{ users }}"
|
||||
|
||||
- name: Build the client's p12
|
||||
local_action: >
|
||||
shell openssl pkcs12 -in certs/{{ item }}.crt -inkey private/{{ item }}.key -export -name {{ item }} -out private/{{ item }}.p12 -certfile cacert.pem -passout pass:"{{ easyrsa_p12_export_password }}"
|
||||
become: no
|
||||
args:
|
||||
chdir: "configs/{{ IP_subject_alt_name }}/pki/"
|
||||
with_items: "{{ users }}"
|
||||
- name: Copy the p12 certificates
|
||||
local_action:
|
||||
module: copy
|
||||
src: "configs/{{ IP_subject_alt_name }}/pki/private/{{ item }}.p12"
|
||||
dest: "configs/{{ IP_subject_alt_name }}/{{ item }}.p12"
|
||||
mode: 0600
|
||||
become: no
|
||||
with_items:
|
||||
- "{{ users }}"
|
||||
|
||||
- name: Copy the p12 certificates
|
||||
local_action:
|
||||
module: copy
|
||||
src: "configs/{{ IP_subject_alt_name }}/pki/private/{{ item }}.p12"
|
||||
dest: "configs/{{ IP_subject_alt_name }}/{{ item }}.p12"
|
||||
mode: 0600
|
||||
become: no
|
||||
with_items:
|
||||
- "{{ users }}"
|
||||
      - name: Get active users
        local_action: >
          shell grep ^V index.txt | grep -v "{{ IP_subject_alt_name }}" | awk '{print $5}' | sed 's/\/CN=//g'
        become: no
        args:
          chdir: "configs/{{ IP_subject_alt_name }}/pki/"
        register: valid_certs

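      # Valid certificates whose user was removed from the configuration are revoked and a
      # CRL is written to crl/; the *_revoked marker file keeps the task idempotent via
      # "creates:". Optional manual check (assuming the CRL is emitted in PEM form):
      #   openssl crl -in crl/<username>.crt -noout -text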
      - name: Revoke non-existing users
        local_action: >
          shell openssl ca -config openssl.cnf -passin pass:"{{ easyrsa_CA_password }}" -revoke certs/{{ item }}.crt &&
          openssl ca -gencrl -config openssl.cnf -passin pass:"{{ easyrsa_CA_password }}" -revoke certs/{{ item }}.crt -out crl/{{ item }}.crt &&
          touch crl/{{ item }}_revoked
        become: no
        args:
          chdir: "configs/{{ IP_subject_alt_name }}/pki/"
          creates: crl/{{ item }}_revoked
        environment:
          subjectAltName: "DNS:{{ item }}"
        when: item not in users
        with_items: "{{ valid_certs.stdout_lines }}"

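      # strongSwan reads revocation lists from /etc/ipsec.d/crls on the server; notifying
      # the rereadcrls handler (declared at the end of this play) asks the daemon to reload
      # them, presumably via something like `ipsec rereadcrls`.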
      - name: Copy the revoked certificates to the vpn server
        copy:
          src: configs/{{ IP_subject_alt_name }}/pki/crl/{{ item }}.crt
          dest: "{{ config_prefix|default('/') }}etc/ipsec.d/crls/{{ item }}.crt"
        when: item not in users
        with_items: "{{ valid_certs.stdout_lines }}"
        notify:
          - rereadcrls

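      # Base64-encoding each p12 lets it be embedded inline in the Apple .mobileconfig
      # profile; the per-user results land in PayloadContent.results and are consumed by
      # with_together further below.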
      - name: Register p12 PayloadContent
        local_action: >
          shell cat private/{{ item }}.p12 | base64
        register: PayloadContent
        become: no
        args:
          chdir: "configs/{{ IP_subject_alt_name }}/pki/"
        with_items: "{{ users }}"

      - name: Set facts for mobileconfigs
        set_fact:
          proxy_enabled: false
          PayloadContentCA: "{{ lookup('file' , 'configs/{{ IP_subject_alt_name }}/pki/cacert.pem')|b64encode }}"

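      # with_together pairs each username (item.0) with its matching entry from
      # PayloadContent.results; no_log keeps the base64-encoded key material out of the
      # Ansible output.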
      - name: Build the mobileconfigs
        local_action:
          module: template
          src: roles/vpn/templates/mobileconfig.j2
          dest: configs/{{ IP_subject_alt_name }}/{{ item.0 }}.mobileconfig
          mode: 0600
        become: no
        with_together:
          - "{{ users }}"
          - "{{ PayloadContent.results }}"
        no_log: True

      - name: Build the client ipsec config file
        local_action:
          module: template
          src: roles/vpn/templates/client_ipsec.conf.j2
          dest: configs/{{ IP_subject_alt_name }}/ipsec_{{ item }}.conf
          mode: 0600
        become: no
        with_items:
          - "{{ users }}"

      - name: Build the client ipsec secret file
        local_action:
          module: template
          src: roles/vpn/templates/client_ipsec.secrets.j2
          dest: configs/{{ IP_subject_alt_name }}/ipsec_{{ item }}.secrets
          mode: 0600
        become: no
        with_items:
          - "{{ users }}"

      - name: Build the windows client powershell script
        local_action:
          module: template
          src: roles/vpn/templates/client_windows.ps1.j2
          dest: configs/{{ IP_subject_alt_name }}/windows_{{ item }}.ps1
          mode: 0600
        become: no
        when: Win10_Enabled is defined and Win10_Enabled == "Y"
        with_items: "{{ users }}"

      # SSH

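      # getent group algo prints the members of the "algo" group (the accounts used for SSH
      # tunneling); cutting the fourth colon-separated field and splitting on commas yields
      # one username per line.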
      - name: SSH | Get active system users
        shell: >
          getent group algo | cut -f4 -d: | sed "s/,/\n/g"
        register: valid_users
        when: ssh_tunneling_enabled is defined and ssh_tunneling_enabled == "y"

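      # Accounts from that group which no longer appear in the users list are removed along
      # with their home directories (remove/force), but only when SSH tunneling is enabled.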
      - name: SSH | Delete non-existing users
        user:
          name: "{{ item }}"
          state: absent
          remove: yes
          force: yes
        when: item not in users and ssh_tunneling_enabled is defined and ssh_tunneling_enabled == "y"
        with_items: "{{ valid_users.stdout_lines | default('null') }}"

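      # Any failure inside the block above drops into this rescue, which prints fail_hint
      # and then aborts the play.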
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

  post_tasks:
    - block:
      - debug:
          msg:
            - "{{ congrats.common.split('\n') }}"
            - " {{ congrats.p12_pass }}"
        tags: always
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

  handlers:
    - name: rereadcrls