From 02427910de8f6765c91ac5084e8712ded7dbe78c Mon Sep 17 00:00:00 2001 From: Jack Ivanov <17044561+jackivanov@users.noreply.github.com> Date: Fri, 2 Mar 2018 15:55:54 +0300 Subject: [PATCH] Ansible 2.4, Lightsail, Scaleway, DreamCompute (OpenStack) integration (#804) * Move to ansible-2.4.3 * Add Lightsail support #623 * Fixing the EC2 deployment * Scaleway integration #623 * OpenStack cloud provider (DreamCompute optimised) #623 * Remove the security role * Enable unattended-upgrades for clouds * New requirements to make Azure and GCE work --- .travis.yml | 2 +- algo | 137 ++++- config.cfg | 10 + deploy.yml | 14 +- library/digital_ocean_tag.py | 197 +++---- library/lightsail.py | 551 ++++++++++++++++++ playbooks/common.yml | 6 +- playbooks/freebsd.yml | 2 +- playbooks/post.yml | 2 +- requirements.txt | 7 +- roles/client/tasks/main.yml | 2 +- roles/client/tasks/systems/main.yml | 8 +- roles/cloud-ec2/tasks/main.yml | 6 +- roles/cloud-lightsail/tasks/main.yml | 52 ++ roles/cloud-openstack/tasks/main.yml | 87 +++ roles/cloud-scaleway/tasks/main.yml | 128 ++++ roles/common/tasks/main.yml | 4 +- roles/common/tasks/ubuntu.yml | 70 ++- roles/common/tasks/unattended-upgrades.yml | 21 + .../templates/10periodic.j2 | 2 +- .../templates/50unattended-upgrades.j2 | 4 +- roles/dns_adblocking/tasks/main.yml | 4 +- roles/security/handlers/main.yml | 5 - roles/security/meta/main.yml | 4 - roles/security/tasks/main.yml | 161 ----- roles/security/templates/sshd_config.j2 | 51 -- roles/vpn/tasks/main.yml | 12 +- roles/vpn/tasks/ubuntu.yml | 2 +- users.yml | 2 +- 29 files changed, 1123 insertions(+), 430 deletions(-) create mode 100644 library/lightsail.py create mode 100644 roles/cloud-lightsail/tasks/main.yml create mode 100644 roles/cloud-openstack/tasks/main.yml create mode 100644 roles/cloud-scaleway/tasks/main.yml create mode 100644 roles/common/tasks/unattended-upgrades.yml rename roles/{security => common}/templates/10periodic.j2 (76%) rename roles/{security => common}/templates/50unattended-upgrades.j2 (97%) delete mode 100644 roles/security/handlers/main.yml delete mode 100644 roles/security/meta/main.yml delete mode 100644 roles/security/tasks/main.yml delete mode 100644 roles/security/templates/sshd_config.j2 diff --git a/.travis.yml b/.travis.yml index c751a6e..ae0adc4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -52,7 +52,7 @@ script: # - shellcheck algo # - ansible-lint deploy.yml users.yml deploy_client.yml - ansible-playbook deploy.yml --syntax-check - - ansible-playbook deploy.yml -t local,vpn,dns,ssh_tunneling,security,tests -e "server_ip=$LXC_IP server_user=root IP_subject_alt_name=$LXC_IP local_dns=Y" + - ansible-playbook deploy.yml -t local,vpn,dns,ssh_tunneling,tests -e "server_ip=$LXC_IP server_user=root IP_subject_alt_name=$LXC_IP local_dns=Y" after_script: - ./tests/update-users.sh diff --git a/algo b/algo index 392464e..dca852c 100755 --- a/algo +++ b/algo @@ -48,12 +48,6 @@ Do you want each user to have their own account for SSH tunneling? ssh_tunneling_enabled=${ssh_tunneling_enabled:-n} if [[ "$ssh_tunneling_enabled" =~ ^(y|Y)$ ]]; then ROLES+=" ssh_tunneling"; fi -read -p " -Do you want to apply operating system security enhancements on the server? (warning: replaces your sshd_config) -[y/N]: " -r security_enabled -security_enabled=${security_enabled:-n} -if [[ "$security_enabled" =~ ^(y|Y)$ ]]; then ROLES+=" security"; fi - read -p " Do you want the VPN to support Windows 10 or Linux Desktop clients? 
(enables compatible ciphers and key exchange, less secure) [y/N]: " -r Win10_Enabled @@ -290,6 +284,115 @@ Enter the number of your desired region: EXTRA_VARS="aws_access_key=$aws_access_key aws_secret_key=$aws_secret_key aws_server_name=$aws_server_name ssh_public_key=$ssh_public_key region=$region" } +lightsail () { +read -p " +Enter your aws_access_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html) +Note: Make sure to use an IAM user with an acceptable policy attached (see https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md). +$ADDITIONAL_PROMPT +[AKIA...]: " -rs aws_access_key + +read -p " + +Enter your aws_secret_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html) +$ADDITIONAL_PROMPT +[ABCD...]: " -rs aws_secret_key + +read -p " + +Name the vpn server: +[algo.local]: " -r algo_server_name + algo_server_name=${algo_server_name:-algo.local} + + read -p " + + What region should the server be located in? + 1. us-east-1 US East (N. Virginia) + 2. us-east-2 US East (Ohio) + 3. us-west-1 US West (N. California) + 4. us-west-2 US West (Oregon) + 5. ap-south-1 Asia Pacific (Mumbai) + 6. ap-northeast-2 Asia Pacific (Seoul) + 7. ap-southeast-1 Asia Pacific (Singapore) + 8. ap-southeast-2 Asia Pacific (Sydney) + 9. ap-northeast-1 Asia Pacific (Tokyo) + 10. eu-central-1 EU (Frankfurt) + 11. eu-west-1 EU (Ireland) + 12. eu-west-2 EU (London) +Enter the number of your desired region: +[1]: " -r algo_region +algo_region=${algo_region:-1} + + case "$algo_region" in + 1) region="us-east-1" ;; + 2) region="us-east-2" ;; + 3) region="us-west-1" ;; + 4) region="us-west-2" ;; + 5) region="ap-south-1" ;; + 6) region="ap-northeast-2" ;; + 7) region="ap-southeast-1" ;; + 8) region="ap-southeast-2" ;; + 9) region="ap-northeast-1" ;; + 10) region="eu-central-1" ;; + 11) region="eu-west-1" ;; + 12) region="eu-west-2";; + esac + + ROLES="lightsail vpn cloud" + EXTRA_VARS="aws_access_key=$aws_access_key aws_secret_key=$aws_secret_key algo_server_name=$algo_server_name region=$region" +} + +scaleway () { +read -p " +Enter your auth token (https://www.scaleway.com/docs/generate-an-api-token/) +$ADDITIONAL_PROMPT +[...]: " -rs scaleway_auth_token + +read -p " + +Enter your organization name (https://cloud.scaleway.com/#/billing) +$ADDITIONAL_PROMPT +[...]: " -rs scaleway_organization + +read -p " + +Name the vpn server: +[algo.local]: " -r algo_server_name + algo_server_name=${algo_server_name:-algo.local} + + read -p " + + What region should the server be located in? + 1. par1 Paris + 2. 
ams1 Amsterdam +Enter the number of your desired region: +[1]: " -r algo_region +algo_region=${algo_region:-1} + + case "$algo_region" in + 1) region="par1" ;; + 2) region="ams1" ;; + esac + + ROLES="scaleway vpn cloud" + EXTRA_VARS="scaleway_auth_token=$scaleway_auth_token scaleway_organization=\"$scaleway_organization\" algo_server_name=$algo_server_name algo_region=$region" +} + +openstack () { +read -p " +Enter the local path to your credentials OpenStack RC file (can be downloaded from the OpenStack dashboard->Compute->API Access) +[...]: " -r os_rc + +read -p " + +Name the vpn server: +[algo.local]: " -r algo_server_name + algo_server_name=${algo_server_name:-algo.local} + + ROLES="openstack vpn cloud" + EXTRA_VARS="algo_server_name=$algo_server_name" + source "$os_rc" +} + gce () { read -p " Enter the local path to your credentials JSON file (https://support.google.com/cloud/answer/6158849?hl=en&ref_topic=6262490#serviceaccounts): @@ -433,10 +536,13 @@ algo_provisioning () { echo -n " What provider would you like to use? 1. DigitalOcean - 2. Amazon EC2 - 3. Microsoft Azure - 4. Google Compute Engine - 5. Install to existing Ubuntu 16.04 server + 2. Amazon Lightsail + 3. Amazon EC2 + 4. Microsoft Azure + 5. Google Compute Engine + 6. Scaleway + 7. OpenStack (DreamCompute optimised) + 8. Install to existing Ubuntu 16.04 server Enter the number of your desired provider : " @@ -445,10 +551,13 @@ Enter the number of your desired provider case "$N" in 1) digitalocean; ;; - 2) ec2; ;; - 3) azure; ;; - 4) gce; ;; - 5) non_cloud; ;; + 2) lightsail; ;; + 3) ec2; ;; + 4) azure; ;; + 5) gce; ;; + 6) scaleway; ;; + 7) openstack; ;; + 8) non_cloud; ;; *) exit 1 ;; esac diff --git a/config.cfg b/config.cfg index 40382e6..d5cc0a5 100644 --- a/config.cfg +++ b/config.cfg @@ -86,6 +86,16 @@ cloud_providers: gce: size: f1-micro image: ubuntu-1604 # ubuntu-1604 / ubuntu-1704 + lightsail: + size: nano_1_0 + image: ubuntu_16_04 + scaleway: + size: VC1S + image: Ubuntu Xenial + arch: x86_64 + openstack: + flavor_ram: ">=512" + image: Ubuntu-16.04 local: fail_hint: diff --git a/deploy.yml b/deploy.yml index 6caa70c..fa5212e 100644 --- a/deploy.yml +++ b/deploy.yml @@ -7,11 +7,11 @@ pre_tasks: - block: - name: Local pre-tasks - include: playbooks/local.yml + include_tasks: playbooks/local.yml tags: [ 'always' ] - name: Local pre-tasks - include: playbooks/local_ssh.yml + include_tasks: playbooks/local_ssh.yml become: false when: Deployed_By_Algo is defined and Deployed_By_Algo == "Y" tags: [ 'local' ] @@ -26,12 +26,15 @@ - { role: cloud-ec2, tags: ['ec2'] } - { role: cloud-gce, tags: ['gce'] } - { role: cloud-azure, tags: ['azure'] } + - { role: cloud-lightsail, tags: ['lightsail'] } + - { role: cloud-scaleway, tags: ['scaleway'] } + - { role: cloud-openstack, tags: ['openstack'] } - { role: local, tags: ['local'] } post_tasks: - block: - name: Local post-tasks - include: playbooks/post.yml + include_tasks: playbooks/post.yml become: false tags: [ 'cloud' ] rescue: @@ -51,8 +54,8 @@ pre_tasks: - block: - name: Common pre-tasks - include: playbooks/common.yml - tags: [ 'digitalocean', 'ec2', 'gce', 'azure', 'local', 'pre' ] + include_tasks: playbooks/common.yml + tags: [ 'digitalocean', 'ec2', 'gce', 'azure', 'lightsail', 'scaleway', 'openstack', 'local', 'pre' ] rescue: - debug: var=fail_hint tags: always @@ -60,7 +63,6 @@ tags: always roles: - - { role: security, tags: [ 'security' ] } - { role: dns_adblocking, tags: ['dns', 'adblock' ] } - { role: ssh_tunneling, tags: [ 'ssh_tunneling' ] } - { role: vpn, 
tags: [ 'vpn' ] } diff --git a/library/digital_ocean_tag.py b/library/digital_ocean_tag.py index b80d18b..30a3185 100644 --- a/library/digital_ocean_tag.py +++ b/library/digital_ocean_tag.py @@ -1,26 +1,25 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + DOCUMENTATION = ''' --- module: digital_ocean_tag short_description: Create and remove tag(s) to DigitalOcean resource. description: - Create and remove tag(s) to DigitalOcean resource. +author: "Victor Volle (@kontrafiktion)" version_added: "2.2" options: name: @@ -31,9 +30,11 @@ options: resource_id: description: - The ID of the resource to operate on. + - The data type of resource_id is changed from integer to string, from version 2.5. + aliases: ['droplet_id'] resource_type: description: - - The type of resource to operate on. Currently only tagging of + - The type of resource to operate on. Currently, only tagging of droplets is supported. 
default: droplet choices: ['droplet'] @@ -65,7 +66,7 @@ EXAMPLES = ''' - name: tag a resource; creating the tag if it does not exists digital_ocean_tag: name: "{{ item }}" - resource_id: YYY + resource_id: "73333005" state: present with_items: - staging @@ -74,7 +75,7 @@ EXAMPLES = ''' - name: untag a resource digital_ocean_tag: name: staging - resource_id: YYY + resource_id: "73333005" state: absent # Deleting a tag also untags all the resources that have previously been @@ -104,133 +105,90 @@ data: } ''' -import json -import os - +from traceback import format_exc from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -class Response(object): - - def __init__(self, resp, info): - self.body = None - if resp: - self.body = resp.read() - self.info = info - - @property - def json(self): - if not self.body: - if "body" in self.info: - return json.loads(self.info["body"]) - return None - try: - return json.loads(self.body) - except ValueError: - return None - - @property - def status_code(self): - return self.info["status"] - - -class Rest(object): - - def __init__(self, module, headers): - self.module = module - self.headers = headers - self.baseurl = 'https://api.digitalocean.com/v2' - - def _url_builder(self, path): - if path[0] == '/': - path = path[1:] - return '%s/%s' % (self.baseurl, path) - - def send(self, method, path, data=None, headers=None): - url = self._url_builder(path) - data = self.module.jsonify(data) - - resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method) - - return Response(resp, info) - - def get(self, path, data=None, headers=None): - return self.send('GET', path, data, headers) - - def put(self, path, data=None, headers=None): - return self.send('PUT', path, data, headers) - - def post(self, path, data=None, headers=None): - return self.send('POST', path, data, headers) - - def delete(self, path, data=None, headers=None): - return self.send('DELETE', path, data, headers) +from ansible.module_utils.digital_ocean import DigitalOceanHelper +from ansible.module_utils._text import to_native def core(module): - try: - api_token = module.params['api_token'] or \ - os.environ['DO_API_TOKEN'] or os.environ['DO_API_KEY'] - except KeyError as e: - module.fail_json(msg='Unable to load %s' % e.message) - state = module.params['state'] name = module.params['name'] resource_id = module.params['resource_id'] resource_type = module.params['resource_type'] - rest = Rest(module, {'Authorization': 'Bearer {}'.format(api_token), - 'Content-type': 'application/json'}) + rest = DigitalOceanHelper(module) - if state in ('present'): - if name is None: - module.fail_json(msg='parameter `name` is missing') - - # Ensure Tag exists - response = rest.post("tags", data={'name': name}) + # Check if api_token is valid or not + response = rest.get('account') + if response.status_code == 401: + module.fail_json(msg='Failed to login using api_token, please verify ' + 'validity of api_token') + if state == 'present': + response = rest.get('tags/{0}'.format(name)) status_code = response.status_code - json = response.json - if status_code == 201: - changed = True - elif status_code == 422: + resp_json = response.json + changed = False + if status_code == 200 and resp_json['tag']['name'] == name: changed = False else: - module.exit_json(changed=False, data=json) + # Ensure Tag exists + response = rest.post("tags", data={'name': name}) + status_code = response.status_code + resp_json = response.json + if status_code == 201: 
+ changed = True + elif status_code == 422: + changed = False + else: + module.exit_json(changed=False, data=resp_json) if resource_id is None: # No resource defined, we're done. - if json is None: - module.exit_json(changed=changed, data=json) - else: - module.exit_json(changed=changed, data=json) + module.exit_json(changed=changed, data=resp_json) else: - # Tag a resource - url = "tags/{}/resources".format(name) - payload = { - 'resources': [{ - 'resource_id': resource_id, - 'resource_type': resource_type}]} - response = rest.post(url, data=payload) - if response.status_code == 204: - module.exit_json(changed=True) + # Check if resource is already tagged or not + found = False + url = "{0}?tag_name={1}".format(resource_type, name) + if resource_type == 'droplet': + url = "droplets?tag_name={0}".format(name) + response = rest.get(url) + status_code = response.status_code + resp_json = response.json + if status_code == 200: + for resource in resp_json['droplets']: + if not found and resource['id'] == int(resource_id): + found = True + break + if not found: + # If resource is not tagged, tag a resource + url = "tags/{0}/resources".format(name) + payload = { + 'resources': [{ + 'resource_id': resource_id, + 'resource_type': resource_type}]} + response = rest.post(url, data=payload) + if response.status_code == 204: + module.exit_json(changed=True) + else: + module.fail_json(msg="error tagging resource '{0}': {1}".format(resource_id, response.json["message"])) + else: + # Already tagged resource + module.exit_json(changed=False) else: - module.fail_json(msg="error tagging resource '{}': {}".format( - resource_id, response.json["message"])) - - elif state in ('absent'): - if name is None: - module.fail_json(msg='parameter `name` is missing') + # Unable to find resource specified by user + module.fail_json(msg=resp_json['message']) + elif state == 'absent': if resource_id: - url = "tags/{}/resources".format(name) + url = "tags/{0}/resources".format(name) payload = { 'resources': [{ 'resource_id': resource_id, 'resource_type': resource_type}]} response = rest.delete(url, data=payload) else: - url = "tags/{}".format(name) + url = "tags/{0}".format(name) response = rest.delete(url) if response.status_code == 204: module.exit_json(changed=True) @@ -252,7 +210,8 @@ def main(): try: core(module) except Exception as e: - module.fail_json(msg=str(e)) + module.fail_json(msg=to_native(e), exception=format_exc()) + if __name__ == '__main__': main() diff --git a/library/lightsail.py b/library/lightsail.py new file mode 100644 index 0000000..99e49ac --- /dev/null +++ b/library/lightsail.py @@ -0,0 +1,551 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: lightsail +short_description: Create or delete a virtual machine instance in AWS Lightsail +description: + - Creates or deletes instances in AWS Lightsail and optionally waits for them to be 'running'. +version_added: "2.4" +author: "Nick Ball (@nickball)" +options: + state: + description: + - Indicate desired state of the target. 
+ default: present + choices: ['present', 'absent', 'running', 'restarted', 'stopped'] + name: + description: + - Name of the instance + required: true + default: null + zone: + description: + - AWS availability zone in which to launch the instance. Required when state='present' + required: false + default: null + blueprint_id: + description: + - ID of the instance blueprint image. Required when state='present' + required: false + default: null + bundle_id: + description: + - Bundle of specification info for the instance. Required when state='present' + required: false + default: null + user_data: + description: + - Launch script that can configure the instance with additional data + required: false + default: null + key_pair_name: + description: + - Name of the key pair to use with the instance + required: false + default: null + wait: + description: + - Wait for the instance to be in state 'running' before returning. If wait is "no" an ip_address may not be returned + default: "yes" + choices: [ "yes", "no" ] + wait_timeout: + description: + - How long before wait gives up, in seconds. + default: 300 + open_ports: + description: + - Adds public ports to an Amazon Lightsail instance. + default: null + suboptions: + from_port: + description: Beginning of the range + required: true + default: null + to_port: + description: End of the range + required: true + default: null + protocol: + description: Accepted traffic protocol. + required: true + choices: + - udp + - tcp + - all + default: null +requirements: + - "python >= 2.6" + - boto3 + +extends_documentation_fragment: + - aws + - ec2 +''' + + +EXAMPLES = ''' +# Create a new Lightsail instance, register the instance details +- lightsail: + state: present + name: myinstance + region: us-east-1 + zone: us-east-1a + blueprint_id: ubuntu_16_04 + bundle_id: nano_1_0 + key_pair_name: id_rsa + user_data: " echo 'hello world' > /home/ubuntu/test.txt" + wait_timeout: 500 + open_ports: + - from_port: 4500 + to_port: 4500 + protocol: udp + - from_port: 500 + to_port: 500 + protocol: udp + register: my_instance + +- debug: + msg: "Name is {{ my_instance.instance.name }}" + +- debug: + msg: "IP is {{ my_instance.instance.publicIpAddress }}" + +# Delete an instance if present +- lightsail: + state: absent + region: us-east-1 + name: myinstance + +''' + +RETURN = ''' +changed: + description: if the instance has been modified/created + returned: always + type: bool + sample: + changed: true +instance: + description: instance data + returned: always + type: dict + sample: + arn: "arn:aws:lightsail:us-east-1:448830907657:Instance/1fef0175-d6c8-480e-84fa-214f969cda87" + blueprint_id: "ubuntu_16_04" + blueprint_name: "Ubuntu" + bundle_id: "nano_1_0" + created_at: "2017-03-27T08:38:59.714000-04:00" + hardware: + cpu_count: 1 + ram_size_in_gb: 0.5 + is_static_ip: false + location: + availability_zone: "us-east-1a" + region_name: "us-east-1" + name: "my_instance" + networking: + monthly_transfer: + gb_per_month_allocated: 1024 + ports: + - access_direction: "inbound" + access_from: "Anywhere (0.0.0.0/0)" + access_type: "public" + common_name: "" + from_port: 80 + protocol: tcp + to_port: 80 + - access_direction: "inbound" + access_from: "Anywhere (0.0.0.0/0)" + access_type: "public" + common_name: "" + from_port: 22 + protocol: tcp + to_port: 22 + private_ip_address: "172.26.8.14" + public_ip_address: "34.207.152.202" + resource_type: "Instance" + ssh_key_name: "keypair" + state: + code: 16 + name: running + support_code: "588307843083/i-0997c97831ee21e33" + 
username: "ubuntu" +''' + +import time +import traceback + +try: + import botocore + HAS_BOTOCORE = True +except ImportError: + HAS_BOTOCORE = False + +try: + import boto3 +except ImportError: + # will be caught by imported HAS_BOTO3 + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn, + HAS_BOTO3, camel_dict_to_snake_dict) + + +def create_instance(module, client, instance_name): + """ + Create an instance + + module: Ansible module object + client: authenticated lightsail connection object + instance_name: name of instance to delete + + Returns a dictionary of instance information + about the new instance. + + """ + + changed = False + + # Check if instance already exists + inst = None + try: + inst = _find_instance_info(client, instance_name) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] != 'NotFoundException': + module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e)) + + zone = module.params.get('zone') + blueprint_id = module.params.get('blueprint_id') + bundle_id = module.params.get('bundle_id') + user_data = module.params.get('user_data') + user_data = '' if user_data is None else user_data + + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) + wait_max = time.time() + wait_timeout + + if module.params.get('key_pair_name'): + key_pair_name = module.params.get('key_pair_name') + else: + key_pair_name = '' + + if module.params.get('open_ports'): + open_ports = module.params.get('open_ports') + else: + open_ports = '[]' + + resp = None + if inst is None: + try: + resp = client.create_instances( + instanceNames=[ + instance_name + ], + availabilityZone=zone, + blueprintId=blueprint_id, + bundleId=bundle_id, + userData=user_data, + keyPairName=key_pair_name, + ) + resp = resp['operations'][0] + except botocore.exceptions.ClientError as e: + module.fail_json(msg='Unable to create instance {0}, error: {1}'.format(instance_name, e)) + + inst = _find_instance_info(client, instance_name) + + # Wait for instance to become running + if wait: + while (wait_max > time.time()) and (inst is not None and inst['state']['name'] != "running"): + try: + time.sleep(2) + inst = _find_instance_info(client, instance_name) + except botocore.exceptions.ClientError as e: + if e.response['ResponseMetadata']['HTTPStatusCode'] == "403": + module.fail_json(msg="Failed to start/stop instance {0}. 
Check that you have permissions to perform the operation".format(instance_name), + exception=traceback.format_exc()) + elif e.response['Error']['Code'] == "RequestExpired": + module.fail_json(msg="RequestExpired: Failed to start instance {0}.".format(instance_name), exception=traceback.format_exc()) + time.sleep(1) + + # Timed out + if wait and not changed and wait_max <= time.time(): + module.fail_json(msg="Wait for instance start timeout at %s" % time.asctime()) + + # Attempt to open ports + if open_ports: + if inst is not None: + try: + for o in open_ports: + resp = client.open_instance_public_ports( + instanceName=instance_name, + portInfo={ + 'fromPort': o['from_port'], + 'toPort': o['to_port'], + 'protocol': o['protocol'] + } + ) + except botocore.exceptions.ClientError as e: + module.fail_json(msg='Error opening ports for instance {0}, error: {1}'.format(instance_name, e)) + + changed = True + + return (changed, inst) + + +def delete_instance(module, client, instance_name): + """ + Terminates an instance + + module: Ansible module object + client: authenticated lightsail connection object + instance_name: name of instance to delete + + Returns a dictionary of instance information + about the instance deleted (pre-deletion). + + If the instance to be deleted is not found, + "changed" will be set to False. + + """ + + # It looks like deleting removes the instance immediately, nothing to wait for + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) + wait_max = time.time() + wait_timeout + + changed = False + + inst = None + try: + inst = _find_instance_info(client, instance_name) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] != 'NotFoundException': + module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e)) + + # Wait for instance to exit transition state before deleting + if wait: + while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'): + try: + time.sleep(5) + inst = _find_instance_info(client, instance_name) + except botocore.exceptions.ClientError as e: + if e.response['ResponseMetadata']['HTTPStatusCode'] == "403": + module.fail_json(msg="Failed to delete instance {0}. Check that you have permissions to perform the operation.".format(instance_name), + exception=traceback.format_exc()) + elif e.response['Error']['Code'] == "RequestExpired": + module.fail_json(msg="RequestExpired: Failed to delete instance {0}.".format(instance_name), exception=traceback.format_exc()) + # sleep and retry + time.sleep(10) + + # Attempt to delete + if inst is not None: + while not changed and ((wait and wait_max > time.time()) or (not wait)): + try: + client.delete_instance(instanceName=instance_name) + changed = True + except botocore.exceptions.ClientError as e: + module.fail_json(msg='Error deleting instance {0}, error: {1}'.format(instance_name, e)) + + # Timed out + if wait and not changed and wait_max <= time.time(): + module.fail_json(msg="wait for instance delete timeout at %s" % time.asctime()) + + return (changed, inst) + + +def restart_instance(module, client, instance_name): + """ + Reboot an existing instance + + module: Ansible module object + client: authenticated lightsail connection object + instance_name: name of instance to reboot + + Returns a dictionary of instance information + about the restarted instance + + If the instance was not able to reboot, + "changed" will be set to False. 
+ + Wait will not apply here as this is an OS-level operation + """ + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) + wait_max = time.time() + wait_timeout + + changed = False + + inst = None + try: + inst = _find_instance_info(client, instance_name) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] != 'NotFoundException': + module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e)) + + # Wait for instance to exit transition state before state change + if wait: + while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'): + try: + time.sleep(5) + inst = _find_instance_info(client, instance_name) + except botocore.exceptions.ClientError as e: + if e.response['ResponseMetadata']['HTTPStatusCode'] == "403": + module.fail_json(msg="Failed to restart instance {0}. Check that you have permissions to perform the operation.".format(instance_name), + exception=traceback.format_exc()) + elif e.response['Error']['Code'] == "RequestExpired": + module.fail_json(msg="RequestExpired: Failed to restart instance {0}.".format(instance_name), exception=traceback.format_exc()) + time.sleep(3) + + # send reboot + if inst is not None: + try: + client.reboot_instance(instanceName=instance_name) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] != 'NotFoundException': + module.fail_json(msg='Unable to reboot instance {0}, error: {1}'.format(instance_name, e)) + changed = True + + return (changed, inst) + + +def startstop_instance(module, client, instance_name, state): + """ + Starts or stops an existing instance + + module: Ansible module object + client: authenticated lightsail connection object + instance_name: name of instance to start/stop + state: Target state ("running" or "stopped") + + Returns a dictionary of instance information + about the instance started/stopped + + If the instance was not able to change state, + "changed" will be set to False. + + """ + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) + wait_max = time.time() + wait_timeout + + changed = False + + inst = None + try: + inst = _find_instance_info(client, instance_name) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] != 'NotFoundException': + module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e)) + + # Wait for instance to exit transition state before state change + if wait: + while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'): + try: + time.sleep(5) + inst = _find_instance_info(client, instance_name) + except botocore.exceptions.ClientError as e: + if e.response['ResponseMetadata']['HTTPStatusCode'] == "403": + module.fail_json(msg="Failed to start/stop instance {0}. 
Check that you have permissions to perform the operation".format(instance_name), + exception=traceback.format_exc()) + elif e.response['Error']['Code'] == "RequestExpired": + module.fail_json(msg="RequestExpired: Failed to start/stop instance {0}.".format(instance_name), exception=traceback.format_exc()) + time.sleep(1) + + # Try state change + if inst is not None and inst['state']['name'] != state: + try: + if state == 'running': + client.start_instance(instanceName=instance_name) + else: + client.stop_instance(instanceName=instance_name) + except botocore.exceptions.ClientError as e: + module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(instance_name, e)) + changed = True + # Grab current instance info + inst = _find_instance_info(client, instance_name) + + return (changed, inst) + + +def core(module): + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + if not region: + module.fail_json(msg='region must be specified') + + client = None + try: + client = boto3_conn(module, conn_type='client', resource='lightsail', + region=region, endpoint=ec2_url, **aws_connect_kwargs) + except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e: + module.fail_json(msg='Failed while connecting to the lightsail service: %s' % e, exception=traceback.format_exc()) + + changed = False + state = module.params['state'] + name = module.params['name'] + + if state == 'absent': + changed, instance_dict = delete_instance(module, client, name) + elif state in ('running', 'stopped'): + changed, instance_dict = startstop_instance(module, client, name, state) + elif state == 'restarted': + changed, instance_dict = restart_instance(module, client, name) + elif state == 'present': + changed, instance_dict = create_instance(module, client, name) + + module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(instance_dict)) + + +def _find_instance_info(client, instance_name): + ''' handle exceptions where this function is called ''' + inst = None + try: + inst = client.get_instance(instanceName=instance_name) + except botocore.exceptions.ClientError as e: + raise + return inst['instance'] + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted']), + zone=dict(type='str'), + blueprint_id=dict(type='str'), + bundle_id=dict(type='str'), + key_pair_name=dict(type='str'), + user_data=dict(type='str'), + wait=dict(type='bool', default=True), + wait_timeout=dict(default=300), + open_ports=dict(type='list') + )) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO3: + module.fail_json(msg='Python module "boto3" is missing, please install it') + + if not HAS_BOTOCORE: + module.fail_json(msg='Python module "botocore" is missing, please install it') + + try: + core(module) + except (botocore.exceptions.ClientError, Exception) as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/playbooks/common.yml b/playbooks/common.yml index 04a3966..5628c37 100644 --- a/playbooks/common.yml +++ b/playbooks/common.yml @@ -5,11 +5,11 @@ register: OS - name: Ubuntu pre-tasks - include: ubuntu.yml + include_tasks: ubuntu.yml when: '"Ubuntu" in OS.stdout' - name: FreeBSD pre-tasks - include: freebsd.yml + include_tasks: freebsd.yml when: '"FreeBSD" in OS.stdout' -- include: facts/main.yml +- 
include_tasks: facts/main.yml diff --git a/playbooks/freebsd.yml b/playbooks/freebsd.yml index 8cf0579..316c92a 100644 --- a/playbooks/freebsd.yml +++ b/playbooks/freebsd.yml @@ -6,4 +6,4 @@ - name: FreeBSD / HardenedBSD | Configure defaults raw: sudo ln -sf /usr/local/bin/python2.7 /usr/bin/python2.7 -- include: facts/FreeBSD.yml +- include_tasks: facts/FreeBSD.yml diff --git a/playbooks/post.yml b/playbooks/post.yml index f9f4198..e594b97 100644 --- a/playbooks/post.yml +++ b/playbooks/post.yml @@ -13,4 +13,4 @@ pause: seconds: 20 -- include: local_ssh.yml +- include_tasks: local_ssh.yml diff --git a/requirements.txt b/requirements.txt index 67ec4a1..e7443ab 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,12 +1,11 @@ -msrestazure setuptools>=11.3 -ansible>=2.1,<2.2.1 +ansible[azure]==2.4.3 dopy==0.3.5 boto>=2.5 boto3 -azure==2.0.0rc5 -msrest==0.4.1 apache-libcloud six pyopenssl jinja2==2.8 +shade +pycrypto diff --git a/roles/client/tasks/main.yml b/roles/client/tasks/main.yml index 6839714..0a3eedc 100644 --- a/roles/client/tasks/main.yml +++ b/roles/client/tasks/main.yml @@ -2,7 +2,7 @@ setup: - name: Include system based facts and tasks - include: systems/main.yml + include_tasks: systems/main.yml - name: Install prerequisites package: name="{{ item }}" state=present diff --git a/roles/client/tasks/systems/main.yml b/roles/client/tasks/systems/main.yml index 85da1eb..ba24c93 100644 --- a/roles/client/tasks/systems/main.yml +++ b/roles/client/tasks/systems/main.yml @@ -1,13 +1,13 @@ --- -- include: Debian.yml +- include_tasks: Debian.yml when: ansible_distribution == 'Debian' -- include: Ubuntu.yml +- include_tasks: Ubuntu.yml when: ansible_distribution == 'Ubuntu' -- include: CentOS.yml +- include_tasks: CentOS.yml when: ansible_distribution == 'CentOS' -- include: Fedora.yml +- include_tasks: Fedora.yml when: ansible_distribution == 'Fedora' diff --git a/roles/cloud-ec2/tasks/main.yml b/roles/cloud-ec2/tasks/main.yml index e32e70a..7d5894c 100644 --- a/roles/cloud-ec2/tasks/main.yml +++ b/roles/cloud-ec2/tasks/main.yml @@ -19,10 +19,10 @@ - set_fact: ami_image: "{{ ami_search.results[0].ami_id }}" - - include: encrypt_image.yml + - include_tasks: encrypt_image.yml tags: [encrypted] - - include: cloudformation.yml + - include_tasks: cloudformation.yml - name: Add new instance to host group add_host: @@ -38,7 +38,7 @@ cloud_instance_ip: "{{ stack.stack_outputs.ElasticIP }}" - name: Get EC2 instances - ec2_remote_facts: + ec2_instance_facts: aws_access_key: "{{ access_key }}" aws_secret_key: "{{ secret_key }}" region: "{{ region }}" diff --git a/roles/cloud-lightsail/tasks/main.yml b/roles/cloud-lightsail/tasks/main.yml new file mode 100644 index 0000000..ce28ceb --- /dev/null +++ b/roles/cloud-lightsail/tasks/main.yml @@ -0,0 +1,52 @@ +- block: + - set_fact: + access_key: "{{ aws_access_key | default(lookup('env','AWS_ACCESS_KEY_ID'), true) }}" + secret_key: "{{ aws_secret_key | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}" + region: "{{ algo_region | default(lookup('env','AWS_DEFAULT_REGION'), true) }}" + + - name: Create an instance + lightsail: + aws_access_key: "{{ access_key }}" + aws_secret_key: "{{ secret_key }}" + name: "{{ algo_server_name }}" + state: present + region: "{{ region }}" + zone: "{{ region }}a" + blueprint_id: "{{ cloud_providers.lightsail.image }}" + bundle_id: "{{ cloud_providers.lightsail.size }}" + wait_timeout: 300 + open_ports: + - from_port: 4500 + to_port: 4500 + protocol: udp + - from_port: 500 + to_port: 500 + protocol: udp + 
user_data: | + #!/bin/bash + mkdir -p /home/ubuntu/.ssh/ + echo "{{ lookup('file', SSH_keys.public) }}" >> /home/ubuntu/.ssh/authorized_keys + chown -R ubuntu: /home/ubuntu/.ssh/ + chmod 0700 /home/ubuntu/.ssh/ + chmod 0600 /home/ubuntu/.ssh/* + register: algo_instance + + - set_fact: + cloud_instance_ip: "{{ algo_instance['instance']['public_ip_address'] }}" + + - name: Add new instance to host group + add_host: + hostname: "{{ cloud_instance_ip }}" + groupname: vpn-host + ansible_ssh_user: ubuntu + ansible_python_interpreter: "/usr/bin/python2.7" + ansible_ssh_private_key_file: "{{ SSH_keys.private }}" + cloud_provider: lightsail + ipv6_support: no + + rescue: + - debug: var=fail_hint + tags: always + - fail: + tags: always diff --git a/roles/cloud-openstack/tasks/main.yml b/roles/cloud-openstack/tasks/main.yml new file mode 100644 index 0000000..aef49a5 --- /dev/null +++ b/roles/cloud-openstack/tasks/main.yml @@ -0,0 +1,87 @@ +--- +- block: + - name: Security group created + os_security_group: + state: "{{ state|default('present') }}" + name: "{{ algo_server_name }}-security_group" + description: AlgoVPN security group + register: os_security_group + + - name: Security rules created + os_security_group_rule: + state: "{{ state|default('present') }}" + security_group: "{{ os_security_group.id }}" + protocol: "{{ item.proto }}" + port_range_min: "{{ item.port_min }}" + port_range_max: "{{ item.port_max }}" + remote_ip_prefix: "{{ item.range }}" + with_items: + - { proto: tcp, port_min: 22, port_max: 22, range: 0.0.0.0/0 } + - { proto: icmp, port_min: -1, port_max: -1, range: 0.0.0.0/0 } + - { proto: udp, port_min: 4500, port_max: 4500, range: 0.0.0.0/0 } + - { proto: udp, port_min: 500, port_max: 500, range: 0.0.0.0/0 } + + - name: Keypair created + os_keypair: + state: "{{ state|default('present') }}" + name: "{{ SSH_keys.comment|regex_replace('@', '_') }}" + public_key_file: "{{ SSH_keys.public }}" + register: os_keypair + + - name: Gather facts about flavors + os_flavor_facts: + ram: "{{ cloud_providers.openstack.flavor_ram }}" + + - name: Gather facts about images + os_image_facts: + image: "{{ cloud_providers.openstack.image }}" + + - name: Gather facts about public networks + os_networks_facts: + + - name: Set the network as a fact + set_fact: + public_network_id: "{{ item.id }}" + when: + - item['router:external']|default(omit) + - item['admin_state_up']|default(omit) + - item['status'] == 'ACTIVE' + with_items: "{{ openstack_networks }}" + + - name: Set facts + set_fact: + flavor_id: "{{ (openstack_flavors | sort(attribute='ram'))[0]['id'] }}" + image_id: "{{ openstack_image['id'] }}" + keypair_name: "{{ os_keypair.key.name }}" + security_group_name: "{{ os_security_group['secgroup']['name'] }}" + + - name: Server created + os_server: + state: "{{ state|default('present') }}" + name: "{{ algo_server_name }}" + image: "{{ image_id }}" + flavor: "{{ flavor_id }}" + key_name: "{{ keypair_name }}" + security_groups: "{{ security_group_name }}" + nics: + - net-id: "{{ public_network_id }}" + register: os_server + + - set_fact: + cloud_instance_ip: "{{ os_server['openstack']['public_v4'] }}" + + - name: Add new instance to host group + add_host: + hostname: "{{ cloud_instance_ip }}" + groupname: vpn-host + ansible_ssh_user: ubuntu + ansible_python_interpreter: "/usr/bin/python2.7" + ansible_ssh_private_key_file: "{{ SSH_keys.private }}" + cloud_provider: openstack + ipv6_support: omit + + rescue: + - debug: var=fail_hint + tags: always + - fail: + tags: always diff --git 
a/roles/cloud-scaleway/tasks/main.yml b/roles/cloud-scaleway/tasks/main.yml new file mode 100644 index 0000000..ca4e4e6 --- /dev/null +++ b/roles/cloud-scaleway/tasks/main.yml @@ -0,0 +1,128 @@ +- block: + - name: Check if server exists + uri: + url: "https://cp-{{ algo_region }}.scaleway.com/servers" + method: GET + headers: + Content-Type: 'application/json' + X-Auth-Token: "{{ scaleway_auth_token }}" + status_code: 200 + register: scaleway_servers + + - name: Set server id as a fact + set_fact: + server_id: "{{ item.id }}" + no_log: true + when: algo_server_name == item.name + with_items: "{{ scaleway_servers.json.servers }}" + + - name: Create a server if it doesn't exist + block: + - name: Get the organization id + uri: + url: https://account.cloud.online.net/organizations + method: GET + headers: + Content-Type: 'application/json' + X-Auth-Token: "{{ scaleway_auth_token }}" + status_code: 200 + register: scaleway_organizations + + - name: Set organization id as a fact + set_fact: + organization_id: "{{ item.id }}" + no_log: true + when: scaleway_organization == item.name + with_items: "{{ scaleway_organizations.json.organizations }}" + + - name: Get images + uri: + url: "https://cp-{{ algo_region }}.scaleway.com/images" + method: GET + headers: + Content-Type: 'application/json' + X-Auth-Token: "{{ scaleway_auth_token }}" + status_code: 200 + register: scaleway_images + + - name: Set image id as a fact + set_fact: + image_id: "{{ item.id }}" + no_log: true + when: + - cloud_providers.scaleway.image in item.name + - cloud_providers.scaleway.arch == item.arch + with_items: "{{ scaleway_images.json.images }}" + + - name: Create a server + uri: + url: "https://cp-{{ algo_region }}.scaleway.com/servers/" + method: POST + headers: + Content-Type: 'application/json' + X-Auth-Token: "{{ scaleway_auth_token }}" + body: + organization: "{{ organization_id }}" + name: "{{ algo_server_name }}" + image: "{{ image_id }}" + commercial_type: "{{ cloud_providers.scaleway.size }}" + tags: + - Environment:Algo + - AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }} + enable_ipv6: true + status_code: 201 + body_format: json + register: algo_instance + + - name: Set server id as a fact + set_fact: + server_id: "{{ algo_instance.json.server.id }}" + when: server_id is not defined + + - name: Power on the server + uri: + url: https://cp-{{ algo_region }}.scaleway.com/servers/{{ server_id }}/action + method: POST + headers: + Content-Type: application/json + X-Auth-Token: "{{ scaleway_auth_token }}" + body: + action: poweron + status_code: 202 + body_format: json + ignore_errors: true + no_log: true + + - name: Wait for the server to become running + uri: + url: "https://cp-{{ algo_region }}.scaleway.com/servers/{{ server_id }}" + method: GET + headers: + Content-Type: 'application/json' + X-Auth-Token: "{{ scaleway_auth_token }}" + status_code: 200 + until: + - algo_instance.json.server.state is defined + - algo_instance.json.server.state == "running" + retries: 20 + delay: 30 + register: algo_instance + + - set_fact: + cloud_instance_ip: "{{ algo_instance['json']['server']['public_ip']['address'] }}" + + - name: Add new instance to host group + add_host: + hostname: "{{ cloud_instance_ip }}" + groupname: vpn-host + ansible_ssh_user: root + ansible_python_interpreter: "/usr/bin/python2.7" + ansible_ssh_private_key_file: "{{ SSH_keys.private }}" + cloud_provider: scaleway + ipv6_support: yes + + rescue: + - debug: var=fail_hint + tags: always + - fail: + tags: always diff --git 
a/roles/common/tasks/main.yml b/roles/common/tasks/main.yml index 781930e..5b6aa43 100644 --- a/roles/common/tasks/main.yml +++ b/roles/common/tasks/main.yml @@ -1,9 +1,9 @@ --- - block: - - include: ubuntu.yml + - include_tasks: ubuntu.yml when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' - - include: freebsd.yml + - include_tasks: freebsd.yml when: ansible_distribution == 'FreeBSD' - name: Install tools diff --git a/roles/common/tasks/ubuntu.yml b/roles/common/tasks/ubuntu.yml index 4c5705e..ce33774 100644 --- a/roles/common/tasks/ubuntu.yml +++ b/roles/common/tasks/ubuntu.yml @@ -1,46 +1,42 @@ --- +- name: Cloud only tasks + block: + - name: Install software updates + apt: update_cache=yes upgrade=dist -- name: Install software updates - apt: update_cache=yes upgrade=dist - tags: - - cloud + - name: Check if reboot is required + shell: > + if [[ -e /var/run/reboot-required ]]; then echo "required"; else echo "no"; fi + args: + executable: /bin/bash + register: reboot_required -- name: Check if reboot is required - shell: > - if [[ -e /var/run/reboot-required ]]; then echo "required"; else echo "no"; fi - args: - executable: /bin/bash - register: reboot_required - tags: - - cloud + - name: Reboot + shell: sleep 2 && shutdown -r now "Ansible updates triggered" + async: 1 + poll: 0 + when: reboot_required is defined and reboot_required.stdout == 'required' + ignore_errors: true -- name: Reboot - shell: sleep 2 && shutdown -r now "Ansible updates triggered" - async: 1 - poll: 0 - when: reboot_required is defined and reboot_required.stdout == 'required' - ignore_errors: true - tags: - - cloud + - name: Wait until SSH becomes ready... + local_action: + module: wait_for + port: 22 + host: "{{ inventory_hostname }}" + search_regex: OpenSSH + delay: 10 + timeout: 320 + when: reboot_required is defined and reboot_required.stdout == 'required' + become: false -- name: Wait until SSH becomes ready... 
- local_action: - module: wait_for - port: 22 - host: "{{ inventory_hostname }}" - search_regex: OpenSSH - delay: 10 - timeout: 320 - when: reboot_required is defined and reboot_required.stdout == 'required' - become: false - tags: - - cloud + - name: Include unattended upgrades configuration + include_tasks: unattended-upgrades.yml -- name: Disable MOTD on login and SSHD - replace: dest="{{ item.file }}" regexp="{{ item.regexp }}" replace="{{ item.line }}" - with_items: - - { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/login' } - - { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/sshd' } + - name: Disable MOTD on login and SSHD + replace: dest="{{ item.file }}" regexp="{{ item.regexp }}" replace="{{ item.line }}" + with_items: + - { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/login' } + - { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/sshd' } tags: - cloud diff --git a/roles/common/tasks/unattended-upgrades.yml b/roles/common/tasks/unattended-upgrades.yml new file mode 100644 index 0000000..378c16e --- /dev/null +++ b/roles/common/tasks/unattended-upgrades.yml @@ -0,0 +1,21 @@ +--- +- name: Install unattended-upgrades + apt: + name: unattended-upgrades + state: latest + +- name: Configure unattended-upgrades + template: + src: 50unattended-upgrades.j2 + dest: /etc/apt/apt.conf.d/50unattended-upgrades + owner: root + group: root + mode: 0644 + +- name: Periodic upgrades configured + template: + src: 10periodic.j2 + dest: /etc/apt/apt.conf.d/10periodic + owner: root + group: root + mode: 0644 diff --git a/roles/security/templates/10periodic.j2 b/roles/common/templates/10periodic.j2 similarity index 76% rename from roles/security/templates/10periodic.j2 rename to roles/common/templates/10periodic.j2 index 7587020..5d37e9f 100644 --- a/roles/security/templates/10periodic.j2 +++ b/roles/common/templates/10periodic.j2 @@ -1,4 +1,4 @@ APT::Periodic::Update-Package-Lists "1"; APT::Periodic::Download-Upgradeable-Packages "1"; APT::Periodic::AutocleanInterval "7"; -APT::Periodic::Unattended-Upgrade "1"; \ No newline at end of file +APT::Periodic::Unattended-Upgrade "1"; diff --git a/roles/security/templates/50unattended-upgrades.j2 b/roles/common/templates/50unattended-upgrades.j2 similarity index 97% rename from roles/security/templates/50unattended-upgrades.j2 rename to roles/common/templates/50unattended-upgrades.j2 index 5f8fb15..0c55b70 100644 --- a/roles/security/templates/50unattended-upgrades.j2 +++ b/roles/common/templates/50unattended-upgrades.j2 @@ -15,7 +15,7 @@ Unattended-Upgrade::Package-Blacklist { }; // This option allows you to control if on a unclean dpkg exit -// unattended-upgrades will automatically run +// unattended-upgrades will automatically run // dpkg --force-confold --configure -a // The default is true, to ensure updates keep getting installed //Unattended-Upgrade::AutoFixInterruptedDpkg "false"; @@ -46,7 +46,7 @@ Unattended-Upgrade::Package-Blacklist { //Unattended-Upgrade::Remove-Unused-Dependencies "false"; // Automatically reboot *WITHOUT CONFIRMATION* -// if the file /var/run/reboot-required is found after the upgrade +// if the file /var/run/reboot-required is found after the upgrade //Unattended-Upgrade::Automatic-Reboot "false"; // If automatic reboot is enabled and needed, reboot at the specific diff --git a/roles/dns_adblocking/tasks/main.yml index 
2ba74b7..43c06d5 100644 --- a/roles/dns_adblocking/tasks/main.yml +++ b/roles/dns_adblocking/tasks/main.yml @@ -14,10 +14,10 @@ - name: The dnsmasq directory created file: dest=/var/lib/dnsmasq state=directory mode=0755 owner=dnsmasq group=nogroup - - include: ubuntu.yml + - include_tasks: ubuntu.yml when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' - - include: freebsd.yml + - include_tasks: freebsd.yml when: ansible_distribution == 'FreeBSD' - name: Dnsmasq configured diff --git a/roles/security/handlers/main.yml b/roles/security/handlers/main.yml deleted file mode 100644 index ab98db6..0000000 --- a/roles/security/handlers/main.yml +++ /dev/null @@ -1,5 +0,0 @@ -- name: restart ssh - service: name="{{ ssh_service_name|default('ssh') }}" state=restarted - -- name: flush routing cache - shell: echo 1 > /proc/sys/net/ipv4/route/flush diff --git a/roles/security/meta/main.yml b/roles/security/meta/main.yml deleted file mode 100644 index e985f92..0000000 --- a/roles/security/meta/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- - -dependencies: - - { role: common, tags: common } diff --git a/roles/security/tasks/main.yml b/roles/security/tasks/main.yml deleted file mode 100644 index 2f27912..0000000 --- a/roles/security/tasks/main.yml +++ /dev/null @@ -1,161 +0,0 @@ ---- -- block: - - name: Install tools - apt: name="{{ item }}" state=latest - with_items: - - unattended-upgrades - - - name: Configure unattended-upgrades - template: - src: 50unattended-upgrades.j2 - dest: /etc/apt/apt.conf.d/50unattended-upgrades - owner: root - group: root - mode: 0644 - - - name: Periodic upgrades configured - template: - src: 10periodic.j2 - dest: /etc/apt/apt.conf.d/10periodic - owner: root - group: root - mode: 0644 - - - name: Find directories for minimizing access - stat: - path: "{{ item }}" - register: minimize_access_directories - with_items: - - '/usr/local/sbin' - - '/usr/local/bin' - - '/usr/sbin' - - '/usr/bin' - - '/sbin' - - '/bin' - - - name: Minimize access - file: - path: '{{ item.stat.path }}' - mode: 'go-w' - recurse: yes - when: item.stat.isdir - with_items: "{{ minimize_access_directories.results }}" - no_log: True - - - name: Change shadow ownership to root and mode to 0600 - file: - dest: '/etc/shadow' - owner: root - group: root - mode: 0600 - - - name: change su-binary to only be accessible to user and group root - file: - dest: '/bin/su' - owner: root - group: root - mode: 0750 - - # Core dumps - - - name: Restrict core dumps (with PAM) - lineinfile: - dest: /etc/security/limits.conf - line: "* hard core 0" - state: present - - - name: Restrict core dumps (with sysctl) - sysctl: - name: fs.suid_dumpable - value: 0 - ignoreerrors: yes - sysctl_set: yes - reload: yes - state: present - - # Kernel fixes - - - name: Disable Source Routed Packet Acceptance - sysctl: - name: "{{item}}" - value: 0 - ignoreerrors: yes - sysctl_set: yes - reload: yes - state: present - with_items: - - net.ipv4.conf.all.accept_source_route - - net.ipv4.conf.default.accept_source_route - notify: - - flush routing cache - - - name: Disable ICMP Redirect Acceptance - sysctl: - name: "{{item}}" - value: 0 - ignoreerrors: yes - sysctl_set: yes - reload: yes - state: present - with_items: - - net.ipv4.conf.all.accept_redirects - - net.ipv4.conf.default.accept_redirects - - - name: Disable Secure ICMP Redirect Acceptance - sysctl: - name: "{{item}}" - value: 0 - ignoreerrors: yes - sysctl_set: yes - reload: yes - state: present - with_items: - - net.ipv4.conf.all.secure_redirects - - 
net.ipv4.conf.default.secure_redirects - notify: - - flush routing cache - - - name: Enable Bad Error Message Protection - sysctl: - name: net.ipv4.icmp_ignore_bogus_error_responses - value: 1 - ignoreerrors: yes - sysctl_set: yes - reload: yes - state: present - notify: - - flush routing cache - - - name: Enable RFC-recommended Source Route Validation - sysctl: - name: "{{item}}" - value: 1 - ignoreerrors: yes - sysctl_set: yes - reload: yes - state: present - with_items: - - net.ipv4.conf.all.rp_filter - - net.ipv4.conf.default.rp_filter - notify: - - flush routing cache - - - name: Do not send ICMP redirects (we are not a router) - sysctl: - name: net.ipv4.conf.all.send_redirects - value: 0 - - - name: SSH config - template: - src: sshd_config.j2 - dest: /etc/ssh/sshd_config - owner: root - group: root - mode: 0644 - notify: - - restart ssh - rescue: - - debug: var=fail_hint - tags: always - - fail: - tags: always diff --git a/roles/security/templates/sshd_config.j2 b/roles/security/templates/sshd_config.j2 deleted file mode 100644 index 4bdb260..0000000 --- a/roles/security/templates/sshd_config.j2 +++ /dev/null @@ -1,51 +0,0 @@ -Port 22 -# ListenAddress :: -# ListenAddress 0.0.0.0 -Protocol 2 - -# LogLevel VERBOSE logs user's key fingerprint on login. -# Needed to have a clear audit log of which keys were used to log in. -SyslogFacility AUTH -LogLevel VERBOSE - -# Use kernel sandbox mechanisms where possible -# Systrace on OpenBSD, Seccomp on Linux, seatbelt on macOS X (Darwin), rlimit elsewhere. -UsePrivilegeSeparation sandbox - -# Handy for keeping network connections alive -TCPKeepAlive yes -ClientAliveInterval 120 - -# Authentication -UsePAM yes -PermitRootLogin without-password -StrictModes yes -PubkeyAuthentication yes -AcceptEnv LANG LC_* - -# Turn off a lot of features -IgnoreRhosts yes -HostbasedAuthentication no -PermitEmptyPasswords no -ChallengeResponseAuthentication no -PasswordAuthentication no -UseDNS no - -# Do not enable sftp -# If you DO enable it, use this line to log which files sftp users read/write -# Subsystem sftp /usr/lib/ssh/sftp-server -f AUTHPRIV -l INFO - -# This makes ansible faster -PrintMotd no -PrintLastLog yes - -# Use only modern host keys -HostKey /etc/ssh/ssh_host_ed25519_key -HostKey /etc/ssh/ssh_host_ecdsa_key - -# Use only modern ciphers -KexAlgorithms curve25519-sha256@libssh.org,ecdh-sha2-nistp256 -Ciphers chacha20-poly1305@openssh.com,aes128-gcm@openssh.com -MACs hmac-sha2-256-etm@openssh.com -HostKeyAlgorithms ssh-ed25519,ecdsa-sha2-nistp256 -# PubkeyAcceptedKeyTypes accept anything diff --git a/roles/vpn/tasks/main.yml b/roles/vpn/tasks/main.yml index 8e732e1..e0d0d1b 100644 --- a/roles/vpn/tasks/main.yml +++ b/roles/vpn/tasks/main.yml @@ -6,20 +6,20 @@ - name: Ensure that the strongswan user exist user: name=strongswan group=strongswan state=present - - include: ubuntu.yml + - include_tasks: ubuntu.yml when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' - - include: freebsd.yml + - include_tasks: freebsd.yml when: ansible_distribution == 'FreeBSD' - name: Install strongSwan package: name=strongswan state=present - - include: ipec_configuration.yml - - include: openssl.yml + - include_tasks: ipec_configuration.yml + - include_tasks: openssl.yml tags: update-users - - include: distribute_keys.yml - - include: client_configs.yml + - include_tasks: distribute_keys.yml + - include_tasks: client_configs.yml delegate_to: localhost become: no tags: update-users diff --git a/roles/vpn/tasks/ubuntu.yml 
b/roles/vpn/tasks/ubuntu.yml index ccc561b..d3a858c 100644 --- a/roles/vpn/tasks/ubuntu.yml +++ b/roles/vpn/tasks/ubuntu.yml @@ -44,5 +44,5 @@ - daemon-reload - restart strongswan -- include: iptables.yml +- include_tasks: iptables.yml tags: iptables diff --git a/users.yml b/users.yml index 9279208..46a2d79 100644 --- a/users.yml +++ b/users.yml @@ -45,7 +45,7 @@ pre_tasks: - block: - name: Common pre-tasks - include: playbooks/common.yml + include_tasks: playbooks/common.yml tags: always rescue: - debug: var=fail_hint
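
Note (illustrative, not part of the patch): the provider functions in the algo script assemble ROLES and EXTRA_VARS, which the wrapper is assumed to pass to ansible-playbook as tags and extra vars, as upstream Algo does. Under that assumption, the new Scaleway path could be exercised non-interactively roughly as follows; SCW_TOKEN and SCW_ORG are hypothetical placeholder variables, and the region and server name take the same values the prompts above would collect.

# Sketch only: mirrors ROLES="scaleway vpn cloud" and the EXTRA_VARS string built by scaleway() above.
ansible-playbook deploy.yml \
  -t scaleway,vpn,cloud \
  -e "scaleway_auth_token=$SCW_TOKEN scaleway_organization=\"$SCW_ORG\" algo_server_name=algo.local algo_region=par1"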