Mirror of https://github.com/trailofbits/algo.git, synced 2025-06-07 15:43:54 +02:00
Ansible 2.4, Lightsail, Scaleway, DreamCompute (OpenStack) integration (#804)
* Move to ansible-2.4.3
* Add Lightsail support #623
* Fixing the EC2 deployment
* Scaleway integration #623
* OpenStack cloud provider (DreamCompute optimised) #623
* Remove the security role
* Enable unattended-upgrades for clouds
* New requirements to make Azure and GCE work
Parent: 4da752b603
Commit: 02427910de
29 changed files with 1123 additions and 430 deletions
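Much of the playbook churn in this diff is the Ansible 2.4 migration from the deprecated "include" directive to "include_tasks". A minimal sketch of that rename, using one of the task files touched by this commit:

    # Before (deprecated as of Ansible 2.4):
    - include: ubuntu.yml
      when: ansible_distribution == 'Ubuntu'

    # After (Ansible 2.4+ dynamic task include, as used throughout this commit):
    - include_tasks: ubuntu.yml
      when: ansible_distribution == 'Ubuntu'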
|
@@ -52,7 +52,7 @@ script:
   # - shellcheck algo
   # - ansible-lint deploy.yml users.yml deploy_client.yml
   - ansible-playbook deploy.yml --syntax-check
-  - ansible-playbook deploy.yml -t local,vpn,dns,ssh_tunneling,security,tests -e "server_ip=$LXC_IP server_user=root IP_subject_alt_name=$LXC_IP local_dns=Y"
+  - ansible-playbook deploy.yml -t local,vpn,dns,ssh_tunneling,tests -e "server_ip=$LXC_IP server_user=root IP_subject_alt_name=$LXC_IP local_dns=Y"
 
 after_script:
   - ./tests/update-users.sh
|
algo (137 changed lines)
|
@@ -48,12 +48,6 @@ Do you want each user to have their own account for SSH tunneling?
 ssh_tunneling_enabled=${ssh_tunneling_enabled:-n}
 if [[ "$ssh_tunneling_enabled" =~ ^(y|Y)$ ]]; then ROLES+=" ssh_tunneling"; fi
 
-read -p "
-Do you want to apply operating system security enhancements on the server? (warning: replaces your sshd_config)
-[y/N]: " -r security_enabled
-security_enabled=${security_enabled:-n}
-if [[ "$security_enabled" =~ ^(y|Y)$ ]]; then ROLES+=" security"; fi
-
 read -p "
 Do you want the VPN to support Windows 10 or Linux Desktop clients? (enables compatible ciphers and key exchange, less secure)
 [y/N]: " -r Win10_Enabled
|
@@ -290,6 +284,115 @@ Enter the number of your desired region:
 EXTRA_VARS="aws_access_key=$aws_access_key aws_secret_key=$aws_secret_key aws_server_name=$aws_server_name ssh_public_key=$ssh_public_key region=$region"
 }
 
+lightsail () {
+  read -p "
+Enter your aws_access_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
+Note: Make sure to use an IAM user with an acceptable policy attached (see https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md).
+$ADDITIONAL_PROMPT
+[AKIA...]: " -rs aws_access_key
+
+  read -p "
+
+Enter your aws_secret_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
+$ADDITIONAL_PROMPT
+[ABCD...]: " -rs aws_secret_key
+
+  read -p "
+
+Name the vpn server:
+[algo.local]: " -r algo_server_name
+  algo_server_name=${algo_server_name:-algo.local}
+
+  read -p "
+
+What region should the server be located in?
+  1. us-east-1       US East (N. Virginia)
+  2. us-east-2       US East (Ohio)
+  3. us-west-1       US West (N. California)
+  4. us-west-2       US West (Oregon)
+  5. ap-south-1      Asia Pacific (Mumbai)
+  6. ap-northeast-2  Asia Pacific (Seoul)
+  7. ap-southeast-1  Asia Pacific (Singapore)
+  8. ap-southeast-2  Asia Pacific (Sydney)
+  9. ap-northeast-1  Asia Pacific (Tokyo)
+  10. eu-central-1   EU (Frankfurt)
+  11. eu-west-1      EU (Ireland)
+  12. eu-west-2      EU (London)
+Enter the number of your desired region:
+[1]: " -r algo_region
+  algo_region=${algo_region:-1}
+
+  case "$algo_region" in
+    1) region="us-east-1" ;;
+    2) region="us-east-2" ;;
+    3) region="us-west-1" ;;
+    4) region="us-west-2" ;;
+    5) region="ap-south-1" ;;
+    6) region="ap-northeast-2" ;;
+    7) region="ap-southeast-1" ;;
+    8) region="ap-southeast-2" ;;
+    9) region="ap-northeast-1" ;;
+    10) region="eu-central-1" ;;
+    11) region="eu-west-1" ;;
+    12) region="eu-west-2";;
+  esac
+
+  ROLES="lightsail vpn cloud"
+  EXTRA_VARS="aws_access_key=$aws_access_key aws_secret_key=$aws_secret_key algo_server_name=$algo_server_name region=$region"
+}
+
+scaleway () {
+  read -p "
+Enter your auth token (https://www.scaleway.com/docs/generate-an-api-token/)
+$ADDITIONAL_PROMPT
+[...]: " -rs scaleway_auth_token
+
+  read -p "
+
+Enter your organization name (https://cloud.scaleway.com/#/billing)
+$ADDITIONAL_PROMPT
+[...]: " -rs scaleway_organization
+
+  read -p "
+
+Name the vpn server:
+[algo.local]: " -r algo_server_name
+  algo_server_name=${algo_server_name:-algo.local}
+
+  read -p "
+
+What region should the server be located in?
+  1. par1  Paris
+  2. ams1  Amsterdam
+Enter the number of your desired region:
+[1]: " -r algo_region
+  algo_region=${algo_region:-1}
+
+  case "$algo_region" in
+    1) region="par1" ;;
+    2) region="ams1" ;;
+  esac
+
+  ROLES="scaleway vpn cloud"
+  EXTRA_VARS="scaleway_auth_token=$scaleway_auth_token scaleway_organization=\"$scaleway_organization\" algo_server_name=$algo_server_name algo_region=$region"
+}
+
+openstack () {
+  read -p "
+Enter the local path to your credentials OpenStack RC file (Can be donloaded from the OpenStack dashboard->Compute->API Access)
+[...]: " -r os_rc
+
+  read -p "
+
+Name the vpn server:
+[algo.local]: " -r algo_server_name
+  algo_server_name=${algo_server_name:-algo.local}
+
+  ROLES="openstack vpn cloud"
+  EXTRA_VARS="algo_server_name=$algo_server_name"
+  source $os_rc
+}
+
 gce () {
   read -p "
 Enter the local path to your credentials JSON file (https://support.google.com/cloud/answer/6158849?hl=en&ref_topic=6262490#serviceaccounts):
||||||
|
@@ -433,10 +536,13 @@ algo_provisioning () {
   echo -n "
 What provider would you like to use?
     1. DigitalOcean
-    2. Amazon EC2
-    3. Microsoft Azure
-    4. Google Compute Engine
-    5. Install to existing Ubuntu 16.04 server
+    2. Amazon Lightsail
+    3. Amazon EC2
+    4. Microsoft Azure
+    5. Google Compute Engine
+    6. Scaleway
+    7. OpenStack (DreamCompute optimised)
+    8. Install to existing Ubuntu 16.04 server
 
 Enter the number of your desired provider
 : "
|
@@ -445,10 +551,13 @@ Enter the number of your desired provider
 
   case "$N" in
     1) digitalocean; ;;
-    2) ec2; ;;
-    3) azure; ;;
-    4) gce; ;;
-    5) non_cloud; ;;
+    2) lightsail; ;;
+    3) ec2; ;;
+    4) azure; ;;
+    5) gce; ;;
+    6) scaleway; ;;
+    7) openstack; ;;
+    8) non_cloud; ;;
     *) exit 1 ;;
   esac
 
|
config.cfg (10 changed lines)
|
@@ -86,6 +86,16 @@ cloud_providers:
   gce:
     size: f1-micro
     image: ubuntu-1604  # ubuntu-1604 / ubuntu-1704
+  lightsail:
+    size: nano_1_0
+    image: ubuntu_16_04
+  scaleway:
+    size: VC1S
+    image: Ubuntu Xenial
+    arch: x86_64
+  openstack:
+    flavor_ram: ">=512"
+    image: Ubuntu-16.04
   local:
 
 fail_hint:
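The new provider defaults above are read back by the matching cloud roles through the cloud_providers dictionary. A minimal sketch of that lookup, mirroring the task the new cloud-lightsail role uses later in this diff:

    - name: Create an instance
      lightsail:
        name: "{{ algo_server_name }}"
        state: present
        region: "{{ region }}"
        zone: "{{ region }}a"
        blueprint_id: "{{ cloud_providers.lightsail.image }}"   # ubuntu_16_04 from config.cfg
        bundle_id: "{{ cloud_providers.lightsail.size }}"       # nano_1_0 from config.cfg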
|
deploy.yml (14 changed lines)
|
@@ -7,11 +7,11 @@
   pre_tasks:
     - block:
         - name: Local pre-tasks
-          include: playbooks/local.yml
+          include_tasks: playbooks/local.yml
           tags: [ 'always' ]
 
         - name: Local pre-tasks
-          include: playbooks/local_ssh.yml
+          include_tasks: playbooks/local_ssh.yml
           become: false
           when: Deployed_By_Algo is defined and Deployed_By_Algo == "Y"
           tags: [ 'local' ]
@@ -26,12 +26,15 @@
     - { role: cloud-ec2, tags: ['ec2'] }
     - { role: cloud-gce, tags: ['gce'] }
     - { role: cloud-azure, tags: ['azure'] }
+    - { role: cloud-lightsail, tags: ['lightsail'] }
+    - { role: cloud-scaleway, tags: ['scaleway'] }
+    - { role: cloud-openstack, tags: ['openstack'] }
     - { role: local, tags: ['local'] }
 
   post_tasks:
     - block:
         - name: Local post-tasks
-          include: playbooks/post.yml
+          include_tasks: playbooks/post.yml
           become: false
           tags: [ 'cloud' ]
       rescue:
@@ -51,8 +54,8 @@
   pre_tasks:
     - block:
        - name: Common pre-tasks
-         include: playbooks/common.yml
-         tags: [ 'digitalocean', 'ec2', 'gce', 'azure', 'local', 'pre' ]
+         include_tasks: playbooks/common.yml
+         tags: [ 'digitalocean', 'ec2', 'gce', 'azure', 'lightsail', 'scaleway', 'openstack', 'local', 'pre' ]
       rescue:
         - debug: var=fail_hint
           tags: always
@@ -60,7 +63,6 @@
           tags: always
 
   roles:
-    - { role: security, tags: [ 'security' ] }
    - { role: dns_adblocking, tags: ['dns', 'adblock' ] }
    - { role: ssh_tunneling, tags: [ 'ssh_tunneling' ] }
    - { role: vpn, tags: [ 'vpn' ] }
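Each new cloud role finishes by publishing the created server into the in-memory vpn-host group, which is the group the second play in deploy.yml targets. A minimal sketch of that hand-off, mirroring the tasks the new roles use:

    - set_fact:
        cloud_instance_ip: "{{ algo_instance['instance']['public_ip_address'] }}"

    - name: Add new instance to host group
      add_host:
        hostname: "{{ cloud_instance_ip }}"
        groupname: vpn-host
        ansible_ssh_user: ubuntu
        ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
        cloud_provider: lightsail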
|
|
@@ -1,26 +1,25 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
 
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
 
 DOCUMENTATION = '''
 ---
 module: digital_ocean_tag
 short_description: Create and remove tag(s) to DigitalOcean resource.
 description:
     - Create and remove tag(s) to DigitalOcean resource.
+author: "Victor Volle (@kontrafiktion)"
 version_added: "2.2"
 options:
   name:
@@ -31,9 +30,11 @@ options:
   resource_id:
     description:
       - The ID of the resource to operate on.
+      - The data type of resource_id is changed from integer to string, from version 2.5.
+    aliases: ['droplet_id']
   resource_type:
     description:
-      - The type of resource to operate on. Currently only tagging of
+      - The type of resource to operate on. Currently, only tagging of
        droplets is supported.
     default: droplet
     choices: ['droplet']
@@ -65,7 +66,7 @@ EXAMPLES = '''
 - name: tag a resource; creating the tag if it does not exists
   digital_ocean_tag:
     name: "{{ item }}"
-    resource_id: YYY
+    resource_id: "73333005"
     state: present
   with_items:
     - staging
@@ -74,7 +75,7 @@ EXAMPLES = '''
 - name: untag a resource
   digital_ocean_tag:
     name: staging
-    resource_id: YYY
+    resource_id: "73333005"
     state: absent
 
 # Deleting a tag also untags all the resources that have previously been
|
@ -104,133 +105,90 @@ data:
|
||||||
}
|
}
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import json
|
from traceback import format_exc
|
||||||
import os
|
|
||||||
|
|
||||||
from ansible.module_utils.basic import AnsibleModule
|
from ansible.module_utils.basic import AnsibleModule
|
||||||
from ansible.module_utils.urls import fetch_url
|
from ansible.module_utils.digital_ocean import DigitalOceanHelper
|
||||||
|
from ansible.module_utils._text import to_native
|
||||||
|
|
||||||
class Response(object):
|
|
||||||
|
|
||||||
def __init__(self, resp, info):
|
|
||||||
self.body = None
|
|
||||||
if resp:
|
|
||||||
self.body = resp.read()
|
|
||||||
self.info = info
|
|
||||||
|
|
||||||
@property
|
|
||||||
def json(self):
|
|
||||||
if not self.body:
|
|
||||||
if "body" in self.info:
|
|
||||||
return json.loads(self.info["body"])
|
|
||||||
return None
|
|
||||||
try:
|
|
||||||
return json.loads(self.body)
|
|
||||||
except ValueError:
|
|
||||||
return None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def status_code(self):
|
|
||||||
return self.info["status"]
|
|
||||||
|
|
||||||
|
|
||||||
class Rest(object):
|
|
||||||
|
|
||||||
def __init__(self, module, headers):
|
|
||||||
self.module = module
|
|
||||||
self.headers = headers
|
|
||||||
self.baseurl = 'https://api.digitalocean.com/v2'
|
|
||||||
|
|
||||||
def _url_builder(self, path):
|
|
||||||
if path[0] == '/':
|
|
||||||
path = path[1:]
|
|
||||||
return '%s/%s' % (self.baseurl, path)
|
|
||||||
|
|
||||||
def send(self, method, path, data=None, headers=None):
|
|
||||||
url = self._url_builder(path)
|
|
||||||
data = self.module.jsonify(data)
|
|
||||||
|
|
||||||
resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method)
|
|
||||||
|
|
||||||
return Response(resp, info)
|
|
||||||
|
|
||||||
def get(self, path, data=None, headers=None):
|
|
||||||
return self.send('GET', path, data, headers)
|
|
||||||
|
|
||||||
def put(self, path, data=None, headers=None):
|
|
||||||
return self.send('PUT', path, data, headers)
|
|
||||||
|
|
||||||
def post(self, path, data=None, headers=None):
|
|
||||||
return self.send('POST', path, data, headers)
|
|
||||||
|
|
||||||
def delete(self, path, data=None, headers=None):
|
|
||||||
return self.send('DELETE', path, data, headers)
|
|
||||||
|
|
||||||
|
|
||||||
def core(module):
|
def core(module):
|
||||||
try:
|
|
||||||
api_token = module.params['api_token'] or \
|
|
||||||
os.environ['DO_API_TOKEN'] or os.environ['DO_API_KEY']
|
|
||||||
except KeyError as e:
|
|
||||||
module.fail_json(msg='Unable to load %s' % e.message)
|
|
||||||
|
|
||||||
state = module.params['state']
|
state = module.params['state']
|
||||||
name = module.params['name']
|
name = module.params['name']
|
||||||
resource_id = module.params['resource_id']
|
resource_id = module.params['resource_id']
|
||||||
resource_type = module.params['resource_type']
|
resource_type = module.params['resource_type']
|
||||||
|
|
||||||
rest = Rest(module, {'Authorization': 'Bearer {}'.format(api_token),
|
rest = DigitalOceanHelper(module)
|
||||||
'Content-type': 'application/json'})
|
|
||||||
|
|
||||||
if state in ('present'):
|
# Check if api_token is valid or not
|
||||||
if name is None:
|
response = rest.get('account')
|
||||||
module.fail_json(msg='parameter `name` is missing')
|
if response.status_code == 401:
|
||||||
|
module.fail_json(msg='Failed to login using api_token, please verify '
|
||||||
# Ensure Tag exists
|
'validity of api_token')
|
||||||
response = rest.post("tags", data={'name': name})
|
if state == 'present':
|
||||||
|
response = rest.get('tags/{0}'.format(name))
|
||||||
status_code = response.status_code
|
status_code = response.status_code
|
||||||
json = response.json
|
resp_json = response.json
|
||||||
if status_code == 201:
|
changed = False
|
||||||
changed = True
|
if status_code == 200 and resp_json['tag']['name'] == name:
|
||||||
elif status_code == 422:
|
|
||||||
changed = False
|
changed = False
|
||||||
else:
|
else:
|
||||||
module.exit_json(changed=False, data=json)
|
# Ensure Tag exists
|
||||||
|
response = rest.post("tags", data={'name': name})
|
||||||
|
status_code = response.status_code
|
||||||
|
resp_json = response.json
|
||||||
|
if status_code == 201:
|
||||||
|
changed = True
|
||||||
|
elif status_code == 422:
|
||||||
|
changed = False
|
||||||
|
else:
|
||||||
|
module.exit_json(changed=False, data=resp_json)
|
||||||
|
|
||||||
if resource_id is None:
|
if resource_id is None:
|
||||||
# No resource defined, we're done.
|
# No resource defined, we're done.
|
||||||
if json is None:
|
module.exit_json(changed=changed, data=resp_json)
|
||||||
module.exit_json(changed=changed, data=json)
|
|
||||||
else:
|
|
||||||
module.exit_json(changed=changed, data=json)
|
|
||||||
else:
|
else:
|
||||||
# Tag a resource
|
# Check if resource is already tagged or not
|
||||||
url = "tags/{}/resources".format(name)
|
found = False
|
||||||
payload = {
|
url = "{0}?tag_name={1}".format(resource_type, name)
|
||||||
'resources': [{
|
if resource_type == 'droplet':
|
||||||
'resource_id': resource_id,
|
url = "droplets?tag_name={0}".format(name)
|
||||||
'resource_type': resource_type}]}
|
response = rest.get(url)
|
||||||
response = rest.post(url, data=payload)
|
status_code = response.status_code
|
||||||
if response.status_code == 204:
|
resp_json = response.json
|
||||||
module.exit_json(changed=True)
|
if status_code == 200:
|
||||||
|
for resource in resp_json['droplets']:
|
||||||
|
if not found and resource['id'] == int(resource_id):
|
||||||
|
found = True
|
||||||
|
break
|
||||||
|
if not found:
|
||||||
|
# If resource is not tagged, tag a resource
|
||||||
|
url = "tags/{0}/resources".format(name)
|
||||||
|
payload = {
|
||||||
|
'resources': [{
|
||||||
|
'resource_id': resource_id,
|
||||||
|
'resource_type': resource_type}]}
|
||||||
|
response = rest.post(url, data=payload)
|
||||||
|
if response.status_code == 204:
|
||||||
|
module.exit_json(changed=True)
|
||||||
|
else:
|
||||||
|
module.fail_json(msg="error tagging resource '{0}': {1}".format(resource_id, response.json["message"]))
|
||||||
|
else:
|
||||||
|
# Already tagged resource
|
||||||
|
module.exit_json(changed=False)
|
||||||
else:
|
else:
|
||||||
module.fail_json(msg="error tagging resource '{}': {}".format(
|
# Unable to find resource specified by user
|
||||||
resource_id, response.json["message"]))
|
module.fail_json(msg=resp_json['message'])
|
||||||
|
|
||||||
elif state in ('absent'):
|
|
||||||
if name is None:
|
|
||||||
module.fail_json(msg='parameter `name` is missing')
|
|
||||||
|
|
||||||
|
elif state == 'absent':
|
||||||
if resource_id:
|
if resource_id:
|
||||||
url = "tags/{}/resources".format(name)
|
url = "tags/{0}/resources".format(name)
|
||||||
payload = {
|
payload = {
|
||||||
'resources': [{
|
'resources': [{
|
||||||
'resource_id': resource_id,
|
'resource_id': resource_id,
|
||||||
'resource_type': resource_type}]}
|
'resource_type': resource_type}]}
|
||||||
response = rest.delete(url, data=payload)
|
response = rest.delete(url, data=payload)
|
||||||
else:
|
else:
|
||||||
url = "tags/{}".format(name)
|
url = "tags/{0}".format(name)
|
||||||
response = rest.delete(url)
|
response = rest.delete(url)
|
||||||
if response.status_code == 204:
|
if response.status_code == 204:
|
||||||
module.exit_json(changed=True)
|
module.exit_json(changed=True)
|
||||||
|
@@ -252,7 +210,8 @@ def main():
     try:
         core(module)
     except Exception as e:
-        module.fail_json(msg=str(e))
+        module.fail_json(msg=to_native(e), exception=format_exc())
+
 
 if __name__ == '__main__':
     main()
|
library/lightsail.py (new file, 551 lines)
|
@ -0,0 +1,551 @@
|
||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright: Ansible Project
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import absolute_import, division, print_function
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
|
||||||
|
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||||
|
'status': ['preview'],
|
||||||
|
'supported_by': 'community'}
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: lightsail
|
||||||
|
short_description: Create or delete a virtual machine instance in AWS Lightsail
|
||||||
|
description:
|
||||||
|
- Creates or instances in AWS Lightsail and optionally wait for it to be 'running'.
|
||||||
|
version_added: "2.4"
|
||||||
|
author: "Nick Ball (@nickball)"
|
||||||
|
options:
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- Indicate desired state of the target.
|
||||||
|
default: present
|
||||||
|
choices: ['present', 'absent', 'running', 'restarted', 'stopped']
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- Name of the instance
|
||||||
|
required: true
|
||||||
|
default : null
|
||||||
|
zone:
|
||||||
|
description:
|
||||||
|
- AWS availability zone in which to launch the instance. Required when state='present'
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
blueprint_id:
|
||||||
|
description:
|
||||||
|
- ID of the instance blueprint image. Required when state='present'
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
bundle_id:
|
||||||
|
description:
|
||||||
|
- Bundle of specification info for the instance. Required when state='present'
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
user_data:
|
||||||
|
description:
|
||||||
|
- Launch script that can configure the instance with additional data
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
key_pair_name:
|
||||||
|
description:
|
||||||
|
- Name of the key pair to use with the instance
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
wait:
|
||||||
|
description:
|
||||||
|
- Wait for the instance to be in state 'running' before returning. If wait is "no" an ip_address may not be returned
|
||||||
|
default: "yes"
|
||||||
|
choices: [ "yes", "no" ]
|
||||||
|
wait_timeout:
|
||||||
|
description:
|
||||||
|
- How long before wait gives up, in seconds.
|
||||||
|
default: 300
|
||||||
|
open_ports:
|
||||||
|
description:
|
||||||
|
- Adds public ports to an Amazon Lightsail instance.
|
||||||
|
default: null
|
||||||
|
suboptions:
|
||||||
|
from_port:
|
||||||
|
description: Begin of the range
|
||||||
|
required: true
|
||||||
|
default: null
|
||||||
|
to_port:
|
||||||
|
description: End of the range
|
||||||
|
required: true
|
||||||
|
default: null
|
||||||
|
protocol:
|
||||||
|
description: Accepted traffic protocol.
|
||||||
|
required: true
|
||||||
|
choices:
|
||||||
|
- udp
|
||||||
|
- tcp
|
||||||
|
- all
|
||||||
|
default: null
|
||||||
|
requirements:
|
||||||
|
- "python >= 2.6"
|
||||||
|
- boto3
|
||||||
|
|
||||||
|
extends_documentation_fragment:
|
||||||
|
- aws
|
||||||
|
- ec2
|
||||||
|
'''
|
||||||
|
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
# Create a new Lightsail instance, register the instance details
|
||||||
|
- lightsail:
|
||||||
|
state: present
|
||||||
|
name: myinstance
|
||||||
|
region: us-east-1
|
||||||
|
zone: us-east-1a
|
||||||
|
blueprint_id: ubuntu_16_04
|
||||||
|
bundle_id: nano_1_0
|
||||||
|
key_pair_name: id_rsa
|
||||||
|
user_data: " echo 'hello world' > /home/ubuntu/test.txt"
|
||||||
|
wait_timeout: 500
|
||||||
|
open_ports:
|
||||||
|
- from_port: 4500
|
||||||
|
to_port: 4500
|
||||||
|
protocol: udp
|
||||||
|
- from_port: 500
|
||||||
|
to_port: 500
|
||||||
|
protocol: udp
|
||||||
|
register: my_instance
|
||||||
|
|
||||||
|
- debug:
|
||||||
|
msg: "Name is {{ my_instance.instance.name }}"
|
||||||
|
|
||||||
|
- debug:
|
||||||
|
msg: "IP is {{ my_instance.instance.publicIpAddress }}"
|
||||||
|
|
||||||
|
# Delete an instance if present
|
||||||
|
- lightsail:
|
||||||
|
state: absent
|
||||||
|
region: us-east-1
|
||||||
|
name: myinstance
|
||||||
|
|
||||||
|
'''
|
||||||
|
|
||||||
|
RETURN = '''
|
||||||
|
changed:
|
||||||
|
description: if a snapshot has been modified/created
|
||||||
|
returned: always
|
||||||
|
type: bool
|
||||||
|
sample:
|
||||||
|
changed: true
|
||||||
|
instance:
|
||||||
|
description: instance data
|
||||||
|
returned: always
|
||||||
|
type: dict
|
||||||
|
sample:
|
||||||
|
arn: "arn:aws:lightsail:us-east-1:448830907657:Instance/1fef0175-d6c8-480e-84fa-214f969cda87"
|
||||||
|
blueprint_id: "ubuntu_16_04"
|
||||||
|
blueprint_name: "Ubuntu"
|
||||||
|
bundle_id: "nano_1_0"
|
||||||
|
created_at: "2017-03-27T08:38:59.714000-04:00"
|
||||||
|
hardware:
|
||||||
|
cpu_count: 1
|
||||||
|
ram_size_in_gb: 0.5
|
||||||
|
is_static_ip: false
|
||||||
|
location:
|
||||||
|
availability_zone: "us-east-1a"
|
||||||
|
region_name: "us-east-1"
|
||||||
|
name: "my_instance"
|
||||||
|
networking:
|
||||||
|
monthly_transfer:
|
||||||
|
gb_per_month_allocated: 1024
|
||||||
|
ports:
|
||||||
|
- access_direction: "inbound"
|
||||||
|
access_from: "Anywhere (0.0.0.0/0)"
|
||||||
|
access_type: "public"
|
||||||
|
common_name: ""
|
||||||
|
from_port: 80
|
||||||
|
protocol: tcp
|
||||||
|
to_port: 80
|
||||||
|
- access_direction: "inbound"
|
||||||
|
access_from: "Anywhere (0.0.0.0/0)"
|
||||||
|
access_type: "public"
|
||||||
|
common_name: ""
|
||||||
|
from_port: 22
|
||||||
|
protocol: tcp
|
||||||
|
to_port: 22
|
||||||
|
private_ip_address: "172.26.8.14"
|
||||||
|
public_ip_address: "34.207.152.202"
|
||||||
|
resource_type: "Instance"
|
||||||
|
ssh_key_name: "keypair"
|
||||||
|
state:
|
||||||
|
code: 16
|
||||||
|
name: running
|
||||||
|
support_code: "588307843083/i-0997c97831ee21e33"
|
||||||
|
username: "ubuntu"
|
||||||
|
'''
|
||||||
|
|
||||||
|
import time
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
try:
|
||||||
|
import botocore
|
||||||
|
HAS_BOTOCORE = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_BOTOCORE = False
|
||||||
|
|
||||||
|
try:
|
||||||
|
import boto3
|
||||||
|
except ImportError:
|
||||||
|
# will be caught by imported HAS_BOTO3
|
||||||
|
pass
|
||||||
|
|
||||||
|
from ansible.module_utils.basic import AnsibleModule
|
||||||
|
from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
|
||||||
|
HAS_BOTO3, camel_dict_to_snake_dict)
|
||||||
|
|
||||||
|
|
||||||
|
def create_instance(module, client, instance_name):
|
||||||
|
"""
|
||||||
|
Create an instance
|
||||||
|
|
||||||
|
module: Ansible module object
|
||||||
|
client: authenticated lightsail connection object
|
||||||
|
instance_name: name of instance to delete
|
||||||
|
|
||||||
|
Returns a dictionary of instance information
|
||||||
|
about the new instance.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
changed = False
|
||||||
|
|
||||||
|
# Check if instance already exists
|
||||||
|
inst = None
|
||||||
|
try:
|
||||||
|
inst = _find_instance_info(client, instance_name)
|
||||||
|
except botocore.exceptions.ClientError as e:
|
||||||
|
if e.response['Error']['Code'] != 'NotFoundException':
|
||||||
|
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
|
||||||
|
|
||||||
|
zone = module.params.get('zone')
|
||||||
|
blueprint_id = module.params.get('blueprint_id')
|
||||||
|
bundle_id = module.params.get('bundle_id')
|
||||||
|
user_data = module.params.get('user_data')
|
||||||
|
user_data = '' if user_data is None else user_data
|
||||||
|
|
||||||
|
wait = module.params.get('wait')
|
||||||
|
wait_timeout = int(module.params.get('wait_timeout'))
|
||||||
|
wait_max = time.time() + wait_timeout
|
||||||
|
|
||||||
|
if module.params.get('key_pair_name'):
|
||||||
|
key_pair_name = module.params.get('key_pair_name')
|
||||||
|
else:
|
||||||
|
key_pair_name = ''
|
||||||
|
|
||||||
|
if module.params.get('open_ports'):
|
||||||
|
open_ports = module.params.get('open_ports')
|
||||||
|
else:
|
||||||
|
open_ports = '[]'
|
||||||
|
|
||||||
|
resp = None
|
||||||
|
if inst is None:
|
||||||
|
try:
|
||||||
|
resp = client.create_instances(
|
||||||
|
instanceNames=[
|
||||||
|
instance_name
|
||||||
|
],
|
||||||
|
availabilityZone=zone,
|
||||||
|
blueprintId=blueprint_id,
|
||||||
|
bundleId=bundle_id,
|
||||||
|
userData=user_data,
|
||||||
|
keyPairName=key_pair_name,
|
||||||
|
)
|
||||||
|
resp = resp['operations'][0]
|
||||||
|
except botocore.exceptions.ClientError as e:
|
||||||
|
module.fail_json(msg='Unable to create instance {0}, error: {1}'.format(instance_name, e))
|
||||||
|
|
||||||
|
inst = _find_instance_info(client, instance_name)
|
||||||
|
|
||||||
|
# Wait for instance to become running
|
||||||
|
if wait:
|
||||||
|
while (wait_max > time.time()) and (inst is not None and inst['state']['name'] != "running"):
|
||||||
|
try:
|
||||||
|
time.sleep(2)
|
||||||
|
inst = _find_instance_info(client, instance_name)
|
||||||
|
except botocore.exceptions.ClientError as e:
|
||||||
|
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
|
||||||
|
module.fail_json(msg="Failed to start/stop instance {0}. Check that you have permissions to perform the operation".format(instance_name),
|
||||||
|
exception=traceback.format_exc())
|
||||||
|
elif e.response['Error']['Code'] == "RequestExpired":
|
||||||
|
module.fail_json(msg="RequestExpired: Failed to start instance {0}.".format(instance_name), exception=traceback.format_exc())
|
||||||
|
time.sleep(1)
|
||||||
|
|
||||||
|
# Timed out
|
||||||
|
if wait and not changed and wait_max <= time.time():
|
||||||
|
module.fail_json(msg="Wait for instance start timeout at %s" % time.asctime())
|
||||||
|
|
||||||
|
# Attempt to open ports
|
||||||
|
if open_ports:
|
||||||
|
if inst is not None:
|
||||||
|
try:
|
||||||
|
for o in open_ports:
|
||||||
|
resp = client.open_instance_public_ports(
|
||||||
|
instanceName=instance_name,
|
||||||
|
portInfo={
|
||||||
|
'fromPort': o['from_port'],
|
||||||
|
'toPort': o['to_port'],
|
||||||
|
'protocol': o['protocol']
|
||||||
|
}
|
||||||
|
)
|
||||||
|
except botocore.exceptions.ClientError as e:
|
||||||
|
module.fail_json(msg='Error opening ports for instance {0}, error: {1}'.format(instance_name, e))
|
||||||
|
|
||||||
|
changed = True
|
||||||
|
|
||||||
|
return (changed, inst)
|
||||||
|
|
||||||
|
|
||||||
|
def delete_instance(module, client, instance_name):
|
||||||
|
"""
|
||||||
|
Terminates an instance
|
||||||
|
|
||||||
|
module: Ansible module object
|
||||||
|
client: authenticated lightsail connection object
|
||||||
|
instance_name: name of instance to delete
|
||||||
|
|
||||||
|
Returns a dictionary of instance information
|
||||||
|
about the instance deleted (pre-deletion).
|
||||||
|
|
||||||
|
If the instance to be deleted is running
|
||||||
|
"changed" will be set to False.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
# It looks like deleting removes the instance immediately, nothing to wait for
|
||||||
|
wait = module.params.get('wait')
|
||||||
|
wait_timeout = int(module.params.get('wait_timeout'))
|
||||||
|
wait_max = time.time() + wait_timeout
|
||||||
|
|
||||||
|
changed = False
|
||||||
|
|
||||||
|
inst = None
|
||||||
|
try:
|
||||||
|
inst = _find_instance_info(client, instance_name)
|
||||||
|
except botocore.exceptions.ClientError as e:
|
||||||
|
if e.response['Error']['Code'] != 'NotFoundException':
|
||||||
|
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
|
||||||
|
|
||||||
|
# Wait for instance to exit transition state before deleting
|
||||||
|
if wait:
|
||||||
|
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
|
||||||
|
try:
|
||||||
|
time.sleep(5)
|
||||||
|
inst = _find_instance_info(client, instance_name)
|
||||||
|
except botocore.exceptions.ClientError as e:
|
||||||
|
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
|
||||||
|
module.fail_json(msg="Failed to delete instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
|
||||||
|
exception=traceback.format_exc())
|
||||||
|
elif e.response['Error']['Code'] == "RequestExpired":
|
||||||
|
module.fail_json(msg="RequestExpired: Failed to delete instance {0}.".format(instance_name), exception=traceback.format_exc())
|
||||||
|
# sleep and retry
|
||||||
|
time.sleep(10)
|
||||||
|
|
||||||
|
# Attempt to delete
|
||||||
|
if inst is not None:
|
||||||
|
while not changed and ((wait and wait_max > time.time()) or (not wait)):
|
||||||
|
try:
|
||||||
|
client.delete_instance(instanceName=instance_name)
|
||||||
|
changed = True
|
||||||
|
except botocore.exceptions.ClientError as e:
|
||||||
|
module.fail_json(msg='Error deleting instance {0}, error: {1}'.format(instance_name, e))
|
||||||
|
|
||||||
|
# Timed out
|
||||||
|
if wait and not changed and wait_max <= time.time():
|
||||||
|
module.fail_json(msg="wait for instance delete timeout at %s" % time.asctime())
|
||||||
|
|
||||||
|
return (changed, inst)
|
||||||
|
|
||||||
|
|
||||||
|
def restart_instance(module, client, instance_name):
|
||||||
|
"""
|
||||||
|
Reboot an existing instance
|
||||||
|
|
||||||
|
module: Ansible module object
|
||||||
|
client: authenticated lightsail connection object
|
||||||
|
instance_name: name of instance to reboot
|
||||||
|
|
||||||
|
Returns a dictionary of instance information
|
||||||
|
about the restarted instance
|
||||||
|
|
||||||
|
If the instance was not able to reboot,
|
||||||
|
"changed" will be set to False.
|
||||||
|
|
||||||
|
Wait will not apply here as this is an OS-level operation
|
||||||
|
"""
|
||||||
|
wait = module.params.get('wait')
|
||||||
|
wait_timeout = int(module.params.get('wait_timeout'))
|
||||||
|
wait_max = time.time() + wait_timeout
|
||||||
|
|
||||||
|
changed = False
|
||||||
|
|
||||||
|
inst = None
|
||||||
|
try:
|
||||||
|
inst = _find_instance_info(client, instance_name)
|
||||||
|
except botocore.exceptions.ClientError as e:
|
||||||
|
if e.response['Error']['Code'] != 'NotFoundException':
|
||||||
|
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
|
||||||
|
|
||||||
|
# Wait for instance to exit transition state before state change
|
||||||
|
if wait:
|
||||||
|
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
|
||||||
|
try:
|
||||||
|
time.sleep(5)
|
||||||
|
inst = _find_instance_info(client, instance_name)
|
||||||
|
except botocore.exceptions.ClientError as e:
|
||||||
|
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
|
||||||
|
module.fail_json(msg="Failed to restart instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
|
||||||
|
exception=traceback.format_exc())
|
||||||
|
elif e.response['Error']['Code'] == "RequestExpired":
|
||||||
|
module.fail_json(msg="RequestExpired: Failed to restart instance {0}.".format(instance_name), exception=traceback.format_exc())
|
||||||
|
time.sleep(3)
|
||||||
|
|
||||||
|
# send reboot
|
||||||
|
if inst is not None:
|
||||||
|
try:
|
||||||
|
client.reboot_instance(instanceName=instance_name)
|
||||||
|
except botocore.exceptions.ClientError as e:
|
||||||
|
if e.response['Error']['Code'] != 'NotFoundException':
|
||||||
|
module.fail_json(msg='Unable to reboot instance {0}, error: {1}'.format(instance_name, e))
|
||||||
|
changed = True
|
||||||
|
|
||||||
|
return (changed, inst)
|
||||||
|
|
||||||
|
|
||||||
|
def startstop_instance(module, client, instance_name, state):
|
||||||
|
"""
|
||||||
|
Starts or stops an existing instance
|
||||||
|
|
||||||
|
module: Ansible module object
|
||||||
|
client: authenticated lightsail connection object
|
||||||
|
instance_name: name of instance to start/stop
|
||||||
|
state: Target state ("running" or "stopped")
|
||||||
|
|
||||||
|
Returns a dictionary of instance information
|
||||||
|
about the instance started/stopped
|
||||||
|
|
||||||
|
If the instance was not able to state change,
|
||||||
|
"changed" will be set to False.
|
||||||
|
|
||||||
|
"""
|
||||||
|
wait = module.params.get('wait')
|
||||||
|
wait_timeout = int(module.params.get('wait_timeout'))
|
||||||
|
wait_max = time.time() + wait_timeout
|
||||||
|
|
||||||
|
changed = False
|
||||||
|
|
||||||
|
inst = None
|
||||||
|
try:
|
||||||
|
inst = _find_instance_info(client, instance_name)
|
||||||
|
except botocore.exceptions.ClientError as e:
|
||||||
|
if e.response['Error']['Code'] != 'NotFoundException':
|
||||||
|
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
|
||||||
|
|
||||||
|
# Wait for instance to exit transition state before state change
|
||||||
|
if wait:
|
||||||
|
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
|
||||||
|
try:
|
||||||
|
time.sleep(5)
|
||||||
|
inst = _find_instance_info(client, instance_name)
|
||||||
|
except botocore.exceptions.ClientError as e:
|
||||||
|
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
|
||||||
|
module.fail_json(msg="Failed to start/stop instance {0}. Check that you have permissions to perform the operation".format(instance_name),
|
||||||
|
exception=traceback.format_exc())
|
||||||
|
elif e.response['Error']['Code'] == "RequestExpired":
|
||||||
|
module.fail_json(msg="RequestExpired: Failed to start/stop instance {0}.".format(instance_name), exception=traceback.format_exc())
|
||||||
|
time.sleep(1)
|
||||||
|
|
||||||
|
# Try state change
|
||||||
|
if inst is not None and inst['state']['name'] != state:
|
||||||
|
try:
|
||||||
|
if state == 'running':
|
||||||
|
client.start_instance(instanceName=instance_name)
|
||||||
|
else:
|
||||||
|
client.stop_instance(instanceName=instance_name)
|
||||||
|
except botocore.exceptions.ClientError as e:
|
||||||
|
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(instance_name, e))
|
||||||
|
changed = True
|
||||||
|
# Grab current instance info
|
||||||
|
inst = _find_instance_info(client, instance_name)
|
||||||
|
|
||||||
|
return (changed, inst)
|
||||||
|
|
||||||
|
|
||||||
|
def core(module):
|
||||||
|
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
|
||||||
|
if not region:
|
||||||
|
module.fail_json(msg='region must be specified')
|
||||||
|
|
||||||
|
client = None
|
||||||
|
try:
|
||||||
|
client = boto3_conn(module, conn_type='client', resource='lightsail',
|
||||||
|
region=region, endpoint=ec2_url, **aws_connect_kwargs)
|
||||||
|
except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
|
||||||
|
module.fail_json(msg='Failed while connecting to the lightsail service: %s' % e, exception=traceback.format_exc())
|
||||||
|
|
||||||
|
changed = False
|
||||||
|
state = module.params['state']
|
||||||
|
name = module.params['name']
|
||||||
|
|
||||||
|
if state == 'absent':
|
||||||
|
changed, instance_dict = delete_instance(module, client, name)
|
||||||
|
elif state in ('running', 'stopped'):
|
||||||
|
changed, instance_dict = startstop_instance(module, client, name, state)
|
||||||
|
elif state == 'restarted':
|
||||||
|
changed, instance_dict = restart_instance(module, client, name)
|
||||||
|
elif state == 'present':
|
||||||
|
changed, instance_dict = create_instance(module, client, name)
|
||||||
|
|
||||||
|
module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(instance_dict))
|
||||||
|
|
||||||
|
|
||||||
|
def _find_instance_info(client, instance_name):
|
||||||
|
''' handle exceptions where this function is called '''
|
||||||
|
inst = None
|
||||||
|
try:
|
||||||
|
inst = client.get_instance(instanceName=instance_name)
|
||||||
|
except botocore.exceptions.ClientError as e:
|
||||||
|
raise
|
||||||
|
return inst['instance']
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
argument_spec = ec2_argument_spec()
|
||||||
|
argument_spec.update(dict(
|
||||||
|
name=dict(type='str', required=True),
|
||||||
|
state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted']),
|
||||||
|
zone=dict(type='str'),
|
||||||
|
blueprint_id=dict(type='str'),
|
||||||
|
bundle_id=dict(type='str'),
|
||||||
|
key_pair_name=dict(type='str'),
|
||||||
|
user_data=dict(type='str'),
|
||||||
|
wait=dict(type='bool', default=True),
|
||||||
|
wait_timeout=dict(default=300),
|
||||||
|
open_ports=dict(type='list')
|
||||||
|
))
|
||||||
|
|
||||||
|
module = AnsibleModule(argument_spec=argument_spec)
|
||||||
|
|
||||||
|
if not HAS_BOTO3:
|
||||||
|
module.fail_json(msg='Python module "boto3" is missing, please install it')
|
||||||
|
|
||||||
|
if not HAS_BOTOCORE:
|
||||||
|
module.fail_json(msg='Python module "botocore" is missing, please install it')
|
||||||
|
|
||||||
|
try:
|
||||||
|
core(module)
|
||||||
|
except (botocore.exceptions.ClientError, Exception) as e:
|
||||||
|
module.fail_json(msg=str(e), exception=traceback.format_exc())
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
|
@@ -5,11 +5,11 @@
   register: OS
 
 - name: Ubuntu pre-tasks
-  include: ubuntu.yml
+  include_tasks: ubuntu.yml
   when: '"Ubuntu" in OS.stdout'
 
 - name: FreeBSD pre-tasks
-  include: freebsd.yml
+  include_tasks: freebsd.yml
   when: '"FreeBSD" in OS.stdout'
 
-- include: facts/main.yml
+- include_tasks: facts/main.yml
|
|
@@ -6,4 +6,4 @@
 - name: FreeBSD / HardenedBSD | Configure defaults
   raw: sudo ln -sf /usr/local/bin/python2.7 /usr/bin/python2.7
 
-- include: facts/FreeBSD.yml
+- include_tasks: facts/FreeBSD.yml
|
|
@@ -13,4 +13,4 @@
   pause:
     seconds: 20
 
-- include: local_ssh.yml
+- include_tasks: local_ssh.yml
|
|
@@ -1,12 +1,11 @@
-msrestazure
 setuptools>=11.3
-ansible>=2.1,<2.2.1
+ansible[azure]==2.4.3
 dopy==0.3.5
 boto>=2.5
 boto3
-azure==2.0.0rc5
-msrest==0.4.1
 apache-libcloud
 six
 pyopenssl
 jinja2==2.8
+shade
+pycrypto
|
|
@@ -2,7 +2,7 @@
   setup:
 
 - name: Include system based facts and tasks
-  include: systems/main.yml
+  include_tasks: systems/main.yml
 
 - name: Install prerequisites
   package: name="{{ item }}" state=present
|
|
@@ -1,13 +1,13 @@
 ---
 
-- include: Debian.yml
+- include_tasks: Debian.yml
   when: ansible_distribution == 'Debian'
 
-- include: Ubuntu.yml
+- include_tasks: Ubuntu.yml
   when: ansible_distribution == 'Ubuntu'
 
-- include: CentOS.yml
+- include_tasks: CentOS.yml
   when: ansible_distribution == 'CentOS'
 
-- include: Fedora.yml
+- include_tasks: Fedora.yml
   when: ansible_distribution == 'Fedora'
|
|
@@ -19,10 +19,10 @@
 - set_fact:
     ami_image: "{{ ami_search.results[0].ami_id }}"
 
-- include: encrypt_image.yml
+- include_tasks: encrypt_image.yml
   tags: [encrypted]
 
-- include: cloudformation.yml
+- include_tasks: cloudformation.yml
 
 - name: Add new instance to host group
   add_host:
@@ -38,7 +38,7 @@
     cloud_instance_ip: "{{ stack.stack_outputs.ElasticIP }}"
 
 - name: Get EC2 instances
-  ec2_remote_facts:
+  ec2_instance_facts:
     aws_access_key: "{{ access_key }}"
     aws_secret_key: "{{ secret_key }}"
     region: "{{ region }}"
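ec2_instance_facts supersedes ec2_remote_facts here and keeps the same credential parameters. A hypothetical lookup by tag for illustration (the filters value and register name below are not taken from this commit):

    - name: Get EC2 instances
      ec2_instance_facts:
        aws_access_key: "{{ access_key }}"
        aws_secret_key: "{{ secret_key }}"
        region: "{{ region }}"
        filters:
          "tag:Environment": Algo
      register: ec2_instances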
|
|
roles/cloud-lightsail/tasks/main.yml (new file, 52 lines)
|
@ -0,0 +1,52 @@
|
||||||
|
- block:
|
||||||
|
- set_fact:
|
||||||
|
access_key: "{{ aws_access_key | default(lookup('env','AWS_ACCESS_KEY_ID'), true) }}"
|
||||||
|
secret_key: "{{ aws_secret_key | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}"
|
||||||
|
region: "{{ algo_region | default(lookup('env','AWS_DEFAULT_REGION'), true) }}"
|
||||||
|
|
||||||
|
- name: Create an instance
|
||||||
|
lightsail:
|
||||||
|
aws_access_key: "{{ access_key }}"
|
||||||
|
aws_secret_key: "{{ secret_key }}"
|
||||||
|
name: "{{ algo_server_name }}"
|
||||||
|
state: present
|
||||||
|
region: "{{ region }}"
|
||||||
|
zone: "{{ region }}a"
|
||||||
|
blueprint_id: "{{ cloud_providers.lightsail.image }}"
|
||||||
|
bundle_id: "{{ cloud_providers.lightsail.size }}"
|
||||||
|
wait_timeout: 300
|
||||||
|
open_ports:
|
||||||
|
- from_port: 4500
|
||||||
|
to_port: 4500
|
||||||
|
protocol: udp
|
||||||
|
- from_port: 500
|
||||||
|
to_port: 500
|
||||||
|
protocol: udp
|
||||||
|
user_data: |
|
||||||
|
#!/bin/bash
|
||||||
|
mkdir -p /home/ubuntu/.ssh/
|
||||||
|
echo "{{ lookup('file', '{{ SSH_keys.public }}') }}" >> /home/ubuntu/.ssh/authorized_keys
|
||||||
|
chown -R ubuntu: /home/ubuntu/.ssh/
|
||||||
|
chmod 0700 /home/ubuntu/.ssh/
|
||||||
|
chmod 0600 /home/ubuntu/.ssh/*
|
||||||
|
test
|
||||||
|
register: algo_instance
|
||||||
|
|
||||||
|
- set_fact:
|
||||||
|
cloud_instance_ip: "{{ algo_instance['instance']['public_ip_address'] }}"
|
||||||
|
|
||||||
|
- name: Add new instance to host group
|
||||||
|
add_host:
|
||||||
|
hostname: "{{ cloud_instance_ip }}"
|
||||||
|
groupname: vpn-host
|
||||||
|
ansible_ssh_user: ubuntu
|
||||||
|
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||||
|
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||||
|
cloud_provider: lightsail
|
||||||
|
ipv6_support: no
|
||||||
|
|
||||||
|
rescue:
|
||||||
|
- debug: var=fail_hint
|
||||||
|
tags: always
|
||||||
|
- fail:
|
||||||
|
tags: always
|
roles/cloud-openstack/tasks/main.yml (new file, 87 lines)
|
@ -0,0 +1,87 @@
|
||||||
|
---
|
||||||
|
- block:
|
||||||
|
- name: Security group created
|
||||||
|
os_security_group:
|
||||||
|
state: "{{ state|default('present') }}"
|
||||||
|
name: "{{ algo_server_name }}-security_group"
|
||||||
|
description: AlgoVPN security group
|
||||||
|
register: os_security_group
|
||||||
|
|
||||||
|
- name: Security rules created
|
||||||
|
os_security_group_rule:
|
||||||
|
state: "{{ state|default('present') }}"
|
||||||
|
security_group: "{{ os_security_group.id }}"
|
||||||
|
protocol: "{{ item.proto }}"
|
||||||
|
port_range_min: "{{ item.port_min }}"
|
||||||
|
port_range_max: "{{ item.port_max }}"
|
||||||
|
remote_ip_prefix: "{{ item.range }}"
|
||||||
|
with_items:
|
||||||
|
- { proto: tcp, port_min: 22, port_max: 22, range: 0.0.0.0/0 }
|
||||||
|
- { proto: icmp, port_min: -1, port_max: -1, range: 0.0.0.0/0 }
|
||||||
|
- { proto: udp, port_min: 4500, port_max: 4500, range: 0.0.0.0/0 }
|
||||||
|
- { proto: udp, port_min: 500, port_max: 500, range: 0.0.0.0/0 }
|
||||||
|
|
||||||
|
- name: Keypair created
|
||||||
|
os_keypair:
|
||||||
|
state: "{{ state|default('present') }}"
|
||||||
|
name: "{{ SSH_keys.comment|regex_replace('@', '_') }}"
|
||||||
|
public_key_file: "{{ SSH_keys.public }}"
|
||||||
|
register: os_keypair
|
||||||
|
|
||||||
|
- name: Gather facts about flavors
|
||||||
|
os_flavor_facts:
|
||||||
|
ram: "{{ cloud_providers.openstack.flavor_ram }}"
|
||||||
|
|
||||||
|
- name: Gather facts about images
|
||||||
|
os_image_facts:
|
||||||
|
image: "{{ cloud_providers.openstack.image }}"
|
||||||
|
|
||||||
|
- name: Gather facts about public networks
|
||||||
|
os_networks_facts:
|
||||||
|
|
||||||
|
- name: Set the network as a fact
|
||||||
|
set_fact:
|
||||||
|
public_network_id: "{{ item.id }}"
|
||||||
|
when:
|
||||||
|
- item['router:external']|default(omit)
|
||||||
|
- item['admin_state_up']|default(omit)
|
||||||
|
- item['status'] == 'ACTIVE'
|
||||||
|
with_items: "{{ openstack_networks }}"
|
||||||
|
|
||||||
|
- name: Set facts
|
||||||
|
set_fact:
|
||||||
|
flavor_id: "{{ (openstack_flavors | sort(attribute='ram'))[0]['id'] }}"
|
||||||
|
image_id: "{{ openstack_image['id'] }}"
|
||||||
|
keypair_name: "{{ os_keypair.key.name }}"
|
||||||
|
security_group_name: "{{ os_security_group['secgroup']['name'] }}"
|
||||||
|
|
||||||
|
- name: Server created
|
||||||
|
os_server:
|
||||||
|
state: "{{ state|default('present') }}"
|
||||||
|
name: "{{ algo_server_name }}"
|
||||||
|
image: "{{ image_id }}"
|
||||||
|
flavor: "{{ flavor_id }}"
|
||||||
|
key_name: "{{ keypair_name }}"
|
||||||
|
security_groups: "{{ security_group_name }}"
|
||||||
|
nics:
|
||||||
|
- net-id: "{{ public_network_id }}"
|
||||||
|
register: os_server
|
||||||
|
|
||||||
|
- set_fact:
|
||||||
|
cloud_instance_ip: "{{ os_server['openstack']['public_v4'] }}"
|
||||||
|
|
||||||
|
- name: Add new instance to host group
|
||||||
|
add_host:
|
||||||
|
hostname: "{{ cloud_instance_ip }}"
|
||||||
|
groupname: vpn-host
|
||||||
|
ansible_ssh_user: ubuntu
|
||||||
|
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||||
|
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||||
|
cloud_provider: openstack
|
||||||
|
ipv6_support: omit
|
||||||
|
|
||||||
|
rescue:
|
||||||
|
- debug: var=fail_hint
|
||||||
|
tags: always
|
||||||
|
- fail:
|
||||||
|
tags: always
|
roles/cloud-scaleway/tasks/main.yml (new file, 128 lines)
|
@ -0,0 +1,128 @@
|
||||||
|
- block:
|
||||||
|
- name: Check if server exists
|
||||||
|
uri:
|
||||||
|
url: "https://cp-{{ algo_region }}.scaleway.com/servers"
|
||||||
|
method: GET
|
||||||
|
headers:
|
||||||
|
Content-Type: 'application/json'
|
||||||
|
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||||
|
status_code: 200
|
||||||
|
register: scaleway_servers
|
||||||
|
|
||||||
|
- name: Set server id as a fact
|
||||||
|
set_fact:
|
||||||
|
server_id: "{{ item.id }}"
|
||||||
|
no_log: true
|
||||||
|
when: algo_server_name == item.name
|
||||||
|
with_items: "{{ scaleway_servers.json.servers }}"
|
||||||
|
|
||||||
|
- name: Create a server if it doesn't exist
|
||||||
|
block:
|
||||||
|
- name: Get the organization id
|
||||||
|
uri:
|
||||||
|
url: https://account.cloud.online.net/organizations
|
||||||
|
method: GET
|
||||||
|
headers:
|
||||||
|
Content-Type: 'application/json'
|
||||||
|
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||||
|
status_code: 200
|
||||||
|
register: scaleway_organizations
|
||||||
|
|
||||||
|
- name: Set organization id as a fact
|
||||||
|
set_fact:
|
||||||
|
organization_id: "{{ item.id }}"
|
||||||
|
no_log: true
|
||||||
|
when: scaleway_organization == item.name
|
||||||
|
with_items: "{{ scaleway_organizations.json.organizations }}"
|
||||||
|
|
||||||
|
- name: Get images
|
||||||
|
uri:
|
||||||
|
url: "https://cp-{{ algo_region }}.scaleway.com/images"
|
||||||
|
method: GET
|
||||||
|
headers:
|
||||||
|
Content-Type: 'application/json'
|
||||||
|
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||||
|
status_code: 200
|
||||||
|
register: scaleway_images
|
||||||
|
|
||||||
|
- name: Set image id as a fact
|
||||||
|
set_fact:
|
||||||
|
image_id: "{{ item.id }}"
|
||||||
|
no_log: true
|
||||||
|
when:
|
||||||
|
- cloud_providers.scaleway.image in item.name
|
||||||
|
- cloud_providers.scaleway.arch == item.arch
|
||||||
|
with_items: "{{ scaleway_images.json.images }}"
|
||||||
|
|
||||||
|
- name: Create a server
|
||||||
|
uri:
|
||||||
|
url: "https://cp-{{ algo_region }}.scaleway.com/servers/"
|
||||||
|
method: POST
|
||||||
|
headers:
|
||||||
|
Content-Type: 'application/json'
|
||||||
|
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||||
|
body:
|
||||||
|
organization: "{{ organization_id }}"
|
||||||
|
name: "{{ algo_server_name }}"
|
||||||
|
image: "{{ image_id }}"
|
||||||
|
commercial_type: "{{cloud_providers.scaleway.size }}"
|
||||||
|
tags:
|
||||||
|
- Environment:Algo
|
||||||
|
- AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }}
|
||||||
|
enable_ipv6: true
|
||||||
|
status_code: 201
|
||||||
|
body_format: json
|
||||||
|
register: algo_instance
|
||||||
|
|
||||||
|
- name: Set server id as a fact
|
||||||
|
set_fact:
|
||||||
|
server_id: "{{ algo_instance.json.server.id }}"
|
||||||
|
when: server_id is not defined
|
||||||
|
|
||||||
|
- name: Power on the server
|
||||||
|
uri:
|
||||||
|
url: https://cp-{{ algo_region }}.scaleway.com/servers/{{ server_id }}/action
|
||||||
|
method: POST
|
||||||
|
headers:
|
||||||
|
Content-Type: application/json
|
||||||
|
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||||
|
body:
|
||||||
|
action: poweron
|
||||||
|
status_code: 202
|
||||||
|
body_format: json
|
||||||
|
ignore_errors: true
|
||||||
|
no_log: true
|
||||||
|
|
||||||
|
- name: Wait for the server to become running
|
||||||
|
uri:
|
||||||
|
url: "https://cp-{{ algo_region }}.scaleway.com/servers/{{ server_id }}"
|
||||||
|
method: GET
|
||||||
|
headers:
|
||||||
|
Content-Type: 'application/json'
|
||||||
|
X-Auth-Token: "{{ scaleway_auth_token }}"
|
||||||
|
status_code: 200
|
||||||
|
until:
|
||||||
|
- algo_instance.json.server.state is defined
|
||||||
|
- algo_instance.json.server.state == "running"
|
||||||
|
retries: 20
|
||||||
|
delay: 30
|
||||||
|
register: algo_instance
|
||||||
|
|
||||||
|
- set_fact:
|
||||||
|
cloud_instance_ip: "{{ algo_instance['json']['server']['public_ip']['address'] }}"
|
||||||
|
|
||||||
|
- name: Add new instance to host group
|
||||||
|
add_host:
|
||||||
|
hostname: "{{ cloud_instance_ip }}"
|
||||||
|
groupname: vpn-host
|
||||||
|
ansible_ssh_user: root
|
||||||
|
ansible_python_interpreter: "/usr/bin/python2.7"
|
||||||
|
ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
|
||||||
|
cloud_provider: scaleway
|
||||||
|
ipv6_support: yes
|
||||||
|
|
||||||
|
rescue:
|
||||||
|
- debug: var=fail_hint
|
||||||
|
tags: always
|
||||||
|
- fail:
|
||||||
|
tags: always
|
|
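The new cloud-scaleway role drives the Scaleway REST API directly through the uri module and expects its inputs as plain variables. A hypothetical standalone wiring is sketched below; the variable names match the tasks above, while every value (region, size, image, key paths, token lookup) is a placeholder rather than Algo's real configuration:

---
# sketch-scaleway-wiring.yml (hypothetical): every value below is a placeholder
- hosts: localhost
  gather_facts: false
  vars:
    algo_server_name: algo.local
    algo_region: par1                       # used in the cp-{{ algo_region }} API URLs
    scaleway_auth_token: "{{ lookup('env', 'SCW_TOKEN') }}"
    scaleway_organization: my-organization
    SSH_keys:
      public: configs/algo.pem.pub
      private: configs/algo.pem
    cloud_providers:
      scaleway:
        size: VC1S                          # sent as commercial_type
        image: Ubuntu Xenial                # matched against the image list by name
        arch: x86_64                        # matched against the image architecture
  roles:
    - cloud-scaleway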
@@ -1,9 +1,9 @@
 ---
 - block:
-    - include: ubuntu.yml
+    - include_tasks: ubuntu.yml
      when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'

-    - include: freebsd.yml
+    - include_tasks: freebsd.yml
      when: ansible_distribution == 'FreeBSD'

     - name: Install tools
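The rename from include to include_tasks recurs throughout this commit: Ansible 2.4 deprecates the bare include statement in favor of include_tasks (dynamic) and import_tasks (static). A small self-contained sketch of the dynamic form, assuming a generated file name that is not part of the repository:

---
# sketch-include-tasks.yml (hypothetical): demonstrates the dynamic include only
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Write a small tasks file to include at runtime
      copy:
        dest: "{{ playbook_dir }}/hello-tasks.yml"
        content: |
          - name: Say hello from the included file
            debug:
              msg: "included dynamically"

    # pre-2.4 spelling, now deprecated:
    # - include: hello-tasks.yml

    - name: Include the generated file the Ansible 2.4 way
      include_tasks: "{{ playbook_dir }}/hello-tasks.yml"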
@@ -1,46 +1,42 @@
 ---
-- name: Install software updates
-  apt: update_cache=yes upgrade=dist
-  tags:
-    - cloud
-
-- name: Check if reboot is required
-  shell: >
-    if [[ -e /var/run/reboot-required ]]; then echo "required"; else echo "no"; fi
-  args:
-    executable: /bin/bash
-  register: reboot_required
-  tags:
-    - cloud
-
-- name: Reboot
-  shell: sleep 2 && shutdown -r now "Ansible updates triggered"
-  async: 1
-  poll: 0
-  when: reboot_required is defined and reboot_required.stdout == 'required'
-  ignore_errors: true
-  tags:
-    - cloud
-
-- name: Wait until SSH becomes ready...
-  local_action:
-    module: wait_for
-    port: 22
-    host: "{{ inventory_hostname }}"
-    search_regex: OpenSSH
-    delay: 10
-    timeout: 320
-  when: reboot_required is defined and reboot_required.stdout == 'required'
-  become: false
-  tags:
-    - cloud
+- name: Cloud only tasks
+  block:
+    - name: Install software updates
+      apt: update_cache=yes upgrade=dist
+
+    - name: Check if reboot is required
+      shell: >
+        if [[ -e /var/run/reboot-required ]]; then echo "required"; else echo "no"; fi
+      args:
+        executable: /bin/bash
+      register: reboot_required
+
+    - name: Reboot
+      shell: sleep 2 && shutdown -r now "Ansible updates triggered"
+      async: 1
+      poll: 0
+      when: reboot_required is defined and reboot_required.stdout == 'required'
+      ignore_errors: true
+
+    - name: Wait until SSH becomes ready...
+      local_action:
+        module: wait_for
+        port: 22
+        host: "{{ inventory_hostname }}"
+        search_regex: OpenSSH
+        delay: 10
+        timeout: 320
+      when: reboot_required is defined and reboot_required.stdout == 'required'
+      become: false
+
+    - name: Include unatteded upgrades configuration
+      include_tasks: unattended-upgrades.yml

 - name: Disable MOTD on login and SSHD
   replace: dest="{{ item.file }}" regexp="{{ item.regexp }}" replace="{{ item.line }}"
   with_items:
     - { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/login' }
     - { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/sshd' }
   tags:
     - cloud
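The reboot sequence kept inside the new block uses Ansible's usual fire-and-forget idiom: async: 1 with poll: 0 lets the play move past the shutdown command, and a wait_for task run on the control machine blocks until sshd accepts connections again. A minimal sketch, assuming an existing vpn-host inventory group:

---
# sketch-reboot-and-wait.yml (hypothetical): host group and timings are placeholders
- hosts: vpn-host
  become: true
  tasks:
    - name: Trigger a reboot without waiting for the command to return
      shell: sleep 2 && shutdown -r now "Ansible triggered reboot"
      async: 1
      poll: 0

    - name: Wait from the control machine until sshd answers again
      local_action:
        module: wait_for
        host: "{{ inventory_hostname }}"
        port: 22
        search_regex: OpenSSH
        delay: 10
        timeout: 320
      become: false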
21 roles/common/tasks/unattended-upgrades.yml Normal file

@@ -0,0 +1,21 @@
---
- name: Install unattended-upgrades
  apt:
    name: unattended-upgrades
    state: latest

- name: Configure unattended-upgrades
  template:
    src: 50unattended-upgrades.j2
    dest: /etc/apt/apt.conf.d/50unattended-upgrades
    owner: root
    group: root
    mode: 0644

- name: Periodic upgrades configured
  template:
    src: 10periodic.j2
    dest: /etc/apt/apt.conf.d/10periodic
    owner: root
    group: root
    mode: 0644
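A possible follow-up check, not part of this commit: once both templates are rendered, unattended-upgrade can be dry-run on the target to confirm that APT accepts the generated configuration (the vpn-host group name is a placeholder):

---
# sketch-verify-unattended-upgrades.yml (hypothetical): optional verification only
- hosts: vpn-host
  become: true
  tasks:
    - name: Dry-run unattended-upgrade against the rendered configuration
      command: unattended-upgrade --dry-run --debug
      register: uu_dry_run
      changed_when: false

    - name: Show what would have been upgraded
      debug:
        var: uu_dry_run.stdout_lines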
@@ -1,4 +1,4 @@
 APT::Periodic::Update-Package-Lists "1";
 APT::Periodic::Download-Upgradeable-Packages "1";
 APT::Periodic::AutocleanInterval "7";
 APT::Periodic::Unattended-Upgrade "1";
@@ -15,7 +15,7 @@ Unattended-Upgrade::Package-Blacklist {
 };

 // This option allows you to control if on a unclean dpkg exit
 // unattended-upgrades will automatically run
 // dpkg --force-confold --configure -a
 // The default is true, to ensure updates keep getting installed
 //Unattended-Upgrade::AutoFixInterruptedDpkg "false";

@@ -46,7 +46,7 @@ Unattended-Upgrade::Package-Blacklist {
 //Unattended-Upgrade::Remove-Unused-Dependencies "false";

 // Automatically reboot *WITHOUT CONFIRMATION*
 // if the file /var/run/reboot-required is found after the upgrade
 //Unattended-Upgrade::Automatic-Reboot "false";

 // If automatic reboot is enabled and needed, reboot at the specific
@@ -14,10 +14,10 @@
 - name: The dnsmasq directory created
   file: dest=/var/lib/dnsmasq state=directory mode=0755 owner=dnsmasq group=nogroup

-- include: ubuntu.yml
+- include_tasks: ubuntu.yml
   when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'

-- include: freebsd.yml
+- include_tasks: freebsd.yml
   when: ansible_distribution == 'FreeBSD'

 - name: Dnsmasq configured
@@ -1,5 +0,0 @@
- name: restart ssh
  service: name="{{ ssh_service_name|default('ssh') }}" state=restarted

- name: flush routing cache
  shell: echo 1 > /proc/sys/net/ipv4/route/flush
@@ -1,4 +0,0 @@
---

dependencies:
  - { role: common, tags: common }
@@ -1,161 +0,0 @@
---
- block:
    - name: Install tools
      apt: name="{{ item }}" state=latest
      with_items:
        - unattended-upgrades

    - name: Configure unattended-upgrades
      template:
        src: 50unattended-upgrades.j2
        dest: /etc/apt/apt.conf.d/50unattended-upgrades
        owner: root
        group: root
        mode: 0644

    - name: Periodic upgrades configured
      template:
        src: 10periodic.j2
        dest: /etc/apt/apt.conf.d/10periodic
        owner: root
        group: root
        mode: 0644

    - name: Find directories for minimizing access
      stat:
        path: "{{ item }}"
      register: minimize_access_directories
      with_items:
        - '/usr/local/sbin'
        - '/usr/local/bin'
        - '/usr/sbin'
        - '/usr/bin'
        - '/sbin'
        - '/bin'

    - name: Minimize access
      file:
        path: '{{ item.stat.path }}'
        mode: 'go-w'
        recurse: yes
      when: item.stat.isdir
      with_items: "{{ minimize_access_directories.results }}"
      no_log: True

    - name: Change shadow ownership to root and mode to 0600
      file:
        dest: '/etc/shadow'
        owner: root
        group: root
        mode: 0600

    - name: change su-binary to only be accessible to user and group root
      file:
        dest: '/bin/su'
        owner: root
        group: root
        mode: 0750

    # Core dumps

    - name: Restrict core dumps (with PAM)
      lineinfile:
        dest: /etc/security/limits.conf
        line: "* hard core 0"
        state: present

    - name: Restrict core dumps (with sysctl)
      sysctl:
        name: fs.suid_dumpable
        value: 0
        ignoreerrors: yes
        sysctl_set: yes
        reload: yes
        state: present

    # Kernel fixes

    - name: Disable Source Routed Packet Acceptance
      sysctl:
        name: "{{item}}"
        value: 0
        ignoreerrors: yes
        sysctl_set: yes
        reload: yes
        state: present
      with_items:
        - net.ipv4.conf.all.accept_source_route
        - net.ipv4.conf.default.accept_source_route
      notify:
        - flush routing cache

    - name: Disable ICMP Redirect Acceptance
      sysctl:
        name: "{{item}}"
        value: 0
        ignoreerrors: yes
        sysctl_set: yes
        reload: yes
        state: present
      with_items:
        - net.ipv4.conf.all.accept_redirects
        - net.ipv4.conf.default.accept_redirects

    - name: Disable Secure ICMP Redirect Acceptance
      sysctl:
        name: "{{item}}"
        value: 0
        ignoreerrors: yes
        sysctl_set: yes
        reload: yes
        state: present
      with_items:
        - net.ipv4.conf.all.secure_redirects
        - net.ipv4.conf.default.secure_redirects
      notify:
        - flush routing cache

    - name: Enable Bad Error Message Protection
      sysctl:
        name: net.ipv4.icmp_ignore_bogus_error_responses
        value: 1
        ignoreerrors: yes
        sysctl_set: yes
        reload: yes
        state: present
      notify:
        - flush routing cache

    - name: Enable RFC-recommended Source Route Validation
      sysctl:
        name: "{{item}}"
        value: 1
        ignoreerrors: yes
        sysctl_set: yes
        reload: yes
        state: present
      with_items:
        - net.ipv4.conf.all.rp_filter
        - net.ipv4.conf.default.rp_filter
      notify:
        - flush routing cache

    - name: Do not send ICMP redirects (we are not a router)
      sysctl:
        name: net.ipv4.conf.all.send_redirects
        value: 0

    - name: SSH config
      template:
        src: sshd_config.j2
        dest: /etc/ssh/sshd_config
        owner: root
        group: root
        mode: 0644
      notify:
        - restart ssh

  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always
@@ -1,51 +0,0 @@
Port 22
# ListenAddress ::
# ListenAddress 0.0.0.0
Protocol 2

# LogLevel VERBOSE logs user's key fingerprint on login.
# Needed to have a clear audit log of which keys were used to log in.
SyslogFacility AUTH
LogLevel VERBOSE

# Use kernel sandbox mechanisms where possible
# Systrace on OpenBSD, Seccomp on Linux, seatbelt on macOS X (Darwin), rlimit elsewhere.
UsePrivilegeSeparation sandbox

# Handy for keeping network connections alive
TCPKeepAlive yes
ClientAliveInterval 120

# Authentication
UsePAM yes
PermitRootLogin without-password
StrictModes yes
PubkeyAuthentication yes
AcceptEnv LANG LC_*

# Turn off a lot of features
IgnoreRhosts yes
HostbasedAuthentication no
PermitEmptyPasswords no
ChallengeResponseAuthentication no
PasswordAuthentication no
UseDNS no

# Do not enable sftp
# If you DO enable it, use this line to log which files sftp users read/write
# Subsystem sftp /usr/lib/ssh/sftp-server -f AUTHPRIV -l INFO

# This makes ansible faster
PrintMotd no
PrintLastLog yes

# Use only modern host keys
HostKey /etc/ssh/ssh_host_ed25519_key
HostKey /etc/ssh/ssh_host_ecdsa_key

# Use only modern ciphers
KexAlgorithms curve25519-sha256@libssh.org,ecdh-sha2-nistp256
Ciphers chacha20-poly1305@openssh.com,aes128-gcm@openssh.com
MACs hmac-sha2-256-etm@openssh.com
HostKeyAlgorithms ssh-ed25519,ecdsa-sha2-nistp256
# PubkeyAcceptedKeyTypes accept anything
@@ -6,20 +6,20 @@
 - name: Ensure that the strongswan user exist
   user: name=strongswan group=strongswan state=present

-- include: ubuntu.yml
+- include_tasks: ubuntu.yml
   when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'

-- include: freebsd.yml
+- include_tasks: freebsd.yml
   when: ansible_distribution == 'FreeBSD'

 - name: Install strongSwan
   package: name=strongswan state=present

-- include: ipec_configuration.yml
+- include_tasks: ipec_configuration.yml
-- include: openssl.yml
+- include_tasks: openssl.yml
   tags: update-users
-- include: distribute_keys.yml
+- include_tasks: distribute_keys.yml
-- include: client_configs.yml
+- include_tasks: client_configs.yml
   delegate_to: localhost
   become: no
   tags: update-users

@@ -44,5 +44,5 @@
   - daemon-reload
   - restart strongswan

-- include: iptables.yml
+- include_tasks: iptables.yml
   tags: iptables
@@ -45,7 +45,7 @@
   pre_tasks:
     - block:
         - name: Common pre-tasks
-          include: playbooks/common.yml
+          include_tasks: playbooks/common.yml
           tags: always
       rescue:
         - debug: var=fail_hint