Mirror of https://github.com/trailofbits/algo.git (synced 2025-09-03 10:33:13 +02:00)
Merge master into fix/ansible-no-log-sensitive branch

- Resolved conflicts in cloud provider prompt files
- Updated to use new Ansible crypto modules from master
- Added no_log directives to sensitive Ansible crypto tasks
- Preserved privacy enhancements from the branch
Commit d3333fcaa7: 213 changed files with 9272 additions and 1905 deletions
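The no_log change summarized above follows Ansible's standard pattern for keeping generated key material out of task output. A minimal sketch of the pattern is shown below, assuming a community.crypto key-generation task; the task name, path, and variable are illustrative and not copied from the Algo roles.

```yaml
# Hypothetical sketch of the no_log pattern on a sensitive crypto task.
# The module and its options are real; the task name and path variable are illustrative.
- name: Generate CA private key
  community.crypto.openssl_privatekey:
    path: "{{ pki_path }}/private/cakey.pem"   # hypothetical variable
    type: ECC
    curve: secp384r1
  no_log: true  # keep key material and module output out of Ansible logs
```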
.ansible-lint

@@ -4,6 +4,8 @@ exclude_paths:
  - .github/
  - tests/legacy-lxd/
  - tests/
  - files/cloud-init/  # Cloud-init files have special format requirements
  - playbooks/  # These are task files included by other playbooks, not standalone playbooks

skip_list:
  - 'package-latest'  # Package installs should not use latest - needed for updates

@@ -14,23 +16,33 @@ skip_list:
  - 'var-naming[pattern]'  # Variable naming patterns
  - 'no-free-form'  # Avoid free-form syntax - some legacy usage
  - 'key-order[task]'  # Task key order
  - 'jinja[spacing]'  # Jinja2 spacing
  - 'name[casing]'  # Name casing
  - 'yaml[document-start]'  # YAML document start
  - 'role-name'  # Role naming convention - too many cloud-* roles
  - 'no-handler'  # Handler usage - some legitimate non-handler use cases
  - 'name[missing]'  # All tasks should be named - 113 issues to fix (temporary)

warn_list:
  - no-changed-when
  - yaml[line-length]
  - risky-file-permissions
  - name[missing]

# Enable additional rules
enable_list:
  - no-log-password
  - no-same-owner
  - partial-become
  - name[play]  # All plays should be named
  - yaml[new-line-at-end-of-file]  # Files should end with newline
  - jinja[invalid]  # Invalid Jinja2 syntax (catches template errors)
  - jinja[spacing]  # Proper spacing in Jinja2 expressions

# Rules we're actively working on fixing
# Move these from skip_list to enable_list as we fix them
# - 'name[missing]'  # All tasks should be named - 113 issues to fix
# - 'no-changed-when'  # Commands should not change things
# - 'yaml[line-length]'  # Line length limit
# - 'risky-file-permissions'  # File permissions

verbosity: 1
.dockerignore

@@ -1,18 +1,44 @@
.dockerignore
.git
.github
# Version control and CI
.git/
.github/
.gitignore
.travis.yml
CONTRIBUTING.md
Dockerfile
README.md
config.cfg
configs
docs

# Development environment
.env
logo.png
tests
.venv/
.ruff_cache/
__pycache__/
*.pyc
*.pyo
*.pyd

# Documentation and metadata
docs/
tests/
README.md
CHANGELOG.md
CONTRIBUTING.md
PULL_REQUEST_TEMPLATE.md
SECURITY.md
logo.png
.travis.yml

# Build artifacts and configs
configs/
Dockerfile
.dockerignore
Vagrantfile
Makefile

# User configuration (should be bind-mounted)
config.cfg

# IDE and editor files
.vscode/
.idea/
*.swp
*.swo
*~

# OS generated files
.DS_Store
Thumbs.db
1 .github/FUNDING.yml (vendored)
@@ -1,3 +1,4 @@
---
# These are supported funding model platforms

github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
18 .github/actions/setup-uv/action.yml (vendored, new file)
@@ -0,0 +1,18 @@
---
name: 'Setup uv Environment'
description: 'Install uv and sync dependencies for Algo VPN project'
outputs:
  uv-version:
    description: 'The version of uv that was installed'
    value: ${{ steps.setup.outputs.uv-version }}
runs:
  using: composite
  steps:
    - name: Install uv
      id: setup
      uses: astral-sh/setup-uv@1ddb97e5078301c0bec13b38151f8664ed04edc8  # v6
      with:
        enable-cache: true
    - name: Sync dependencies
      run: uv sync
      shell: bash
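For context, workflows consume this composite action with a local `uses:` reference, as the lint and main workflows later in this diff do. A minimal sketch of a consuming job follows; the job and step names are illustrative.

```yaml
# Minimal consumer of the composite action above; job/step names are illustrative.
# The lint.yml and main.yml workflows in this commit use the same pattern.
jobs:
  example:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v4
      - name: Setup uv environment
        uses: ./.github/actions/setup-uv
      - name: Check Ansible playbook syntax
        run: uv run ansible-playbook main.yml --syntax-check
```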
1 .github/dependabot.yml (vendored)
@@ -1,3 +1,4 @@
---
version: 2
updates:
  # Maintain dependencies for GitHub Actions
81 .github/workflows/claude-code-review.yml (vendored, new file)
@@ -0,0 +1,81 @@
---
name: Claude Code Review

'on':
  pull_request:
    types: [opened, synchronize]
    # Optional: Only run on specific file changes
    # paths:
    #   - "src/**/*.ts"
    #   - "src/**/*.tsx"
    #   - "src/**/*.js"
    #   - "src/**/*.jsx"

jobs:
  claude-review:
    # Optional: Filter by PR author
    # if: |
    #   github.event.pull_request.user.login == 'external-contributor' ||
    #   github.event.pull_request.user.login == 'new-developer' ||
    #   github.event.pull_request.author_association == 'FIRST_TIME_CONTRIBUTOR'

    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
      issues: read
      id-token: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 1
          persist-credentials: false

      - name: Run Claude Code Review
        id: claude-review
        uses: anthropics/claude-code-action@beta
        with:
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}

          # Optional: Specify model (defaults to Claude Sonnet 4, uncomment for Claude Opus 4)
          # model: "claude-opus-4-20250514"

          # Direct prompt for automated review (no @claude mention needed)
          direct_prompt: |
            Please review this pull request and provide feedback on:
            - Code quality and best practices
            - Potential bugs or issues
            - Performance considerations
            - Security concerns
            - Test coverage

            Be constructive and helpful in your feedback.

          # Optional: Use sticky comments to make Claude reuse the same comment on subsequent pushes to the same PR
          use_sticky_comment: true

          # Optional: Customize review based on file types
          # direct_prompt: |
          #   Review this PR focusing on:
          #   - For TypeScript files: Type safety and proper interface usage
          #   - For API endpoints: Security, input validation, and error handling
          #   - For React components: Performance, accessibility, and best practices
          #   - For tests: Coverage, edge cases, and test quality

          # Optional: Different prompts for different authors
          # direct_prompt: |
          #   ${{ github.event.pull_request.author_association == 'FIRST_TIME_CONTRIBUTOR' &&
          #   'Welcome! Please review this PR from a first-time contributor. Be encouraging and provide detailed explanations for any suggestions.' ||
          #   'Please provide a thorough code review focusing on our coding standards and best practices.' }}

          # Optional: Add specific tools for running tests or linting
          allowed_tools: >-
            Bash(ansible-playbook * --syntax-check),Bash(ansible-lint *),Bash(ruff check *),
            Bash(yamllint *),Bash(shellcheck *),Bash(python -m pytest *)

          # Optional: Skip review for certain conditions
          # if: |
          #   !contains(github.event.pull_request.title, '[skip-review]') &&
          #   !contains(github.event.pull_request.title, '[WIP]')
68 .github/workflows/claude.yml (vendored, new file)
@@ -0,0 +1,68 @@
---
name: Claude Code

'on':
  issue_comment:
    types: [created]
  pull_request_review_comment:
    types: [created]
  issues:
    types: [opened, assigned]
  pull_request_review:
    types: [submitted]

jobs:
  claude:
    if: |
      (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
      (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
      (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
      (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
      issues: read
      id-token: write
      actions: read  # Required for Claude to read CI results on PRs
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 1
          persist-credentials: false

      - name: Run Claude Code
        id: claude
        uses: anthropics/claude-code-action@beta
        with:
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}

          # This is an optional setting that allows Claude to read CI results on PRs
          additional_permissions: |
            actions: read

          # Optional: Specify model (defaults to Claude Sonnet 4, uncomment for Claude Opus 4)
          # model: "claude-opus-4-20250514"

          # Optional: Customize the trigger phrase (default: @claude)
          # trigger_phrase: "/claude"

          # Optional: Trigger when specific user is assigned to an issue
          # assignee_trigger: "claude-bot"

          # Optional: Allow Claude to run specific commands
          allowed_tools: >-
            Bash(ansible-playbook * --syntax-check),Bash(ansible-lint *),Bash(ruff check *),
            Bash(yamllint *),Bash(shellcheck *),Bash(python -m pytest *)

          # Optional: Add custom instructions for Claude to customize its behavior for your project
          custom_instructions: |
            Follow Algo's security-first principles
            Be conservative with dependency updates
            Run ansible-lint, ruff, yamllint, and shellcheck before suggesting changes
            Check the CLAUDE.md file for project-specific guidance

          # Optional: Custom environment variables for Claude
          # claude_env: |
          #   NODE_ENV: test
11 .github/workflows/docker-image.yaml (vendored)
@@ -1,6 +1,7 @@
---
name: Create and publish a Docker image

on:
'on':
push:
branches: ['master']

@@ -17,12 +18,12 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false

- name: Log in to the Container registry
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}

@@ -30,7 +31,7 @@ jobs:
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5.5.1
uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |

@@ -38,7 +39,7 @@ jobs:
type=raw,value=latest,enable=${{ github.ref == 'refs/heads/master' }}

- name: Build and push Docker image
uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v6.9.0
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
with:
context: .
push: true
67 .github/workflows/integration-tests.yml (vendored)
@@ -1,6 +1,7 @@
---
name: Integration Tests

on:
'on':
pull_request:
types: [opened, synchronize, reopened]
paths:

@@ -25,15 +26,15 @@ jobs:
matrix:
vpn_type: ['wireguard', 'ipsec', 'both']
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false

- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.11'
cache: 'pip'

- name: Install system dependencies
run: |
sudo apt-get update

@@ -46,12 +47,13 @@ jobs:
qrencode \
openssl \
linux-headers-$(uname -r)

- name: Install uv
run: curl -LsSf https://astral.sh/uv/install.sh | sh

- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
run: uv sync

- name: Create test configuration
run: |
cat > integration-test.cfg << EOF

@@ -87,7 +89,7 @@ jobs:
endpoint: 127.0.0.1
ssh_port: 4160
EOF

- name: Run Algo deployment
run: |
sudo ansible-playbook main.yml \

@@ -96,7 +98,7 @@ jobs:
-e @integration-test.cfg \
-e "provider=local" \
-vv

- name: Verify services are running
run: |
# Check WireGuard

@@ -109,7 +111,7 @@ jobs:
fi
echo "✓ WireGuard is running"
fi

# Check StrongSwan
if [[ "${{ matrix.vpn_type }}" == "ipsec" || "${{ matrix.vpn_type }}" == "both" ]]; then
echo "Checking StrongSwan..."

@@ -120,18 +122,18 @@ jobs:
fi
echo "✓ StrongSwan is running"
fi

# Check dnsmasq
if ! sudo systemctl is-active --quiet dnsmasq; then
echo "⚠️ dnsmasq not running (may be expected)"
else
echo "✓ dnsmasq is running"
fi

- name: Verify generated configs
run: |
echo "Checking generated configuration files..."

# WireGuard configs
if [[ "${{ matrix.vpn_type }}" == "wireguard" || "${{ matrix.vpn_type }}" == "both" ]]; then
for user in alice bob; do

@@ -146,7 +148,7 @@ jobs:
done
echo "✓ All WireGuard configs generated"
fi

# IPsec configs
if [[ "${{ matrix.vpn_type }}" == "ipsec" || "${{ matrix.vpn_type }}" == "both" ]]; then
for user in alice bob; do

@@ -161,36 +163,36 @@ jobs:
done
echo "✓ All IPsec configs generated"
fi

- name: Test VPN connectivity
run: |
echo "Testing basic VPN connectivity..."

# Test WireGuard
if [[ "${{ matrix.vpn_type }}" == "wireguard" || "${{ matrix.vpn_type }}" == "both" ]]; then
# Get server's WireGuard public key
SERVER_PUBKEY=$(sudo wg show wg0 public-key)
echo "Server public key: $SERVER_PUBKEY"

# Check if interface has peers
PEER_COUNT=$(sudo wg show wg0 peers | wc -l)
echo "✓ WireGuard has $PEER_COUNT peer(s) configured"
fi

# Test StrongSwan
if [[ "${{ matrix.vpn_type }}" == "ipsec" || "${{ matrix.vpn_type }}" == "both" ]]; then
# Check IPsec policies
sudo ipsec statusall | grep -E "INSTALLED|ESTABLISHED" || echo "No active IPsec connections (expected)"
fi

- name: Upload configs as artifacts
if: always()
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: vpn-configs-${{ matrix.vpn_type }}-${{ github.run_id }}
path: configs/
retention-days: 7

- name: Upload logs on failure
if: failure()
run: |

@@ -208,24 +210,24 @@ jobs:
runs-on: ubuntu-22.04
timeout-minutes: 10
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false

- name: Build Algo Docker image
run: |
docker build -t algo:ci-test .

- name: Test Docker image
run: |
# Test that the image can run and show help
docker run --rm --entrypoint /bin/sh algo:ci-test -c "cd /algo && ./algo --help" || true

# Test that required binaries exist in the virtual environment
docker run --rm --entrypoint /bin/sh algo:ci-test -c "cd /algo && source .env/bin/activate && which ansible"
docker run --rm --entrypoint /bin/sh algo:ci-test -c "cd /algo && uv run which ansible"
docker run --rm --entrypoint /bin/sh algo:ci-test -c "which python3"
docker run --rm --entrypoint /bin/sh algo:ci-test -c "which rsync"

- name: Test Docker config validation
run: |
# Create a minimal valid config

@@ -242,9 +244,8 @@ jobs:
dns_encryption: true
algo_provider: ec2
EOF

# Test that config is readable
docker run --rm --entrypoint cat -v $(pwd)/test-data:/data algo:ci-test /data/config.cfg

echo "✓ Docker image built and basic tests passed"

echo "✓ Docker image built and basic tests passed"
100 .github/workflows/lint.yml (vendored)
@@ -1,6 +1,7 @@
---
name: Lint

on: [push, pull_request]
'on': [push, pull_request]

permissions:
contents: read

@@ -10,66 +11,71 @@ jobs:
name: Ansible linting
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.11'
cache: 'pip'

- name: Install ansible-lint and dependencies
run: |
python -m pip install --upgrade pip
pip install ansible-lint ansible
# Install required ansible collections
ansible-galaxy collection install community.crypto
- name: Setup uv environment
uses: ./.github/actions/setup-uv

- name: Install Ansible collections
run: uv run --with ansible-lint --with ansible ansible-galaxy collection install -r requirements.yml

- name: Run ansible-lint
run: |
# Run with || true temporarily while we make the linter less strict
ansible-lint -v *.yml roles/{local,cloud-*}/*/*.yml || true
uv run --with ansible-lint ansible-lint .

- name: Run playbook dry-run check (catch runtime issues)
run: |
# Test main playbook logic without making changes
# This catches filter warnings, collection issues, and runtime errors
uv run ansible-playbook main.yml --check --connection=local \
-e "server_ip=test" \
-e "server_name=ci-test" \
-e "IP_subject_alt_name=192.168.1.1" \
|| echo "Dry-run check completed with issues - review output above"

yaml-lint:
name: YAML linting
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false

- name: Setup uv environment
uses: ./.github/actions/setup-uv

- name: Run yamllint
run: |
pip install yamllint
yamllint -c .yamllint . || true # Start with warnings only
run: uv run --with yamllint yamllint -c .yamllint .

python-lint:
name: Python linting
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.11'
cache: 'pip'

- name: Install Python linters
run: |
python -m pip install --upgrade pip
pip install ruff
- name: Setup uv environment
uses: ./.github/actions/setup-uv

- name: Run ruff
run: |
# Fast Python linter
ruff check . || true # Start with warnings only
uv run --with ruff ruff check .

shellcheck:
name: Shell script linting
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false

@@ -78,3 +84,47 @@ jobs:
sudo apt-get update && sudo apt-get install -y shellcheck
# Check all shell scripts, not just algo and install.sh
find . -type f -name "*.sh" -not -path "./.git/*" -exec shellcheck {} \;

powershell-lint:
name: PowerShell script linting
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false

- name: Install PowerShell
run: |
# Install PowerShell Core
wget -q https://github.com/PowerShell/PowerShell/releases/download/v7.4.0/powershell_7.4.0-1.deb_amd64.deb
sudo dpkg -i powershell_7.4.0-1.deb_amd64.deb
sudo apt-get install -f

- name: Install PSScriptAnalyzer
run: |
pwsh -Command "Install-Module -Name PSScriptAnalyzer -Force -Scope CurrentUser"

- name: Run PowerShell syntax check
run: |
# Check syntax by parsing the script
pwsh -NoProfile -NonInteractive -Command "
try {
\$null = [System.Management.Automation.PSParser]::Tokenize((Get-Content -Path './algo.ps1' -Raw), [ref]\$null)
Write-Host '✓ PowerShell syntax check passed'
} catch {
Write-Error 'PowerShell syntax error: ' + \$_.Exception.Message
exit 1
}
"

- name: Run PSScriptAnalyzer
run: |
pwsh -Command "
\$results = Invoke-ScriptAnalyzer -Path './algo.ps1' -Severity Warning,Error
if (\$results.Count -gt 0) {
\$results | Format-Table -AutoSize
exit 1
} else {
Write-Host '✓ PSScriptAnalyzer check passed'
}
"
80 .github/workflows/main.yml (vendored)
@@ -1,6 +1,7 @@
---
name: Main

on:
'on':
push:
branches:
- master

@@ -17,21 +18,18 @@ jobs:
permissions:
contents: read
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.11'
cache: 'pip'

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Setup uv environment
uses: ./.github/actions/setup-uv

- name: Check Ansible playbook syntax
run: ansible-playbook main.yml --syntax-check
run: uv run ansible-playbook main.yml --syntax-check

basic-tests:
name: Basic sanity tests

@@ -39,30 +37,21 @@ jobs:
permissions:
contents: read
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.11'
cache: 'pip'

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
pip install jinja2 # For template rendering tests
sudo apt-get update && sudo apt-get install -y shellcheck
- name: Setup uv environment
uses: ./.github/actions/setup-uv

- name: Install system dependencies
run: sudo apt-get update && sudo apt-get install -y shellcheck

- name: Run basic sanity tests
run: |
python tests/unit/test_basic_sanity.py
python tests/unit/test_config_validation.py
python tests/unit/test_user_management.py
python tests/unit/test_openssl_compatibility.py
python tests/unit/test_cloud_provider_configs.py
python tests/unit/test_template_rendering.py
python tests/unit/test_generated_configs.py
run: uv run pytest tests/unit/ -v

docker-build:
name: Docker build test

@@ -70,18 +59,15 @@ jobs:
permissions:
contents: read
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.11'
cache: 'pip'

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Setup uv environment
uses: ./.github/actions/setup-uv

- name: Build Docker image
run: docker build -t local/algo:test .

@@ -92,7 +78,7 @@ jobs:
docker run --rm local/algo:test /algo/algo --help

- name: Run Docker deployment tests
run: python tests/unit/test_docker_localhost_deployment.py
run: uv run pytest tests/unit/test_docker_localhost_deployment.py -v

config-generation:
name: Configuration generation test

@@ -101,18 +87,15 @@ jobs:
permissions:
contents: read
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.11'
cache: 'pip'

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Setup uv environment
uses: ./.github/actions/setup-uv

- name: Test configuration generation (local mode)
run: |

@@ -130,18 +113,15 @@ jobs:
matrix:
provider: [local, ec2, digitalocean, gce]
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.11'
cache: 'pip'

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Setup uv environment
uses: ./.github/actions/setup-uv

- name: Create test configuration for ${{ matrix.provider }}
run: |

@@ -174,7 +154,7 @@ jobs:
- name: Run Ansible check mode for ${{ matrix.provider }}
run: |
# Run ansible in check mode to validate playbooks work
ansible-playbook main.yml \
uv run ansible-playbook main.yml \
-i "localhost," \
-c local \
-e @test-${{ matrix.provider }}.cfg \

@@ -183,6 +163,6 @@ jobs:
--diff \
-vv \
--skip-tags "facts,tests,local,update-alternatives,cloud_api" || true

# The || true is because check mode will fail on some tasks
# but we're looking for syntax/undefined variable errors
139 .github/workflows/smart-tests.yml (vendored)
@@ -1,6 +1,7 @@
---
name: Smart Test Selection

on:
'on':
pull_request:
types: [opened, synchronize, reopened]

@@ -22,11 +23,11 @@ jobs:
run_lint: ${{ steps.filter.outputs.lint }}
run_integration: ${{ steps.filter.outputs.integration }}
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false

- uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50 # v2.11.1

- uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
id: filter
with:
filters: |

@@ -39,7 +40,8 @@ jobs:
- 'library/**'
python:
- '**/*.py'
- 'requirements.txt'
- 'pyproject.toml'
- 'uv.lock'
- 'tests/**'
docker:
- 'Dockerfile*'

@@ -75,21 +77,18 @@ jobs:
permissions:
contents: read
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.11'
cache: 'pip'

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Setup uv environment
uses: ./.github/actions/setup-uv

- name: Check Ansible playbook syntax
run: ansible-playbook main.yml --syntax-check
run: uv run ansible-playbook main.yml --syntax-check

basic-tests:
name: Basic Sanity Tests

@@ -99,37 +98,40 @@ jobs:
permissions:
contents: read
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.11'
cache: 'pip'

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
pip install jinja2 pyyaml # For tests
sudo apt-get update && sudo apt-get install -y shellcheck
- name: Setup uv environment
uses: ./.github/actions/setup-uv

- name: Install system dependencies
run: sudo apt-get update && sudo apt-get install -y shellcheck

- name: Run relevant tests
env:
RUN_BASIC_TESTS: ${{ needs.changed-files.outputs.run_basic_tests }}
RUN_TEMPLATE_TESTS: ${{ needs.changed-files.outputs.run_template_tests }}
run: |
# Always run basic sanity
python tests/unit/test_basic_sanity.py

uv run pytest tests/unit/test_basic_sanity.py -v

# Run other tests based on what changed
if [[ "${{ needs.changed-files.outputs.run_basic_tests }}" == "true" ]]; then
python tests/unit/test_config_validation.py
python tests/unit/test_user_management.py
python tests/unit/test_openssl_compatibility.py
python tests/unit/test_cloud_provider_configs.py
python tests/unit/test_generated_configs.py
if [[ "${RUN_BASIC_TESTS}" == "true" ]]; then
uv run pytest \
tests/unit/test_config_validation.py \
tests/unit/test_user_management.py \
tests/unit/test_openssl_compatibility.py \
tests/unit/test_cloud_provider_configs.py \
tests/unit/test_generated_configs.py \
-v
fi

if [[ "${{ needs.changed-files.outputs.run_template_tests }}" == "true" ]]; then
python tests/unit/test_template_rendering.py

if [[ "${RUN_TEMPLATE_TESTS}" == "true" ]]; then
uv run pytest tests/unit/test_template_rendering.py -v
fi

docker-tests:

@@ -140,18 +142,15 @@ jobs:
permissions:
contents: read
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.11'
cache: 'pip'

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Setup uv environment
uses: ./.github/actions/setup-uv

- name: Build Docker image
run: docker build -t local/algo:test .

@@ -161,7 +160,7 @@ jobs:
docker run --rm local/algo:test /algo/algo --help

- name: Run Docker deployment tests
run: python tests/unit/test_docker_localhost_deployment.py
run: uv run pytest tests/unit/test_docker_localhost_deployment.py -v

config-tests:
name: Configuration Tests

@@ -172,18 +171,15 @@ jobs:
permissions:
contents: read
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.11'
cache: 'pip'

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Setup uv environment
uses: ./.github/actions/setup-uv

- name: Test configuration generation
run: |

@@ -208,8 +204,8 @@ jobs:
server: test-server
endpoint: 10.0.0.1
EOF

ansible-playbook main.yml \

uv run ansible-playbook main.yml \
-i "localhost," \
-c local \
-e @test-local.cfg \

@@ -227,31 +223,30 @@ jobs:
permissions:
contents: read
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.11'
cache: 'pip'

- name: Install linting tools
run: |
python -m pip install --upgrade pip
pip install ansible-lint ansible yamllint ruff
- name: Setup uv environment
uses: ./.github/actions/setup-uv

- name: Install ansible dependencies
run: ansible-galaxy collection install community.crypto
run: uv run ansible-galaxy collection install community.crypto

- name: Run relevant linters
env:
RUN_LINT: ${{ needs.changed-files.outputs.run_lint }}
run: |
# Always run if lint files changed
if [[ "${{ needs.changed-files.outputs.run_lint }}" == "true" ]]; then
if [[ "${RUN_LINT}" == "true" ]]; then
# Run all linters
ruff check . || true
yamllint . || true
ansible-lint || true

uv run --with ruff ruff check . || true
uv run --with yamllint yamllint . || true
uv run --with ansible-lint ansible-lint || true

# Check shell scripts if any changed
if git diff --name-only ${{ github.event.pull_request.base.sha }} ${{ github.sha }} | grep -q '\.sh$'; then
find . -name "*.sh" -type f -exec shellcheck {} + || true

@@ -265,14 +260,20 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check test results
env:
SYNTAX_CHECK_RESULT: ${{ needs.syntax-check.result }}
BASIC_TESTS_RESULT: ${{ needs.basic-tests.result }}
DOCKER_TESTS_RESULT: ${{ needs.docker-tests.result }}
CONFIG_TESTS_RESULT: ${{ needs.config-tests.result }}
LINT_RESULT: ${{ needs.lint.result }}
run: |
# This job ensures all required tests pass
# It will fail if any dependent job failed
if [[ "${{ needs.syntax-check.result }}" == "failure" ]] || \
[[ "${{ needs.basic-tests.result }}" == "failure" ]] || \
[[ "${{ needs.docker-tests.result }}" == "failure" ]] || \
[[ "${{ needs.config-tests.result }}" == "failure" ]] || \
[[ "${{ needs.lint.result }}" == "failure" ]]; then
if [[ "${SYNTAX_CHECK_RESULT}" == "failure" ]] || \
[[ "${BASIC_TESTS_RESULT}" == "failure" ]] || \
[[ "${DOCKER_TESTS_RESULT}" == "failure" ]] || \
[[ "${CONFIG_TESTS_RESULT}" == "failure" ]] || \
[[ "${LINT_RESULT}" == "failure" ]]; then
echo "One or more required tests failed"
exit 1
fi

@@ -290,4 +291,4 @@ jobs:
run: |
echo "Integration tests should be triggered for this PR"
echo "Changed files indicate potential breaking changes"
echo "Run workflow manually: .github/workflows/integration-tests.yml"
echo "Run workflow manually: .github/workflows/integration-tests.yml"
25 .github/workflows/test-effectiveness.yml (vendored)
@@ -1,6 +1,7 @@
---
name: Test Effectiveness Tracking

on:
'on':
schedule:
- cron: '0 0 * * 0' # Weekly on Sunday
workflow_dispatch: # Allow manual runs

@@ -16,26 +17,26 @@ jobs:
name: Analyze Test Effectiveness
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: true

- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0

- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.11'

- name: Analyze test effectiveness
env:
GH_TOKEN: ${{ github.token }}
run: |
python scripts/track-test-effectiveness.py

- name: Upload metrics
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: test-effectiveness-metrics
path: .metrics/

- name: Create issue if tests are ineffective
env:
GH_TOKEN: ${{ github.token }}

@@ -44,7 +45,7 @@ jobs:
if grep -q "⚠️" .metrics/test-effectiveness-report.md; then
# Check if issue already exists
existing=$(gh issue list --label "test-effectiveness" --state open --json number --jq '.[0].number')

if [ -z "$existing" ]; then
gh issue create \
--title "Test Effectiveness Review Needed" \

@@ -55,14 +56,14 @@ jobs:
gh issue comment $existing --body-file .metrics/test-effectiveness-report.md
fi
fi

- name: Commit metrics if changed
run: |
git config --local user.email "github-actions[bot]@users.noreply.github.com"
git config --local user.name "github-actions[bot]"

if [[ -n $(git status -s .metrics/) ]]; then
git add .metrics/
git commit -m "chore: Update test effectiveness metrics [skip ci]"
git push
fi
fi
7 .gitignore (vendored)
@@ -3,9 +3,10 @@
configs/*
inventory_users
*.kate-swp
*env
.env/
.venv/
.DS_Store
venvs/*
!venvs/.gitinit
.vagrant
.ansible/
__pycache__/
*.pyc
10 .yamllint
@@ -1,6 +1,16 @@
---
extends: default

# Cloud-init files must be excluded from normal YAML rules
# The #cloud-config header cannot have a space and cannot have --- document start
ignore: |
  files/cloud-init/
  .env/
  .venv/
  .ansible/
  configs/
  tests/integration/test-configs/

rules:
  line-length:
    max: 160
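To illustrate why those paths are ignored: a cloud-init user-data file must begin with the literal `#cloud-config` line, with no space after `#` and no leading `---` document marker, which conflicts with yamllint's default comment and document-start rules. A minimal, hypothetical example:

```yaml
#cloud-config
# The header above must be the very first line, exactly as written; adding a
# space ("# cloud-config") or a leading "---" would break cloud-init parsing.
package_update: true
packages:
  - wireguard
```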
136 CHANGELOG.md (deleted)
@@ -1,136 +0,0 @@
## 1.2 [(Unreleased)](https://github.com/trailofbits/algo/tree/HEAD)

### Added
- New provider CloudStack added [#1420](https://github.com/trailofbits/algo/pull/1420)
- Support for Ubuntu 20.04 [#1782](https://github.com/trailofbits/algo/pull/1782)
- Allow WireGuard to listen on port 53 [#1594](https://github.com/trailofbits/algo/pull/1594)
- Introducing Makefile [#1553](https://github.com/trailofbits/algo/pull/1553)
- Option to unblock SMB and Netbios [#1558](https://github.com/trailofbits/algo/pull/1558)
- Allow OnDemand to be toggled later [#1557](https://github.com/trailofbits/algo/pull/1557)
- New provider Hetzner added [#1549](https://github.com/trailofbits/algo/pull/1549)
- Alternative Ingress IP [#1605](https://github.com/trailofbits/algo/pull/1605)

### Fixes
- WSL private SSH key permissions [#1584](https://github.com/trailofbits/algo/pull/1584)
- Scaleway instance creating issue [#1549](https://github.com/trailofbits/algo/pull/1549)

### Changed
- Discontinue use of the WireGuard PPA [#1855](https://github.com/trailofbits/algo/pull/1855)
- SSH changes [#1636](https://github.com/trailofbits/algo/pull/1636)
  - Default port is set to `4160` and can be changed in the config
  - SSH user for every cloud provider is `algo`
- EC2: enable EBS encryption by default [#1556](https://github.com/trailofbits/algo/pull/1556)
- Upgrades [#1549](https://github.com/trailofbits/algo/pull/1549)
  - Python 3
  - Ansible 2.9 [#1777](https://github.com/trailofbits/algo/pull/1777)

### Breaking changes
- Python virtual environment moved to .env [#1549](https://github.com/trailofbits/algo/pull/1549)

## 1.1 [(Jul 31, 2019)](https://github.com/trailofbits/algo/releases/tag/v1.1)

### Removed
- IKEv2 for Windows is now deleted, use Wireguard [#1493](https://github.com/trailofbits/algo/issues/1493)

### Added
- Tmpfs for key generation [#145](https://github.com/trailofbits/algo/issues/145)
- Randomly generated pre-shared keys for WireGuard [#1465](https://github.com/trailofbits/algo/pull/1465) ([elreydetoda](https://github.com/elreydetoda))
- Support for Ubuntu 19.04 [#1405](https://github.com/trailofbits/algo/pull/1405) ([jackivanov](https://github.com/jackivanov))
- AWS support for existing EIP [#1292](https://github.com/trailofbits/algo/pull/1292) ([statik](https://github.com/statik))
- Script to support cloud-init and local easy deploy [#1366](https://github.com/trailofbits/algo/pull/1366) ([jackivanov](https://github.com/jackivanov))
- Automatically create cloud firewall rules for installs onto Vultr [#1400](https://github.com/trailofbits/algo/pull/1400) ([TC1977](https://github.com/TC1977))
- Randomly generated IP address for the local dns resolver [#1429](https://github.com/trailofbits/algo/pull/1429) ([jackivanov](https://github.com/jackivanov))
- Update users: add server pick-list [#1441](https://github.com/trailofbits/algo/pull/1441) ([TC1977](https://github.com/TC1977))
- Additional testing [#213](https://github.com/trailofbits/algo/issues/213)
- Add IPv6 support to DNS [#1425](https://github.com/trailofbits/algo/pull/1425) ([shapiro125](https://github.com/shapiro125))
- Additional p12 with the CA cert included [#1403](https://github.com/trailofbits/algo/pull/1403) ([jackivanov](https://github.com/jackivanov))

### Fixed
- Fixes error in 10-algo-lo100.network [#1369](https://github.com/trailofbits/algo/pull/1369) ([adamluk](https://github.com/adamluk))
- Error message is missing for some roles [#1364](https://github.com/trailofbits/algo/issues/1364)
- DNS leak in Linux/Wireguard when LAN gateway/DNS is 172.16.0.1 [#1422](https://github.com/trailofbits/algo/issues/1422)
- Installation error after #1397 [#1409](https://github.com/trailofbits/algo/issues/1409)
- EC2 encrypted images bug [#1528](https://github.com/trailofbits/algo/issues/1528)

### Changed
- Upgrade Ansible to 2.7.12 [#1536](https://github.com/trailofbits/algo/pull/1536)
- DNSmasq removed, and the DNS adblocking functionality has been moved to the dnscrypt-proxy
- Azure: moved to the Standard_B1S image size
- Refactoring, Linting and additional tests [#1397](https://github.com/trailofbits/algo/pull/1397) ([jackivanov](https://github.com/jackivanov))
- Scaleway modules [#1410](https://github.com/trailofbits/algo/pull/1410) ([jackivanov](https://github.com/jackivanov))
- Use VULTR_API_CONFIG variable if set [#1374](https://github.com/trailofbits/algo/pull/1374) ([davidemyers](https://github.com/davidemyers))
- Simplify Apple Profile Configuration Template [#1033](https://github.com/trailofbits/algo/pull/1033) ([faf0](https://github.com/faf0))
- Include roles as separate tasks [#1365](https://github.com/trailofbits/algo/pull/1365) ([jackivanov](https://github.com/jackivanov))

## 1.0 [(Mar 19, 2019)](https://github.com/trailofbits/algo/releases/tag/v1.0)

### Added
- Tagged releases and changelog [#724](https://github.com/trailofbits/algo/issues/724)
- Add support for custom domain names [#759](https://github.com/trailofbits/algo/issues/759)

### Fixed
- Set the name shown to the user (client) to be the server name specified in the install script [#491](https://github.com/trailofbits/algo/issues/491)
- AGPLv3 change [#1351](https://github.com/trailofbits/algo/pull/1351)
- Migrate to python3 [#1024](https://github.com/trailofbits/algo/issues/1024)
- Reorganize the project around ipsec + wireguard [#1330](https://github.com/trailofbits/algo/issues/1330)
- Configuration folder reorganization [#1330](https://github.com/trailofbits/algo/issues/1330)
- Remove WireGuard KeepAlive and include as an option in config [#1251](https://github.com/trailofbits/algo/issues/1251)
- Dnscrypt-proxy no longer works after reboot [#1356](https://github.com/trailofbits/algo/issues/1356)

## 20 Oct 2018
### Added
- AWS Lightsail

## 7 Sep 2018
### Changed
- Azure: Deployment via Azure Resource Manager

## 27 Aug 2018
### Changed
- Large refactor to support Ansible 2.5. [Details](https://github.com/trailofbits/algo/pull/976)
- Add a new cloud provider - Vultr

### Upgrade notes
- If any problems are encountered, follow the [instructions](https://github.com/trailofbits/algo#deploy-the-algo-server) from scratch
- You can't update users on your old servers with the new code. Use the old code before this release or rebuild the server from scratch
- Update AWS IAM permissions for your user as per [issue](https://github.com/trailofbits/algo/issues/1079#issuecomment-416577599)

## 04 Jun 2018
### Changed
- Switched to [new cipher suite](https://github.com/trailofbits/algo/issues/981)

## 24 May 2018
### Changed
- Switched to Ubuntu 18.04

### Removed
- Lightsail support until they have Ubuntu 18.04

### Fixed
- Scaleway API pagination

## 30 Apr 2018
### Added
- WireGuard support

### Removed
- Android StrongSwan profiles

### Release notes
- StrongSwan profiles for Android are deprecated now. Use WireGuard

## 25 Apr 2018
### Added
- DNScrypt-proxy added
- Switched to CloudFlare DNS-over-HTTPS by default

## 19 Apr 2018
### Added
- IPv6 in subjectAltName of the certificates. This allows connecting to the Algo instance via the main IPv6 address

### Fixed
- IPv6 DNS addresses were not passing to the client

### Release notes
- In order to use the IPv6 address as the connection endpoint you need to [reinit](https://github.com/trailofbits/algo/blob/master/config.cfg#L14) the PKI and [reconfigure](https://github.com/trailofbits/algo#configure-the-vpn-clients) your devices with new certificates.
335 CLAUDE.md (new file)
@@ -0,0 +1,335 @@
# CLAUDE.md - LLM Guidance for Algo VPN

This document provides essential context and guidance for LLMs working on the Algo VPN codebase. It captures important learnings, patterns, and best practices discovered through extensive work with this project.

## Project Overview

Algo is an Ansible-based tool that sets up a personal VPN in the cloud. It's designed to be:
- **Security-focused**: Creates hardened VPN servers with minimal attack surface
- **Easy to use**: Automated deployment with sensible defaults
- **Multi-platform**: Supports various cloud providers and operating systems
- **Privacy-preserving**: No logging, minimal data retention

### Core Technologies
- **VPN Protocols**: WireGuard (preferred) and IPsec/IKEv2
- **Configuration Management**: Ansible (currently v9.x)
- **Languages**: Python, YAML, Shell, Jinja2 templates
- **Supported Providers**: AWS, Azure, DigitalOcean, GCP, Vultr, Hetzner, local deployment

## Architecture and Structure

### Directory Layout
```
algo/
├── main.yml              # Primary playbook
├── users.yml             # User management playbook
├── server.yml            # Server-specific tasks
├── config.cfg            # Main configuration file
├── pyproject.toml        # Python project configuration and dependencies
├── uv.lock               # Exact dependency versions lockfile
├── requirements.yml      # Ansible collections
├── roles/                # Ansible roles
│   ├── common/           # Base system configuration
│   ├── wireguard/        # WireGuard VPN setup
│   ├── strongswan/       # IPsec/IKEv2 setup
│   ├── dns/              # DNS configuration (dnsmasq, dnscrypt)
│   ├── ssh_tunneling/    # SSH tunnel setup
│   └── cloud-*/          # Cloud provider specific roles
├── library/              # Custom Ansible modules
├── playbooks/            # Supporting playbooks
└── tests/                # Test suite
    └── unit/             # Python unit tests
```

### Key Roles
- **common**: Firewall rules, system hardening, package management
- **wireguard**: WireGuard server/client configuration
- **strongswan**: IPsec server setup with certificate generation
- **dns**: DNS encryption and ad blocking
- **cloud-\***: Provider-specific instance creation

## Critical Dependencies and Version Management

### Current Versions (MUST maintain compatibility)
```
ansible==11.8.0   # Stay current to get latest security, performance and bugfixes
jinja2~=3.1.6     # Security fix for CVE-2025-27516
netaddr==1.3.0    # Network address manipulation
```

### Version Update Guidelines
1. **Be Conservative**: Prefer minor version bumps over major ones
2. **Security First**: Always prioritize security updates (CVEs)
3. **Test Thoroughly**: Run all tests before updating
4. **Document Changes**: Explain why each update is necessary

### Ansible Collections
Currently unpinned in `requirements.yml`, but key ones include:
- `community.general`
- `ansible.posix`
- `openstack.cloud`
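For reference, a minimal sketch of a `requirements.yml` listing the collections named above; this uses the standard ansible-galaxy format and is illustrative, not a copy of the project's actual file:

```yaml
# Illustrative requirements.yml for the collections above
# (currently unpinned; add "version:" entries if/when pinning is desired).
collections:
  - name: community.general
  - name: ansible.posix
  - name: openstack.cloud
```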
## Development Practices

### Code Style and Linting

#### Python (ruff)
```toml
# pyproject.toml configuration
[tool.ruff]
target-version = "py311"
line-length = 120

[tool.ruff.lint]
select = ["E", "W", "F", "I", "B", "C4", "UP"]
```

#### YAML (yamllint)
- Document start markers (`---`) required
- No trailing spaces
- Newline at end of file
- Quote `'on':` in GitHub workflows (truthy value)
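For context, the `'on':` convention exists because a bare `on` key is treated as a truthy value by YAML 1.1 parsers and flagged by yamllint; the workflow diffs earlier in this commit apply exactly this change. A minimal sketch:

```yaml
# Before (flagged by yamllint's "truthy" rule: bare "on" parses as a boolean):
#   on:
#     push:
#       branches: [master]
# After (the quoted form used throughout this commit):
'on':
  push:
    branches: [master]
```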
#### Shell Scripts (shellcheck)
|
||||
- Quote all variables: `"${var}"`
|
||||
- Use `set -euo pipefail` for safety
|
||||
|
||||
#### PowerShell Scripts (PSScriptAnalyzer)
|
||||
- Use approved verbs (Get-, Set-, New-, etc.)
|
||||
- Avoid positional parameters in functions
|
||||
- Use proper error handling with try/catch
|
||||
- **Note**: Algo's PowerShell script is a WSL wrapper since Ansible doesn't run natively on Windows
|
||||
|
||||
#### Ansible (ansible-lint)
|
||||
- Many warnings are suppressed in `.ansible-lint`
|
||||
- Focus on errors, not warnings
|
||||
- Common suppressions: `name[missing]`, `risky-file-permissions`
|
||||
|
||||
#### Documentation Style
|
||||
- Avoid excessive header nesting (prefer 2-3 levels maximum)
|
||||
- Don't overuse bold formatting in lists - use sparingly for emphasis only
|
||||
- Write flowing paragraphs instead of choppy bullet-heavy sections
|
||||
- Keep formatting clean and readable - prefer natural text over visual noise
|
||||
- Use numbered lists for procedures, simple bullets for feature lists
|
||||
- Example: "Navigate to Network → Interfaces" not "**Navigate** to **Network** → **Interfaces**"
|
||||
|
||||
### Git Workflow

1. Create feature branches from `master`
2. Make atomic commits with clear messages
3. Run all linters before pushing
4. Update PR description with test results
5. Squash commits if requested

### Testing Requirements

Before pushing any changes:

```bash
# Python tests
pytest tests/unit/ -v

# Ansible syntax
ansible-playbook main.yml --syntax-check
ansible-playbook users.yml --syntax-check

# Linters
ansible-lint
yamllint .
ruff check .
shellcheck *.sh

# PowerShell (if available)
pwsh -Command "Invoke-ScriptAnalyzer -Path ./algo.ps1"
```

### Writing Effective Tests - Mutation Testing Approach

When writing tests, **always verify that your test actually detects the failure case**. This is a form of lightweight mutation testing that ensures tests add real value:

1. **Write the test for the bug/issue you're preventing**
2. **Temporarily introduce the bug** to verify the test fails
3. **Fix the bug** and verify the test passes
4. **Document what specific issue the test prevents**

Example from our codebase:

```python
def test_regression_openssl_inline_comments():
    """Tests that we detect inline comments in Jinja2 expressions."""
    # This pattern SHOULD fail (has inline comments)
    problematic = "{{ ['DNS:' + id, # comment ] }}"
    assert not validate(problematic), "Should detect inline comments"

    # This pattern SHOULD pass (no inline comments)
    fixed = "{{ ['DNS:' + id] }}"
    assert validate(fixed), "Should pass without comments"
```

This practice ensures:

- Tests aren't just checking happy paths
- Tests will actually catch regressions
- The test's purpose is clear to future maintainers
- We avoid false confidence from tests that always pass

## Common Issues and Solutions

### 1. Ansible-lint "name[missing]" Warnings

- Added to skip_list in `.ansible-lint`
- Too many tasks to fix immediately (113+)
- Focus on new code having proper names

### 2. Jinja2 Template Complexity

- Many templates use Ansible-specific filters
- Test templates with `tests/unit/test_template_rendering.py`
- Mock Ansible filters when testing (see the sketch below)

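Outside of Ansible, a template can be rendered by registering a stub for each Ansible-specific filter it uses. This is a minimal sketch; the filter names and template string are illustrative, not taken from Algo's actual templates:

```python
"""Minimal sketch: render an Ansible-flavored template by stubbing its custom filters."""
from jinja2 import Environment

env = Environment()
# Stand-ins for Ansible-provided filters; a rendering test only needs a callable
# with the right name, not the real behavior.
env.filters["to_uuid"] = lambda value: "00000000-0000-0000-0000-000000000000"
env.filters["b64encode"] = lambda value: value

template = env.from_string("server_id={{ server_name | to_uuid }}")
assert template.render(server_name="algo").startswith("server_id=")
```
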
### 3. OpenSSL Version Compatibility

```yaml
# Check version and use appropriate flags
{{ (openssl_version is version('3', '>=')) | ternary('-legacy', '') }}
```

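The same gate expressed in plain Python, for tests or helper scripts that build OpenSSL commands outside of Ansible; the helper name is made up for illustration:

```python
"""Minimal sketch of the version gate above: OpenSSL 3.x needs -legacy for some PKCS#12 operations."""

def openssl_legacy_flag(openssl_version: str) -> str:
    # Mirrors (openssl_version is version('3', '>=')) | ternary('-legacy', '')
    major = int(openssl_version.split(".")[0])
    return "-legacy" if major >= 3 else ""

assert openssl_legacy_flag("3.0.2") == "-legacy"
assert openssl_legacy_flag("1.1.1w") == ""
```
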
### 4. IPv6 Endpoint Formatting

- WireGuard configs must bracket IPv6 addresses
- Template logic: `{% if ':' in IP %}[{{ IP }}]:{{ port }}{% else %}{{ IP }}:{{ port }}{% endif %}` (see the test sketch below)

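A regression test can render that expression directly and check both address families. This is a minimal sketch that reuses the variable names from the snippet above but is otherwise not taken from the real templates:

```python
"""Minimal sketch: IPv6 endpoints must be bracketed, IPv4 endpoints must not be."""
from jinja2 import Template

ENDPOINT = "{% if ':' in IP %}[{{ IP }}]:{{ port }}{% else %}{{ IP }}:{{ port }}{% endif %}"

def render_endpoint(ip: str, port: int) -> str:
    return Template(ENDPOINT).render(IP=ip, port=port)

assert render_endpoint("2001:db8::1", 51820) == "[2001:db8::1]:51820"
assert render_endpoint("203.0.113.10", 51820) == "203.0.113.10:51820"
```
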
## Security Considerations

### Always Priority One

- **Never expose secrets**: No passwords/keys in commits
- **CVE Response**: Update immediately when security issues are found
- **Least Privilege**: Minimal permissions, dropped capabilities
- **Secure Defaults**: Strong crypto, no logging, firewall rules

### Certificate Management

- Elliptic curve cryptography (secp384r1)
- Proper CA password handling
- Certificate revocation support
- Secure storage in `/etc/ipsec.d/`

### Network Security

- Strict firewall rules (iptables/ip6tables)
- No IP forwarding except for VPN
- DNS leak protection
- Kill switch implementation

## Platform Support

### Operating Systems

- **Primary**: Ubuntu 20.04/22.04 LTS
- **Secondary**: Debian 11/12
- **Clients**: Windows, macOS, iOS, Android, Linux

### Cloud Providers

Each has specific requirements:

- **AWS**: Requires boto3, specific AMI IDs
- **Azure**: Complex networking setup
- **DigitalOcean**: Simple API, good for testing
- **Local**: KVM/Docker for development

### Architecture Considerations

- Support both x86_64 and ARM64
- Some providers have limited ARM support
- Performance varies by instance type

## CI/CD Pipeline

### GitHub Actions Workflows

1. **lint.yml**: Runs ansible-lint on all pushes
2. **main.yml**: Tests cloud provider configurations
3. **smart-tests.yml**: Selective test running based on changes
4. **integration-tests.yml**: Full deployment tests (currently disabled)

### Test Categories

- **Unit Tests**: Python-based, test logic and templates
- **Syntax Checks**: Ansible playbook validation
- **Linting**: Code quality enforcement
- **Integration**: Full deployment testing (needs work)

## Maintenance Guidelines

### Dependency Updates

1. Check for security vulnerabilities monthly
2. Update conservatively (minor versions)
3. Test on multiple platforms
4. Document in PR why updates are needed

### Issue Triage

- Security issues: Priority 1
- Broken functionality: Priority 2
- Feature requests: Priority 3
- Check issues for duplicates

### Pull Request Standards

- Clear description of changes
- Test results included
- Linter compliance
- Conservative approach

## Working with Algo

### Local Development Setup

```bash
# Install dependencies
uv sync
uv run ansible-galaxy install -r requirements.yml

# Run local deployment
ansible-playbook main.yml -e "provider=local"
```

### Common Tasks

#### Adding a New User

```bash
ansible-playbook users.yml -e "server=SERVER_NAME"
```

#### Updating Dependencies

1. Create a new branch
2. Update `pyproject.toml` conservatively
3. Run `uv lock` to update the lockfile
4. Run all tests
5. Document security fixes

#### Debugging Deployment Issues

1. Check `ansible-playbook -vvv` output
2. Verify cloud provider credentials
3. Check firewall rules
4. Review generated configs in `configs/`

## Important Context for LLMs

### What Makes Algo Special

- **Simplicity**: One command to deploy
- **Security**: Hardened by default
- **No Bloat**: Minimal dependencies
- **Privacy**: No telemetry or logging

### User Expectations

- It should "just work"
- Security is non-negotiable
- Backwards compatibility matters
- Clear error messages

### Common User Profiles

1. **Privacy Advocates**: Want secure communications
2. **Travelers**: Need reliable VPN access
3. **Small Teams**: Shared VPN for remote work
4. **Developers**: Testing and development

### Maintenance Philosophy

- Stability over features
- Security over convenience
- Clarity over cleverness
- Test everything

## Final Notes

When working on Algo:

1. **Think Security First**: Every change should maintain or improve security
2. **Test Thoroughly**: Multiple platforms, both VPN types
3. **Document Clearly**: Users may not be technical
4. **Be Conservative**: This is critical infrastructure
5. **Respect Privacy**: No tracking, minimal logging

Remember: People trust Algo with their privacy and security. Every line of code matters.

@ -1,13 +1,22 @@
|
|||
### Filing New Issues
|
||||
|
||||
* Check that your issue is not already described in the [FAQ](docs/faq.md), [troubleshooting](docs/troubleshooting.md) docs, or an [existing issue](https://github.com/trailofbits/algo/issues)
|
||||
* Did you remember to install the dependencies for your operating system prior to installing Algo?
|
||||
* We only support modern clients, e.g. macOS 10.11+, iOS 9+, Windows 10+, Ubuntu 17.04+, etc.
|
||||
* Cloud provider support is limited to DO, AWS, GCE, and Azure. Any others are best effort only.
|
||||
* If you need to file a new issue, fill out any relevant fields in the Issue Template.
|
||||
* Algo automatically installs dependencies with uv - no manual setup required
|
||||
* We support modern clients: macOS 12+, iOS 15+, Windows 11+, Ubuntu 22.04+, etc.
|
||||
* Supported cloud providers: DigitalOcean, AWS, Azure, GCP, Vultr, Hetzner, Linode, OpenStack, CloudStack
|
||||
* If you need to file a new issue, fill out any relevant fields in the Issue Template
|
||||
|
||||
### Pull Requests
|
||||
|
||||
* Run [ansible-lint](https://github.com/willthames/ansible-lint) or [shellcheck](https://github.com/koalaman/shellcheck) on any new scripts
|
||||
* Run the full linter suite: `./scripts/lint.sh`
|
||||
* Test your changes on multiple platforms when possible
|
||||
* Use conventional commit messages that clearly describe your changes
|
||||
* Pin dependency versions rather than using ranges (e.g., `==1.2.3` not `>=1.2.0`)
|
||||
|
||||
### Development Setup
|
||||
|
||||
* Clone the repository: `git clone https://github.com/trailofbits/algo.git`
|
||||
* Run Algo: `./algo` (dependencies installed automatically via uv)
|
||||
* For local testing, consider using Docker or a cloud provider test instance
|
||||
|
||||
Thanks!
|
||||
|
|
Dockerfile (63 changed lines)
|
@ -1,33 +1,56 @@
|
|||
FROM python:3.11-alpine
|
||||
# syntax=docker/dockerfile:1
|
||||
FROM python:3.12-alpine
|
||||
|
||||
ARG VERSION="git"
|
||||
ARG PACKAGES="bash libffi openssh-client openssl rsync tini gcc libffi-dev linux-headers make musl-dev openssl-dev rust cargo"
|
||||
# Removed rust/cargo (not needed with uv), simplified package list
|
||||
ARG PACKAGES="bash openssh-client openssl rsync tini"
|
||||
|
||||
LABEL name="algo" \
|
||||
version="${VERSION}" \
|
||||
description="Set up a personal IPsec VPN in the cloud" \
|
||||
maintainer="Trail of Bits <http://github.com/trailofbits/algo>"
|
||||
maintainer="Trail of Bits <https://github.com/trailofbits/algo>" \
|
||||
org.opencontainers.image.source="https://github.com/trailofbits/algo" \
|
||||
org.opencontainers.image.description="Algo VPN - Set up a personal IPsec VPN in the cloud" \
|
||||
org.opencontainers.image.licenses="AGPL-3.0"
|
||||
|
||||
RUN apk --no-cache add ${PACKAGES}
|
||||
RUN adduser -D -H -u 19857 algo
|
||||
RUN mkdir -p /algo && mkdir -p /algo/configs
|
||||
# Install system packages in a single layer
|
||||
RUN apk --no-cache add ${PACKAGES} && \
|
||||
adduser -D -H -u 19857 algo && \
|
||||
mkdir -p /algo /algo/configs
|
||||
|
||||
WORKDIR /algo
|
||||
COPY requirements.txt .
|
||||
RUN python3 -m pip --no-cache-dir install -U pip && \
|
||||
python3 -m pip --no-cache-dir install virtualenv && \
|
||||
python3 -m virtualenv .env && \
|
||||
source .env/bin/activate && \
|
||||
python3 -m pip --no-cache-dir install -r requirements.txt
|
||||
COPY . .
|
||||
RUN chmod 0755 /algo/algo-docker.sh
|
||||
|
||||
# Because of the bind mounting of `configs/`, we need to run as the `root` user
|
||||
# This may break in cases where user namespacing is enabled, so hopefully Docker
|
||||
# sorts out a way to set permissions on bind-mounted volumes (`docker run -v`)
|
||||
# before userns becomes default
|
||||
# Note that not running as root will break if we don't have a matching userid
|
||||
# in the container. The filesystem has also been set up to assume root.
|
||||
# Copy uv binary from official image (using latest tag for automatic updates)
|
||||
COPY --from=ghcr.io/astral-sh/uv:latest /uv /bin/uv
|
||||
|
||||
# Copy dependency files and install in single layer for better optimization
|
||||
COPY pyproject.toml uv.lock ./
|
||||
RUN uv sync --locked --no-dev
|
||||
|
||||
# Copy application code
|
||||
COPY . .
|
||||
|
||||
# Set executable permissions and prepare runtime
|
||||
RUN chmod 0755 /algo/algo-docker.sh && \
|
||||
chown -R algo:algo /algo && \
|
||||
# Create volume mount point with correct ownership
|
||||
mkdir -p /data && \
|
||||
chown algo:algo /data
|
||||
|
||||
# Multi-arch support metadata
|
||||
ARG TARGETPLATFORM
|
||||
ARG BUILDPLATFORM
|
||||
RUN printf "Built on: %s\nTarget: %s\n" "${BUILDPLATFORM}" "${TARGETPLATFORM}" > /algo/build-info
|
||||
|
||||
# Note: Running as root for bind mount compatibility with algo-docker.sh
|
||||
# The script handles /data volume permissions and needs root access
|
||||
# This is a Docker limitation with bind-mounted volumes
|
||||
USER root
|
||||
|
||||
# Health check to ensure container is functional
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD /bin/uv --version || exit 1
|
||||
|
||||
VOLUME ["/data"]
|
||||
CMD [ "/algo/algo-docker.sh" ]
|
||||
ENTRYPOINT [ "/sbin/tini", "--" ]
|
||||
|
|
Makefile (39 changed lines)
|
@ -1,39 +0,0 @@
|
|||
## docker-build: Build and tag a docker image
|
||||
.PHONY: docker-build
|
||||
|
||||
IMAGE := trailofbits/algo
|
||||
TAG := latest
|
||||
DOCKERFILE := Dockerfile
|
||||
CONFIGURATIONS := $(shell pwd)
|
||||
|
||||
docker-build:
|
||||
docker build \
|
||||
-t $(IMAGE):$(TAG) \
|
||||
-f $(DOCKERFILE) \
|
||||
.
|
||||
|
||||
## docker-deploy: Mount config directory and deploy Algo
|
||||
.PHONY: docker-deploy
|
||||
|
||||
# '--rm' flag removes the container when finished.
|
||||
docker-deploy:
|
||||
docker run \
|
||||
--cap-drop=all \
|
||||
--rm \
|
||||
-it \
|
||||
-v $(CONFIGURATIONS):/data \
|
||||
$(IMAGE):$(TAG)
|
||||
|
||||
## docker-clean: Remove images and containers.
|
||||
.PHONY: docker-prune
|
||||
|
||||
docker-prune:
|
||||
docker images \
|
||||
$(IMAGE) |\
|
||||
awk '{if (NR>1) print $$3}' |\
|
||||
xargs docker rmi
|
||||
|
||||
## docker-all: Build, Deploy, Prune
|
||||
.PHONY: docker-all
|
||||
|
||||
docker-all: docker-build docker-deploy docker-prune
|
|
@ -21,9 +21,11 @@
|
|||
## Checklist:
|
||||
<!--- Go over all the following points, and put an `x` in all the boxes that apply. -->
|
||||
<!--- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
|
||||
- [] I have read the **CONTRIBUTING** document.
|
||||
- [] My code follows the code style of this project.
|
||||
- [] My change requires a change to the documentation.
|
||||
- [] I have updated the documentation accordingly.
|
||||
- [] I have added tests to cover my changes.
|
||||
- [] All new and existing tests passed.
|
||||
- [ ] I have read the **CONTRIBUTING** document.
|
||||
- [ ] My code passes all linters (`./scripts/lint.sh`)
|
||||
- [ ] My code follows the code style of this project.
|
||||
- [ ] My change requires a change to the documentation.
|
||||
- [ ] I have updated the documentation accordingly.
|
||||
- [ ] I have added tests to cover my changes.
|
||||
- [ ] All new and existing tests passed.
|
||||
- [ ] Dependencies use exact versions (e.g., `==1.2.3` not `>=1.2.0`).
|
||||
|
|
README.md (129 changed lines)
|
@ -1,21 +1,22 @@
|
|||
# Algo VPN
|
||||
|
||||
[](https://twitter.com/AlgoVPN)
|
||||
[](https://github.com/trailofbits/algo/actions)
|
||||
[](https://x.com/AlgoVPN)
|
||||
|
||||
Algo VPN is a set of Ansible scripts that simplify the setup of a personal WireGuard and IPsec VPN. It uses the most secure defaults available and works with common cloud providers. See our [release announcement](https://blog.trailofbits.com/2016/12/12/meet-algo-the-vpn-that-works/) for more information.
|
||||
Algo VPN is a set of Ansible scripts that simplify the setup of a personal WireGuard and IPsec VPN. It uses the most secure defaults available and works with common cloud providers.
|
||||
|
||||
See our [release announcement](https://blog.trailofbits.com/2016/12/12/meet-algo-the-vpn-that-works/) for more information.
|
||||
|
||||
## Features
|
||||
|
||||
* Supports only IKEv2 with strong crypto (AES-GCM, SHA2, and P-256) for iOS, macOS, and Linux
|
||||
* Supports [WireGuard](https://www.wireguard.com/) for all of the above, in addition to Android and Windows 10
|
||||
* Supports [WireGuard](https://www.wireguard.com/) for all of the above, in addition to Android and Windows 11
|
||||
* Generates .conf files and QR codes for iOS, macOS, Android, and Windows WireGuard clients
|
||||
* Generates Apple profiles to auto-configure iOS and macOS devices for IPsec - no client software required
|
||||
* Includes a helper script to add and remove users
|
||||
* Blocks ads with a local DNS resolver (optional)
|
||||
* Sets up limited SSH users for tunneling traffic (optional)
|
||||
* Based on current versions of Ubuntu and strongSwan
|
||||
* Installs to DigitalOcean, Amazon Lightsail, Amazon EC2, Vultr, Microsoft Azure, Google Compute Engine, Scaleway, OpenStack, CloudStack, Hetzner Cloud, Linode, or [your own Ubuntu server (for more advanced users)](docs/deploy-to-ubuntu.md)
|
||||
* Installs to DigitalOcean, Amazon Lightsail, Amazon EC2, Vultr, Microsoft Azure, Google Compute Engine, Scaleway, OpenStack, CloudStack, Hetzner Cloud, Linode, or [your own Ubuntu server (for advanced users)](docs/deploy-to-ubuntu.md)
|
||||
|
||||
## Anti-features
|
||||
|
||||
|
@ -29,9 +30,9 @@ Algo VPN is a set of Ansible scripts that simplify the setup of a personal WireG
|
|||
|
||||
The easiest way to get an Algo server running is to run it on your local system or from [Google Cloud Shell](docs/deploy-from-cloudshell.md) and let it set up a _new_ virtual machine in the cloud for you.
|
||||
|
||||
1. **Setup an account on a cloud hosting provider.** Algo supports [DigitalOcean](https://m.do.co/c/4d7f4ff9cfe4) (most user friendly), [Amazon Lightsail](https://aws.amazon.com/lightsail/), [Amazon EC2](https://aws.amazon.com/), [Vultr](https://www.vultr.com/), [Microsoft Azure](https://azure.microsoft.com/), [Google Compute Engine](https://cloud.google.com/compute/), [Scaleway](https://www.scaleway.com/), [DreamCompute](https://www.dreamhost.com/cloud/computing/), [Linode](https://www.linode.com), or other OpenStack-based cloud hosting, [Exoscale](https://www.exoscale.com) or other CloudStack-based cloud hosting, or [Hetzner Cloud](https://www.hetzner.com/).
|
||||
1. **Setup an account on a cloud hosting provider.** Algo supports [DigitalOcean](https://m.do.co/c/4d7f4ff9cfe4) (most user friendly), [Amazon Lightsail](https://aws.amazon.com/lightsail/), [Amazon EC2](https://aws.amazon.com/), [Vultr](https://www.vultr.com/), [Microsoft Azure](https://azure.microsoft.com/), [Google Compute Engine](https://cloud.google.com/compute/), [Scaleway](https://www.scaleway.com/), [DreamCompute](https://www.dreamhost.com/cloud/computing/), [Linode](https://www.linode.com) other OpenStack-based cloud hosting, [Exoscale](https://www.exoscale.com) or other CloudStack-based cloud hosting, or [Hetzner Cloud](https://www.hetzner.com/).
|
||||
|
||||
2. **Get a copy of Algo.** The Algo scripts will be installed on your local system. There are two ways to get a copy:
|
||||
2. **Get a copy of Algo.** The Algo scripts will be run from your local system. There are two ways to get a copy:
|
||||
|
||||
- Download the [ZIP file](https://github.com/trailofbits/algo/archive/master.zip). Unzip the file to create a directory named `algo-master` containing the Algo scripts.
|
||||
|
||||
|
@ -40,49 +41,23 @@ The easiest way to get an Algo server running is to run it on your local system
|
|||
git clone https://github.com/trailofbits/algo.git
|
||||
```
|
||||
|
||||
3. **Install Algo's core dependencies.** Algo requires that **Python 3.10 or later** and at least one supporting package are installed on your system.
|
||||
3. **Set your configuration options.** Open `config.cfg` in your favorite text editor. Specify the users you want to create in the `users` list. Create a unique user for each device you plan to connect to your VPN. You should also review the other options before deployment, as changing your mind about them later [may require you to deploy a brand new server](https://github.com/trailofbits/algo/blob/master/docs/faq.md#i-deployed-an-algo-server-can-you-update-it-with-new-features).
|
||||
|
||||
- **macOS:** Catalina (10.15) and higher includes Python 3 as part of the optional Command Line Developer Tools package. From Terminal run:
|
||||
|
||||
```bash
|
||||
python3 -m pip install --user --upgrade virtualenv
|
||||
```
|
||||
|
||||
If prompted, install the Command Line Developer Tools and re-run the above command.
|
||||
|
||||
For macOS versions prior to Catalina, see [Deploy from macOS](docs/deploy-from-macos.md) for information on installing Python 3 .
|
||||
|
||||
- **Linux:** Recent releases of Ubuntu, Debian, and Fedora come with Python 3 already installed. If your Python version is not 3.10, then you will need to use pyenv to install Python 3.10. Make sure your system is up-to-date and install the supporting package(s):
|
||||
* Ubuntu and Debian:
|
||||
```bash
|
||||
sudo apt install -y --no-install-recommends python3-virtualenv file lookup
|
||||
```
|
||||
On a Raspberry Pi running Ubuntu also install `libffi-dev` and `libssl-dev`.
|
||||
|
||||
* Fedora:
|
||||
```bash
|
||||
sudo dnf install -y python3-virtualenv
|
||||
```
|
||||
|
||||
- **Windows:** Use the Windows Subsystem for Linux (WSL) to create your own copy of Ubuntu running under Windows from which to install and run Algo. See the [Windows documentation](docs/deploy-from-windows.md) for more information.
|
||||
|
||||
4. **Install Algo's remaining dependencies.** You'll need to run these commands from the Algo directory each time you download a new copy of Algo. In a Terminal window `cd` into the `algo-master` (ZIP file) or `algo` (`git clone`) directory and run:
|
||||
4. **Start the deployment.** Return to your terminal. In the Algo directory, run the appropriate script for your platform:
|
||||
|
||||
**macOS/Linux:**
|
||||
```bash
|
||||
python3 -m virtualenv --python="$(command -v python3)" .env &&
|
||||
source .env/bin/activate &&
|
||||
python3 -m pip install -U pip virtualenv &&
|
||||
python3 -m pip install -r requirements.txt
|
||||
./algo
|
||||
```
|
||||
On Fedora first run `export TMPDIR=/var/tmp`, then add the option `--system-site-packages` to the first command above (after `python3 -m virtualenv`). On macOS install the C compiler if prompted.
|
||||
|
||||
**Windows:**
|
||||
```powershell
|
||||
.\algo.ps1
|
||||
```
|
||||
|
||||
The first time you run the script, it will automatically install the required Python environment (Python 3.11+). On subsequent runs, it starts immediately and works on all platforms (macOS, Linux, Windows via WSL). The Windows PowerShell script automatically uses WSL when needed, since Ansible requires a Unix-like environment. There are several optional features available, none of which are required for a fully functional VPN server. These optional features are described in the [deployment documentation](docs/deploy-from-ansible.md).
|
||||
|
||||
5. **Set your configuration options.** Open the file `config.cfg` in your favorite text editor. Specify the users you wish to create in the `users` list. Create a unique user for each device you plan to connect to your VPN.
|
||||
> Note: [IKEv2 Only] If you want to add or delete users later, you **must** select `yes` at the `Do you want to retain the keys (PKI)?` prompt during the server deployment. You should also review the other options before deployment, as changing your mind about them later [may require you to deploy a brand new server](https://github.com/trailofbits/algo/blob/master/docs/faq.md#i-deployed-an-algo-server-can-you-update-it-with-new-features).
|
||||
|
||||
6. **Start the deployment.** Return to your terminal. In the Algo directory, run `./algo` and follow the instructions. There are several optional features available, none of which are required for a fully functional VPN server. These optional features are described in greater detail in [here](docs/deploy-from-ansible.md).
|
||||
|
||||
That's it! You will get the message below when the server deployment process completes. Take note of the p12 (user certificate) password and the CA key in case you need them later, **they will only be displayed this time**.
|
||||
|
||||
You can now set up clients to connect to your VPN. Proceed to [Configure the VPN Clients](#configure-the-vpn-clients) below.
|
||||
That's it! You can now set up clients to connect to your VPN. Proceed to [Configure the VPN Clients](#configure-the-vpn-clients) below.
|
||||
|
||||
```
|
||||
"# Congratulations! #"
|
||||
|
@ -100,45 +75,45 @@ You can now set up clients to connect to your VPN. Proceed to [Configure the VPN
|
|||
|
||||
Certificates and configuration files that users will need are placed in the `configs` directory. Make sure to secure these files since many contain private keys. All files are saved under a subdirectory named with the IP address of your new Algo VPN server.
|
||||
|
||||
### Apple Devices
|
||||
**Important for IPsec users**: If you want to add or delete users later, you must select `yes` at the `Do you want to retain the keys (PKI)?` prompt during the server deployment. This preserves the certificate authority needed for user management.
|
||||
|
||||
### Apple
|
||||
|
||||
WireGuard is used to provide VPN services on Apple devices. Algo generates a WireGuard configuration file, `wireguard/<username>.conf`, and a QR code, `wireguard/<username>.png`, for each user defined in `config.cfg`.
|
||||
|
||||
On iOS, install the [WireGuard](https://itunes.apple.com/us/app/wireguard/id1441195209?mt=8) app from the iOS App Store. Then, use the WireGuard app to scan the QR code or AirDrop the configuration file to the device.
|
||||
|
||||
On macOS Mojave or later, install the [WireGuard](https://itunes.apple.com/us/app/wireguard/id1451685025?mt=12) app from the Mac App Store. WireGuard will appear in the menu bar once you run the app. Click on the WireGuard icon, choose **Import tunnel(s) from file...**, then select the appropriate WireGuard configuration file.
|
||||
On macOS, install the [WireGuard](https://itunes.apple.com/us/app/wireguard/id1451685025?mt=12) app from the Mac App Store. WireGuard will appear in the menu bar once you run the app. Click on the WireGuard icon, choose **Import tunnel(s) from file...**, then select the appropriate WireGuard configuration file.
|
||||
|
||||
On either iOS or macOS, you can enable "Connect on Demand" and/or exclude certain trusted Wi-Fi networks (such as your home or work) by editing the tunnel configuration in the WireGuard app. (Algo can't do this automatically for you.)
|
||||
|
||||
Installing WireGuard is a little more complicated on older version of macOS. See [Using macOS as a Client with WireGuard](docs/client-macos-wireguard.md).
|
||||
If you prefer to use the built-in IPsec VPN on Apple devices, or need "Connect on Demand" or excluded Wi-Fi networks automatically configured, see the [Apple IPsec client setup guide](docs/client-apple-ipsec.md) for detailed configuration instructions.
|
||||
|
||||
If you prefer to use the built-in IPSEC VPN on Apple devices, or need "Connect on Demand" or excluded Wi-Fi networks automatically configured, then see [Using Apple Devices as a Client with IPSEC](docs/client-apple-ipsec.md).
|
||||
### Android
|
||||
|
||||
### Android Devices
|
||||
|
||||
WireGuard is used to provide VPN services on Android. Install the [WireGuard VPN Client](https://play.google.com/store/apps/details?id=com.wireguard.android). Import the corresponding `wireguard/<name>.conf` file to your device, then setup a new connection with it. See the [Android setup instructions](/docs/client-android.md) for more detailed walkthrough.
|
||||
WireGuard is used to provide VPN services on Android. Install the [WireGuard VPN Client](https://play.google.com/store/apps/details?id=com.wireguard.android). Import the corresponding `wireguard/<name>.conf` file to your device, then set up a new connection with it. See the [Android setup guide](docs/client-android.md) for detailed installation and configuration instructions.
|
||||
|
||||
### Windows
|
||||
|
||||
WireGuard is used to provide VPN services on Windows. Algo generates a WireGuard configuration file, `wireguard/<username>.conf`, for each user defined in `config.cfg`.
|
||||
|
||||
Install the [WireGuard VPN Client](https://www.wireguard.com/install/#windows-7-8-81-10-2012-2016-2019). Import the generated `wireguard/<username>.conf` file to your device, then setup a new connection with it.
|
||||
Install the [WireGuard VPN Client](https://www.wireguard.com/install/#windows-7-8-81-10-2012-2016-2019). Import the generated `wireguard/<username>.conf` file to your device, then set up a new connection with it. See the [Windows setup instructions](docs/client-windows.md) for more detailed walkthrough and troubleshooting.
|
||||
|
||||
### Linux WireGuard Clients
|
||||
### Linux
|
||||
|
||||
WireGuard works great with Linux clients. See [this page](docs/client-linux-wireguard.md) for an example of how to configure WireGuard on Ubuntu.
|
||||
Linux clients can use either WireGuard or IPsec:
|
||||
|
||||
### Linux strongSwan IPsec Clients (e.g., OpenWRT, Ubuntu Server, etc.)
|
||||
WireGuard: WireGuard works great with Linux clients. See the [Linux WireGuard setup guide](docs/client-linux-wireguard.md) for step-by-step instructions on configuring WireGuard on Ubuntu and other distributions.
|
||||
|
||||
Please see [this page](docs/client-linux-ipsec.md).
|
||||
IPsec: For strongSwan IPsec clients (including OpenWrt, Ubuntu Server, and other distributions), see the [Linux IPsec setup guide](docs/client-linux-ipsec.md) for detailed configuration instructions.
|
||||
|
||||
### OpenWrt Wireguard Clients
|
||||
### OpenWrt
|
||||
|
||||
Please see [this page](docs/client-openwrt-router-wireguard.md).
|
||||
For OpenWrt routers using WireGuard, see the [OpenWrt WireGuard setup guide](docs/client-openwrt-router-wireguard.md) for router-specific configuration instructions.
|
||||
|
||||
### Other Devices
|
||||
|
||||
Depending on the platform, you may need one or multiple of the following files.
|
||||
For devices not covered above or manual configuration, you'll need specific certificate and configuration files. The files you need depend on your device platform and VPN protocol (WireGuard or IPsec).
|
||||
|
||||
* ipsec/manual/cacert.pem: CA Certificate
|
||||
* ipsec/manual/<user>.p12: User Certificate and Private Key (in PKCS#12 format)
|
||||
|
@ -150,9 +125,9 @@ Depending on the platform, you may need one or multiple of the following files.
|
|||
|
||||
## Setup an SSH Tunnel
|
||||
|
||||
If you turned on the optional SSH tunneling role, then local user accounts will be created for each user in `config.cfg` and SSH authorized_key files for them will be in the `configs` directory (user.pem). SSH user accounts do not have shell access, cannot authenticate with a password, and only have limited tunneling options (e.g., `ssh -N` is required). This ensures that SSH users have the least access required to setup a tunnel and can perform no other actions on the Algo server.
|
||||
If you turned on the optional SSH tunneling role, local user accounts will be created for each user in `config.cfg`, and SSH authorized_key files for them will be in the `configs` directory (user.pem). SSH user accounts do not have shell access, cannot authenticate with a password, and only have limited tunneling options (e.g., `ssh -N` is required). This ensures that SSH users have the least access required to set up a tunnel and can perform no other actions on the Algo server.
|
||||
|
||||
Use the example command below to start an SSH tunnel by replacing `<user>` and `<ip>` with your own. Once the tunnel is setup, you can configure a browser or other application to use 127.0.0.1:1080 as a SOCKS proxy to route traffic through the Algo server:
|
||||
Use the example command below to start an SSH tunnel by replacing `<user>` and `<ip>` with your own. Once the tunnel is set up, you can configure a browser or other application to use 127.0.0.1:1080 as a SOCKS proxy to route traffic through the Algo server:
|
||||
|
||||
```bash
|
||||
ssh -D 127.0.0.1:1080 -f -q -C -N <user>@algo -i configs/<ip>/ssh-tunnel/<user>.pem -F configs/<ip>/ssh_config
|
||||
|
@ -166,7 +141,7 @@ Your Algo server is configured for key-only SSH access for administrative purpos
|
|||
ssh -F configs/<ip>/ssh_config <hostname>
|
||||
```
|
||||
|
||||
where `<ip>` is the IP address of your Algo server. If you find yourself regularly logging into the server then it will be useful to load your Algo ssh key automatically. Add the following snippet to the bottom of `~/.bash_profile` to add it to your shell environment permanently:
|
||||
where `<ip>` is the IP address of your Algo server. If you find yourself regularly logging into the server, it will be useful to load your Algo SSH key automatically. Add the following snippet to the bottom of `~/.bash_profile` to add it to your shell environment permanently:
|
||||
|
||||
```
|
||||
ssh-add ~/.ssh/algo > /dev/null 2>&1
|
||||
|
@ -182,13 +157,23 @@ where `<algodirectory>` is the directory where you cloned Algo.
|
|||
|
||||
## Adding or Removing Users
|
||||
|
||||
_If you chose to save the CA key during the deploy process,_ then Algo's own scripts can easily add and remove users from the VPN server.
|
||||
Algo makes it easy to add or remove users from your VPN server after initial deployment.
|
||||
|
||||
1. Update the `users` list in your `config.cfg`
|
||||
2. Open a terminal, `cd` to the algo directory, and activate the virtual environment with `source .env/bin/activate`
|
||||
3. Run the command: `./algo update-users`
|
||||
For IPsec users: You must have selected `yes` at the `Do you want to retain the keys (PKI)?` prompt during the initial server deployment. This preserves the certificate authority needed for user management. You should also save the p12 and CA key passwords shown during deployment, as they're only displayed once.
|
||||
|
||||
After this process completes, the Algo VPN server will contain only the users listed in the `config.cfg` file.
|
||||
To add or remove users, first edit the `users` list in your `config.cfg` file. Add new usernames or remove existing ones as needed. Then navigate to the algo directory in your terminal and run:
|
||||
|
||||
**macOS/Linux:**
|
||||
```bash
|
||||
./algo update-users
|
||||
```
|
||||
|
||||
**Windows:**
|
||||
```powershell
|
||||
.\algo.ps1 update-users
|
||||
```
|
||||
|
||||
After the process completes, new configuration files will be generated in the `configs` directory for any new users. The Algo VPN server will be updated to contain only the users listed in the `config.cfg` file. Removed users will no longer be able to connect, and new users will have fresh certificates and configuration files ready for use.
|
||||
|
||||
## Additional Documentation
|
||||
* [FAQ](docs/faq.md)
|
||||
|
@ -211,6 +196,7 @@ After this process completes, the Algo VPN server will contain only the users li
|
|||
* Deploy from a [Docker container](docs/deploy-from-docker.md)
|
||||
|
||||
### Setup VPN Clients to Connect to the Server
|
||||
* Setup [Windows](docs/client-windows.md) clients
|
||||
* Setup [Android](docs/client-android.md) clients
|
||||
* Setup [Linux](docs/client-linux.md) clients with Ansible
|
||||
* Setup Ubuntu clients to use [WireGuard](docs/client-linux-wireguard.md)
|
||||
|
@ -223,7 +209,6 @@ After this process completes, the Algo VPN server will contain only the users li
|
|||
* Deploy from [Ansible](docs/deploy-from-ansible.md) non-interactively
|
||||
* Deploy onto a [cloud server at time of creation with shell script or cloud-init](docs/deploy-from-script-or-cloud-init-to-localhost.md)
|
||||
* Deploy to an [unsupported cloud provider](docs/deploy-to-unsupported-cloud.md)
|
||||
* Deploy to your own [FreeBSD](docs/deploy-to-freebsd.md) server
|
||||
|
||||
If you've read all the documentation and have further questions, [create a new discussion](https://github.com/trailofbits/algo/discussions).
|
||||
|
||||
|
@ -252,14 +237,12 @@ If you've read all the documentation and have further questions, [create a new d
|
|||
-- [Thorin Klosowski](https://twitter.com/kingthor) for [Lifehacker](http://lifehacker.com/how-to-set-up-your-own-completely-free-vpn-in-the-cloud-1794302432)
|
||||
|
||||
## Support Algo VPN
|
||||
[](https://flattr.com/submit/auto?fid=kxw60j&url=https%3A%2F%2Fgithub.com%2Ftrailofbits%2Falgo)
|
||||
[](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=CYZZD39GXUJ3E)
|
||||
[](https://www.patreon.com/algovpn)
|
||||
[](https://www.bountysource.com/teams/trailofbits)
|
||||
|
||||
All donations support continued development. Thanks!
|
||||
|
||||
* We accept donations via [PayPal](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=CYZZD39GXUJ3E), [Patreon](https://www.patreon.com/algovpn), and [Flattr](https://flattr.com/submit/auto?fid=kxw60j&url=https%3A%2F%2Fgithub.com%2Ftrailofbits%2Falgo).
|
||||
* We accept donations via [PayPal](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=CYZZD39GXUJ3E) and [Patreon](https://www.patreon.com/algovpn).
|
||||
* Use our [referral code](https://m.do.co/c/4d7f4ff9cfe4) when you sign up to Digital Ocean for a $10 credit.
|
||||
* We also accept and appreciate contributions of new code and bugfixes via Github Pull Requests.
|
||||
|
||||
|
|
Vagrantfile (vendored, 36 changed lines)
|
@ -1,36 +0,0 @@
|
|||
Vagrant.configure("2") do |config|
|
||||
config.vm.box = "bento/ubuntu-20.04"
|
||||
|
||||
config.vm.provider "virtualbox" do |v|
|
||||
v.name = "algo-20.04"
|
||||
v.memory = "512"
|
||||
v.cpus = "1"
|
||||
end
|
||||
|
||||
config.vm.synced_folder "./", "/opt/algo", create: true
|
||||
|
||||
config.vm.provision "ansible_local" do |ansible|
|
||||
ansible.playbook = "/opt/algo/main.yml"
|
||||
|
||||
# https://github.com/hashicorp/vagrant/issues/12204
|
||||
ansible.pip_install_cmd = "sudo apt-get install -y python3-pip python-is-python3 && sudo ln -s -f /usr/bin/pip3 /usr/bin/pip"
|
||||
ansible.install_mode = "pip_args_only"
|
||||
ansible.pip_args = "-r /opt/algo/requirements.txt"
|
||||
ansible.inventory_path = "/opt/algo/inventory"
|
||||
ansible.limit = "local"
|
||||
ansible.verbose = "-vvvv"
|
||||
ansible.extra_vars = {
|
||||
provider: "local",
|
||||
server: "localhost",
|
||||
ssh_user: "",
|
||||
endpoint: "127.0.0.1",
|
||||
ondemand_cellular: true,
|
||||
ondemand_wifi: false,
|
||||
dns_adblocking: true,
|
||||
ssh_tunneling: true,
|
||||
store_pki: true,
|
||||
tests: true,
|
||||
no_log: false
|
||||
}
|
||||
end
|
||||
end
|
algo (168 changed lines)
|
@ -2,22 +2,160 @@
|
|||
|
||||
set -e
|
||||
|
||||
if [ -z ${VIRTUAL_ENV+x} ]
|
||||
then
|
||||
ACTIVATE_SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.env/bin/activate"
|
||||
if [ -f "$ACTIVATE_SCRIPT" ]
|
||||
then
|
||||
# shellcheck source=/dev/null
|
||||
source "$ACTIVATE_SCRIPT"
|
||||
else
|
||||
echo "$ACTIVATE_SCRIPT not found. Did you follow documentation to install dependencies?"
|
||||
exit 1
|
||||
fi
|
||||
# Track which installation method succeeded
|
||||
UV_INSTALL_METHOD=""
|
||||
|
||||
# Function to install uv via package managers (most secure)
|
||||
install_uv_via_package_manager() {
|
||||
echo "Attempting to install uv via system package manager..."
|
||||
|
||||
if command -v brew &> /dev/null; then
|
||||
echo "Using Homebrew..."
|
||||
brew install uv && UV_INSTALL_METHOD="Homebrew" && return 0
|
||||
elif command -v apt &> /dev/null && apt list uv 2>/dev/null | grep -q uv; then
|
||||
echo "Using apt..."
|
||||
sudo apt update && sudo apt install -y uv && UV_INSTALL_METHOD="apt" && return 0
|
||||
elif command -v dnf &> /dev/null; then
|
||||
echo "Using dnf..."
|
||||
sudo dnf install -y uv 2>/dev/null && UV_INSTALL_METHOD="dnf" && return 0
|
||||
elif command -v pacman &> /dev/null; then
|
||||
echo "Using pacman..."
|
||||
sudo pacman -S --noconfirm uv 2>/dev/null && UV_INSTALL_METHOD="pacman" && return 0
|
||||
elif command -v zypper &> /dev/null; then
|
||||
echo "Using zypper..."
|
||||
sudo zypper install -y uv 2>/dev/null && UV_INSTALL_METHOD="zypper" && return 0
|
||||
elif command -v winget &> /dev/null; then
|
||||
echo "Using winget..."
|
||||
winget install --id=astral-sh.uv -e && UV_INSTALL_METHOD="winget" && return 0
|
||||
elif command -v scoop &> /dev/null; then
|
||||
echo "Using scoop..."
|
||||
scoop install uv && UV_INSTALL_METHOD="scoop" && return 0
|
||||
fi
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
# Function to handle Ubuntu-specific installation alternatives
|
||||
install_uv_ubuntu_alternatives() {
|
||||
# Check if we're on Ubuntu
|
||||
if ! command -v lsb_release &> /dev/null || [[ "$(lsb_release -si)" != "Ubuntu" ]]; then
|
||||
return 1 # Not Ubuntu, skip these options
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Ubuntu detected. Additional trusted installation options available:"
|
||||
echo ""
|
||||
echo "1. pipx (official PyPI, installs ~9 packages)"
|
||||
echo " Command: sudo apt install pipx && pipx install uv"
|
||||
echo ""
|
||||
echo "2. snap (community-maintained by Canonical employee)"
|
||||
echo " Command: sudo snap install astral-uv --classic"
|
||||
echo " Source: https://github.com/lengau/uv-snap"
|
||||
echo ""
|
||||
echo "3. Continue to official installer script download"
|
||||
echo ""
|
||||
|
||||
while true; do
|
||||
read -r -p "Choose installation method (1/2/3): " choice
|
||||
case $choice in
|
||||
1)
|
||||
echo "Installing uv via pipx..."
|
||||
if sudo apt update && sudo apt install -y pipx; then
|
||||
if pipx install uv; then
|
||||
# Add pipx bin directory to PATH
|
||||
export PATH="$HOME/.local/bin:$PATH"
|
||||
UV_INSTALL_METHOD="pipx"
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
echo "pipx installation failed, trying next option..."
|
||||
;;
|
||||
2)
|
||||
echo "Installing uv via snap..."
|
||||
if sudo snap install astral-uv --classic; then
|
||||
# Snap binaries should be automatically in PATH via /snap/bin
|
||||
UV_INSTALL_METHOD="snap"
|
||||
return 0
|
||||
fi
|
||||
echo "snap installation failed, trying next option..."
|
||||
;;
|
||||
3)
|
||||
return 1 # Continue to official installer download
|
||||
;;
|
||||
*)
|
||||
echo "Invalid option. Please choose 1, 2, or 3."
|
||||
;;
|
||||
esac
|
||||
done
|
||||
}
|
||||
|
||||
# Function to install uv via download (with user consent)
|
||||
install_uv_via_download() {
|
||||
echo ""
|
||||
echo "⚠️ SECURITY NOTICE ⚠️"
|
||||
echo "uv is not available via system package managers on this system."
|
||||
echo "To continue, we need to download and execute an installation script from:"
|
||||
echo " https://astral.sh/uv/install.sh (Linux/macOS)"
|
||||
echo " https://astral.sh/uv/install.ps1 (Windows)"
|
||||
echo ""
|
||||
echo "For maximum security, you can install uv manually instead:"
|
||||
echo " 1. Visit: https://docs.astral.sh/uv/getting-started/installation/"
|
||||
echo " 2. Download the binary for your platform from GitHub releases"
|
||||
echo " 3. Verify checksums and install manually"
|
||||
echo " 4. Then run: ./algo"
|
||||
echo ""
|
||||
|
||||
read -p "Continue with script download? (y/N): " -n 1 -r
|
||||
echo
|
||||
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
|
||||
echo "Installation cancelled. Please install uv manually and retry."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Downloading uv installation script..."
|
||||
if [[ "$OSTYPE" == "msys" || "$OSTYPE" == "cygwin" || "$OSTYPE" == "linux-gnu" && -n "${WSL_DISTRO_NAME:-}" ]] || uname -s | grep -q "MINGW\|MSYS"; then
|
||||
# Windows (Git Bash/WSL/MINGW) - use versioned installer
|
||||
powershell -ExecutionPolicy ByPass -c "irm https://github.com/astral-sh/uv/releases/download/0.8.5/uv-installer.ps1 | iex"
|
||||
UV_INSTALL_METHOD="official installer (Windows)"
|
||||
else
|
||||
# macOS/Linux - use the versioned script for consistency
|
||||
curl --proto '=https' --tlsv1.2 -LsSf https://github.com/astral-sh/uv/releases/download/0.8.5/uv-installer.sh | sh
|
||||
UV_INSTALL_METHOD="official installer"
|
||||
fi
|
||||
}
|
||||
|
||||
# Check if uv is installed, if not, install it securely
|
||||
if ! command -v uv &> /dev/null; then
|
||||
echo "uv (Python package manager) not found. Installing..."
|
||||
|
||||
# Try package managers first (most secure)
|
||||
if ! install_uv_via_package_manager; then
|
||||
# Try Ubuntu-specific alternatives if available
|
||||
if ! install_uv_ubuntu_alternatives; then
|
||||
# Fall back to download with user consent
|
||||
install_uv_via_download
|
||||
fi
|
||||
fi
|
||||
|
||||
# Reload PATH to find uv (includes pipx, cargo, and snap paths)
|
||||
# Note: This PATH change only affects the current shell session.
|
||||
# Users may need to restart their terminal for subsequent runs.
|
||||
export PATH="$HOME/.local/bin:$HOME/.cargo/bin:/snap/bin:$PATH"
|
||||
|
||||
# Verify installation worked
|
||||
if ! command -v uv &> /dev/null; then
|
||||
echo "Error: uv installation failed. Please restart your terminal and try again."
|
||||
echo "Or install manually from: https://docs.astral.sh/uv/getting-started/installation/"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ uv installed successfully via ${UV_INSTALL_METHOD}!"
|
||||
fi
|
||||
|
||||
# Run the appropriate playbook
|
||||
case "$1" in
|
||||
update-users) PLAYBOOK=users.yml; ARGS=( "${@:2}" -t update-users ) ;;
|
||||
*) PLAYBOOK=main.yml; ARGS=( "${@}" ) ;;
|
||||
update-users)
|
||||
uv run ansible-playbook users.yml "${@:2}" -t update-users ;;
|
||||
*)
|
||||
uv run ansible-playbook main.yml "${@}" ;;
|
||||
esac
|
||||
|
||||
ansible-playbook ${PLAYBOOK} "${ARGS[@]}"
|
||||
|
|
|
@ -11,9 +11,9 @@ usage() {
|
|||
retcode="${1:-0}"
|
||||
echo "To run algo from Docker:"
|
||||
echo ""
|
||||
echo "docker run --cap-drop=all -it -v <path to configurations>:"${DATA_DIR}" ghcr.io/trailofbits/algo:latest"
|
||||
echo "docker run --cap-drop=all -it -v <path to configurations>:${DATA_DIR} ghcr.io/trailofbits/algo:latest"
|
||||
echo ""
|
||||
exit ${retcode}
|
||||
exit "${retcode}"
|
||||
}
|
||||
|
||||
if [ ! -f "${DATA_DIR}"/config.cfg ] ; then
|
||||
|
@ -25,7 +25,7 @@ fi
|
|||
|
||||
if [ ! -e /dev/console ] ; then
|
||||
echo "Looks like you're trying to run this container without a TTY."
|
||||
echo "If you don't pass `-t`, you can't interact with the algo script."
|
||||
echo "If you don't pass -t, you can't interact with the algo script."
|
||||
echo ""
|
||||
usage -1
|
||||
fi
|
||||
|
@ -41,4 +41,4 @@ test -d "${DATA_DIR}"/configs && rsync -qLktr --delete "${DATA_DIR}"/configs "${
|
|||
retcode=${?}
|
||||
|
||||
rsync -qLktr --delete "${ALGO_DIR}"/configs "${DATA_DIR}"/
|
||||
exit ${retcode}
|
||||
exit "${retcode}"
|
||||
|
|
|
@ -68,10 +68,12 @@ elif [[ -f LICENSE && ${STAT} ]]; then
|
|||
fi
|
||||
|
||||
# The Python version might be useful to know.
|
||||
if [[ -x ./.env/bin/python3 ]]; then
|
||||
./.env/bin/python3 --version 2>&1
|
||||
if [[ -x $(command -v uv) ]]; then
|
||||
echo "uv Python environment:"
|
||||
uv run python --version 2>&1
|
||||
uv --version 2>&1
|
||||
elif [[ -f ./algo ]]; then
|
||||
echo ".env/bin/python3 not found: has 'python3 -m virtualenv ...' been run?"
|
||||
echo "uv not found: try running './algo' to install dependencies"
|
||||
fi
|
||||
|
||||
# Just print out all command line arguments, which are expected
|
||||
|
|
algo.egg-info/PKG-INFO (normal file, 34 changed lines)
|
@ -0,0 +1,34 @@
|
|||
Metadata-Version: 2.4
|
||||
Name: algo
|
||||
Version: 2.0.0b0
|
||||
Summary: Set up a personal IPSEC VPN in the cloud
|
||||
Requires-Python: >=3.11
|
||||
License-File: LICENSE
|
||||
Requires-Dist: ansible==11.8.0
|
||||
Requires-Dist: jinja2>=3.1.6
|
||||
Requires-Dist: netaddr==1.3.0
|
||||
Requires-Dist: pyyaml>=6.0.2
|
||||
Requires-Dist: pyopenssl>=0.15
|
||||
Requires-Dist: segno>=1.6.0
|
||||
Provides-Extra: aws
|
||||
Requires-Dist: boto3>=1.34.0; extra == "aws"
|
||||
Requires-Dist: boto>=2.49.0; extra == "aws"
|
||||
Provides-Extra: azure
|
||||
Requires-Dist: azure-identity>=1.15.0; extra == "azure"
|
||||
Requires-Dist: azure-mgmt-compute>=30.0.0; extra == "azure"
|
||||
Requires-Dist: azure-mgmt-network>=25.0.0; extra == "azure"
|
||||
Requires-Dist: azure-mgmt-resource>=23.0.0; extra == "azure"
|
||||
Requires-Dist: msrestazure>=0.6.4; extra == "azure"
|
||||
Provides-Extra: gcp
|
||||
Requires-Dist: google-auth>=2.28.0; extra == "gcp"
|
||||
Requires-Dist: requests>=2.31.0; extra == "gcp"
|
||||
Provides-Extra: hetzner
|
||||
Requires-Dist: hcloud>=1.33.0; extra == "hetzner"
|
||||
Provides-Extra: linode
|
||||
Requires-Dist: linode-api4>=5.15.0; extra == "linode"
|
||||
Provides-Extra: openstack
|
||||
Requires-Dist: openstacksdk>=2.1.0; extra == "openstack"
|
||||
Provides-Extra: cloudstack
|
||||
Requires-Dist: cs>=3.0.0; extra == "cloudstack"
|
||||
Requires-Dist: sshpubkeys>=3.3.1; extra == "cloudstack"
|
||||
Dynamic: license-file
|
algo.egg-info/SOURCES.txt (normal file, 10 changed lines)
|
@ -0,0 +1,10 @@
|
|||
LICENSE
|
||||
README.md
|
||||
pyproject.toml
|
||||
algo.egg-info/PKG-INFO
|
||||
algo.egg-info/SOURCES.txt
|
||||
algo.egg-info/dependency_links.txt
|
||||
algo.egg-info/requires.txt
|
||||
algo.egg-info/top_level.txt
|
||||
tests/test_cloud_init_template.py
|
||||
tests/test_package_preinstall.py
|
algo.egg-info/dependency_links.txt (normal file, 1 changed line)
|
@ -0,0 +1 @@
|
|||
|
algo.egg-info/requires.txt (normal file, 34 changed lines)
|
@ -0,0 +1,34 @@
|
|||
ansible==11.8.0
|
||||
jinja2>=3.1.6
|
||||
netaddr==1.3.0
|
||||
pyyaml>=6.0.2
|
||||
pyopenssl>=0.15
|
||||
segno>=1.6.0
|
||||
|
||||
[aws]
|
||||
boto3>=1.34.0
|
||||
boto>=2.49.0
|
||||
|
||||
[azure]
|
||||
azure-identity>=1.15.0
|
||||
azure-mgmt-compute>=30.0.0
|
||||
azure-mgmt-network>=25.0.0
|
||||
azure-mgmt-resource>=23.0.0
|
||||
msrestazure>=0.6.4
|
||||
|
||||
[cloudstack]
|
||||
cs>=3.0.0
|
||||
sshpubkeys>=3.3.1
|
||||
|
||||
[gcp]
|
||||
google-auth>=2.28.0
|
||||
requests>=2.31.0
|
||||
|
||||
[hetzner]
|
||||
hcloud>=1.33.0
|
||||
|
||||
[linode]
|
||||
linode-api4>=5.15.0
|
||||
|
||||
[openstack]
|
||||
openstacksdk>=2.1.0
|
algo.egg-info/top_level.txt (normal file, 1 changed line)
|
@ -0,0 +1 @@
|
|||
|
algo.ps1 (normal file, 124 changed lines)
|
@ -0,0 +1,124 @@
|
|||
# PowerShell script for Windows users to run Algo VPN
|
||||
param(
|
||||
[Parameter(ValueFromRemainingArguments)]
|
||||
[string[]]$Arguments
|
||||
)
|
||||
|
||||
# Check if we're actually running inside WSL (not just if WSL is available)
|
||||
function Test-RunningInWSL {
|
||||
# These environment variables are only set when running inside WSL
|
||||
return $env:WSL_DISTRO_NAME -or $env:WSLENV
|
||||
}
|
||||
|
||||
# Function to run Algo in WSL
|
||||
function Invoke-AlgoInWSL {
|
||||
param($Arguments)
|
||||
|
||||
Write-Host "NOTICE: Ansible requires a Unix-like environment and cannot run natively on Windows."
|
||||
Write-Host "Attempting to run Algo via Windows Subsystem for Linux (WSL)..."
|
||||
Write-Host ""
|
||||
|
||||
if (-not (Get-Command wsl -ErrorAction SilentlyContinue)) {
|
||||
Write-Host "ERROR: WSL (Windows Subsystem for Linux) is not installed." -ForegroundColor Red
|
||||
Write-Host ""
|
||||
Write-Host "Algo requires WSL to run Ansible on Windows. To install WSL:" -ForegroundColor Yellow
|
||||
Write-Host ""
|
||||
Write-Host " Step 1: Open PowerShell as Administrator and run:"
|
||||
Write-Host " wsl --install -d Ubuntu-22.04" -ForegroundColor Cyan
|
||||
Write-Host " (Note: 22.04 LTS recommended for WSL stability)" -ForegroundColor Gray
|
||||
Write-Host ""
|
||||
Write-Host " Step 2: Restart your computer when prompted"
|
||||
Write-Host ""
|
||||
Write-Host " Step 3: After restart, open Ubuntu from the Start menu"
|
||||
Write-Host " and complete the initial setup (create username/password)"
|
||||
Write-Host ""
|
||||
Write-Host " Step 4: Run this script again: .\algo.ps1"
|
||||
Write-Host ""
|
||||
Write-Host "For detailed instructions, see:" -ForegroundColor Yellow
|
||||
Write-Host "https://github.com/trailofbits/algo/blob/master/docs/deploy-from-windows.md"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Check if any WSL distributions are installed and running
|
||||
Write-Host "Checking for WSL Linux distributions..."
|
||||
$wslList = wsl -l -v 2>$null
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
Write-Host "ERROR: WSL is installed but no Linux distributions are available." -ForegroundColor Red
|
||||
Write-Host ""
|
||||
Write-Host "You need to install Ubuntu. Run this command as Administrator:" -ForegroundColor Yellow
|
||||
Write-Host " wsl --install -d Ubuntu-22.04" -ForegroundColor Cyan
|
||||
Write-Host " (Note: 22.04 LTS recommended for WSL stability)" -ForegroundColor Gray
|
||||
Write-Host ""
|
||||
Write-Host "Then restart your computer and try again."
|
||||
exit 1
|
||||
}
|
||||
|
||||
Write-Host "Successfully found WSL. Launching Algo..." -ForegroundColor Green
|
||||
Write-Host ""
|
||||
|
||||
# Get current directory name for WSL path mapping
|
||||
$currentDir = Split-Path -Leaf (Get-Location)
|
||||
|
||||
try {
|
||||
if ($Arguments.Count -gt 0 -and $Arguments[0] -eq "update-users") {
|
||||
$remainingArgs = $Arguments[1..($Arguments.Count-1)] -join " "
|
||||
wsl bash -c "cd /mnt/c/$currentDir 2>/dev/null || (echo 'Error: Cannot access directory in WSL. Make sure you are running from a Windows drive (C:, D:, etc.)' && exit 1) && ./algo update-users $remainingArgs"
|
||||
} else {
|
||||
$allArgs = $Arguments -join " "
|
||||
wsl bash -c "cd /mnt/c/$currentDir 2>/dev/null || (echo 'Error: Cannot access directory in WSL. Make sure you are running from a Windows drive (C:, D:, etc.)' && exit 1) && ./algo $allArgs"
|
||||
}
|
||||
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
Write-Host ""
|
||||
Write-Host "Algo finished with exit code: $LASTEXITCODE" -ForegroundColor Yellow
|
||||
if ($LASTEXITCODE -eq 1) {
|
||||
Write-Host "This may indicate a configuration issue or user cancellation."
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
Write-Host ""
|
||||
Write-Host "ERROR: Failed to run Algo in WSL." -ForegroundColor Red
|
||||
Write-Host "Error details: $($_.Exception.Message)" -ForegroundColor Red
|
||||
Write-Host ""
|
||||
Write-Host "Troubleshooting:" -ForegroundColor Yellow
|
||||
Write-Host "1. Make sure you're running from a Windows drive (C:, D:, etc.)"
|
||||
Write-Host "2. Try opening Ubuntu directly and running: cd /mnt/c/$currentDir && ./algo"
|
||||
Write-Host "3. See: https://github.com/trailofbits/algo/blob/master/docs/deploy-from-windows.md"
|
||||
exit 1
|
||||
}
|
||||
}
|
||||
|
||||
# Main execution
|
||||
try {
|
||||
# Check if we're actually running inside WSL
|
||||
if (Test-RunningInWSL) {
|
||||
Write-Host "Detected WSL environment. Running Algo using standard Unix approach..."
|
||||
|
||||
# Verify bash is available (should be in WSL)
|
||||
if (-not (Get-Command bash -ErrorAction SilentlyContinue)) {
|
||||
Write-Host "ERROR: Running in WSL but bash is not available." -ForegroundColor Red
|
||||
Write-Host "Your WSL installation may be incomplete. Try running:" -ForegroundColor Yellow
|
||||
Write-Host " wsl --shutdown" -ForegroundColor Cyan
|
||||
Write-Host " wsl" -ForegroundColor Cyan
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Run the standard Unix algo script
|
||||
& bash -c "./algo $($Arguments -join ' ')"
|
||||
exit $LASTEXITCODE
|
||||
}
|
||||
|
||||
# We're on native Windows - need to use WSL
|
||||
Invoke-AlgoInWSL $Arguments
|
||||
|
||||
} catch {
|
||||
Write-Host ""
|
||||
Write-Host "UNEXPECTED ERROR:" -ForegroundColor Red
|
||||
Write-Host $_.Exception.Message -ForegroundColor Red
|
||||
Write-Host ""
|
||||
Write-Host "If you continue to have issues:" -ForegroundColor Yellow
|
||||
Write-Host "1. Ensure WSL is properly installed and Ubuntu is set up"
|
||||
Write-Host "2. See troubleshooting guide: https://github.com/trailofbits/algo/blob/master/docs/deploy-from-windows.md"
|
||||
Write-Host "3. Or use WSL directly: open Ubuntu and run './algo'"
|
||||
exit 1
|
||||
}
|
|
@ -7,6 +7,7 @@ timeout = 60
|
|||
stdout_callback = default
|
||||
display_skipped_hosts = no
|
||||
force_valid_group_names = ignore
|
||||
remote_tmp = /tmp/.ansible/tmp
|
||||
|
||||
[paramiko_connection]
|
||||
record_host_keys = False
|
||||
|
|
config.cfg (18 changed lines)
|
@ -12,6 +12,18 @@ users:
|
|||
|
||||
### Review these options BEFORE you run Algo, as they are very difficult/impossible to change after the server is deployed.
|
||||
|
||||
# Performance optimizations (reduces deployment time)
|
||||
# Skip reboots unless kernel was updated (saves 0-5 minutes)
|
||||
performance_skip_optional_reboots: false
|
||||
# Use parallel key generation for certificates (saves 1-2 minutes)
|
||||
performance_parallel_crypto: false
|
||||
# Batch install all packages in one operation (saves 30-60 seconds)
|
||||
performance_parallel_packages: false
|
||||
# Pre-install universal packages via cloud-init (saves 30-90 seconds)
|
||||
performance_preinstall_packages: false
|
||||
# Configure VPN services in parallel (saves 1-2 minutes)
|
||||
performance_parallel_services: false
|
||||
|
||||
# Change default SSH port for the cloud roles only
|
||||
# It doesn't apply if you deploy to your existing Ubuntu Server
|
||||
ssh_port: 4160
|
||||
|
@ -197,11 +209,11 @@ wireguard_network_ipv4: 10.49.0.0/16
|
|||
wireguard_network_ipv6: 2001:db8:a160::/48
|
||||
|
||||
# Randomly generated IP address for the local dns resolver
|
||||
local_service_ip: "{{ '172.16.0.1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}"
|
||||
local_service_ipv6: "{{ 'fd00::1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}"
|
||||
local_service_ip: "{{ '172.16.0.1' | ansible.utils.ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}"
|
||||
local_service_ipv6: "{{ 'fd00::1' | ansible.utils.ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}"
|
||||
|
||||
# Hide sensitive data
|
||||
no_log: true
|
||||
algo_no_log: true
|
||||
|
||||
congrats:
|
||||
common: |
|
||||
|
|
61
docs/aws-credentials.md
Normal file
|
@ -0,0 +1,61 @@
|
|||
# AWS Credential Configuration
|
||||
|
||||
Algo supports multiple methods for providing AWS credentials, following standard AWS practices:
|
||||
|
||||
## Methods (in order of precedence)
|
||||
|
||||
1. **Command-line variables** (highest priority)
|
||||
```bash
|
||||
./algo -e "aws_access_key=YOUR_KEY aws_secret_key=YOUR_SECRET"
|
||||
```
|
||||
|
||||
2. **Environment variables**
|
||||
```bash
|
||||
export AWS_ACCESS_KEY_ID=YOUR_KEY
|
||||
export AWS_SECRET_ACCESS_KEY=YOUR_SECRET
|
||||
export AWS_SESSION_TOKEN=YOUR_TOKEN # Optional, for temporary credentials
|
||||
./algo
|
||||
```
|
||||
|
||||
3. **AWS credentials file** (lowest priority)
|
||||
- Default location: `~/.aws/credentials`
|
||||
- Custom location: Set `AWS_SHARED_CREDENTIALS_FILE` environment variable
|
||||
- Profile selection: Set `AWS_PROFILE` environment variable (defaults to "default")
|
||||
|
||||
## Using AWS Credentials File
|
||||
|
||||
After running `aws configure` or manually creating `~/.aws/credentials`:
|
||||
|
||||
```ini
|
||||
[default]
|
||||
aws_access_key_id = YOUR_KEY_ID
|
||||
aws_secret_access_key = YOUR_SECRET_KEY
|
||||
|
||||
[work]
|
||||
aws_access_key_id = WORK_KEY_ID
|
||||
aws_secret_access_key = WORK_SECRET_KEY
|
||||
aws_session_token = TEMPORARY_TOKEN # Optional
|
||||
```
|
||||
|
||||
To use a specific profile:
|
||||
```bash
|
||||
AWS_PROFILE=work ./algo
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- Credentials files should have restricted permissions (600)
|
||||
- Consider using AWS IAM roles or temporary credentials when possible
|
||||
- Tools like [aws-vault](https://github.com/99designs/aws-vault) can provide additional security by storing credentials encrypted
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If Algo isn't finding your credentials:
|
||||
|
||||
1. Check file permissions: `ls -la ~/.aws/credentials`
|
||||
2. Verify the profile name matches: `AWS_PROFILE=your-profile`
|
||||
3. Test with AWS CLI: `aws sts get-caller-identity`
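A rough sketch of those checks from a shell; `~/.aws/credentials` and the `work` profile are just the example values used in this guide:

```bash
# Sketch of the checks above; adjust the path and profile to your setup.
ls -la ~/.aws/credentials              # should be readable only by you
chmod 600 ~/.aws/credentials           # tighten permissions if needed
aws configure list --profile work      # confirm the profile name resolves
aws sts get-caller-identity            # verify the credentials authenticate
```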
|
||||
|
||||
If credentials are found but authentication fails:
|
||||
- Ensure your IAM user has the required permissions (see [EC2 deployment guide](deploy-from-ansible.md))
|
||||
- Check if you need session tokens for temporary credentials
|
|
@ -6,10 +6,10 @@ Find the corresponding `mobileconfig` (Apple Profile) for each user and send it
|
|||
|
||||
## Enable the VPN
|
||||
|
||||
On iOS, connect to the VPN by opening **Settings** and clicking the toggle next to "VPN" near the top of the list. If using WireGuard, you can also enable the VPN from the WireGuard app. On macOS, connect to the VPN by opening **System Preferences** -> **Network**, finding the Algo VPN in the left column, and clicking "Connect." Check "Show VPN status in menu bar" to easily connect and disconnect from the menu bar.
|
||||
On iOS, connect to the VPN by opening **Settings** and clicking the toggle next to "VPN" near the top of the list. If using WireGuard, you can also enable the VPN from the WireGuard app. On macOS, connect to the VPN by opening **System Settings** -> **Network** (or **VPN** on macOS Sequoia 15.0+), finding the Algo VPN in the left column, and clicking "Connect." Check "Show VPN status in menu bar" to easily connect and disconnect from the menu bar.
|
||||
|
||||
## Managing "Connect On Demand"
|
||||
|
||||
If you enable "Connect On Demand", the VPN will connect automatically whenever it is able. Most Apple users will want to enable "Connect On Demand", but if you do then simply disabling the VPN will not cause it to stay disabled; it will just "Connect On Demand" again. To disable the VPN you'll need to disable "Connect On Demand".
|
||||
|
||||
On iOS, you can turn off "Connect On Demand" in **Settings** by clicking the (i) next to the entry for your Algo VPN and toggling off "Connect On Demand." On macOS, you can turn off "Connect On Demand" by opening **System Preferences** -> **Network**, finding the Algo VPN in the left column, unchecking the box for "Connect on demand", and clicking Apply.
|
||||
On iOS, you can turn off "Connect On Demand" in **Settings** by clicking the (i) next to the entry for your Algo VPN and toggling off "Connect On Demand." On macOS, you can turn off "Connect On Demand" by opening **System Settings** -> **Network** (or **VPN** on macOS Sequoia 15.0+), finding the Algo VPN in the left column, unchecking the box for "Connect on demand", and clicking Apply.
|
|
@ -4,7 +4,7 @@ Install strongSwan, then copy the included ipsec_user.conf, ipsec_user.secrets,
|
|||
|
||||
## Ubuntu Server example
|
||||
|
||||
1. `sudo apt-get install strongswan libstrongswan-standard-plugins`: install strongSwan
|
||||
1. `sudo apt install strongswan libstrongswan-standard-plugins`: install strongSwan
|
||||
2. `/etc/ipsec.d/certs`: copy `<name>.crt` from `algo-master/configs/<server_ip>/ipsec/.pki/certs/<name>.crt`
|
||||
3. `/etc/ipsec.d/private`: copy `<name>.key` from `algo-master/configs/<server_ip>/ipsec/.pki/private/<name>.key`
|
||||
4. `/etc/ipsec.d/cacerts`: copy `cacert.pem` from `algo-master/configs/<server_ip>/ipsec/manual/cacert.pem`
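The copy steps above can also be done in one pass from the Algo directory. This is only a sketch; `<server_ip>` and `<name>` stand in for your own deployment and username:

```bash
# Copy the Algo-generated client credentials into strongSwan's directories.
# <server_ip> and <name> are placeholders for your deployment and username.
sudo cp configs/<server_ip>/ipsec/.pki/certs/<name>.crt /etc/ipsec.d/certs/
sudo cp configs/<server_ip>/ipsec/.pki/private/<name>.key /etc/ipsec.d/private/
sudo cp configs/<server_ip>/ipsec/manual/cacert.pem /etc/ipsec.d/cacerts/
```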
|
||||
|
|
|
@ -13,7 +13,7 @@ sudo apt update && sudo apt upgrade
|
|||
|
||||
# Install WireGuard:
|
||||
sudo apt install wireguard
|
||||
# Note: openresolv is no longer needed on Ubuntu 22.10+
|
||||
# Note: openresolv is no longer needed on Ubuntu 22.04 LTS+
|
||||
```
|
||||
|
||||
For installation on other Linux distributions, see the [Installation](https://www.wireguard.com/install/) page on the WireGuard site.
|
||||
|
|
|
@ -1,88 +1,190 @@
|
|||
# Using Router with OpenWRT as a Client with WireGuard
|
||||
This scenario is useful in case you want to use vpn with devices which has no vpn capability like smart tv, or make vpn connection available via router for multiple devices.
|
||||
This is a tested, working scenario with following environment:
|
||||
# OpenWrt Router as WireGuard Client
|
||||
|
||||
- algo installed ubuntu at digitalocean
|
||||
- client side router "TP-Link TL-WR1043ND" with openwrt ver. 21.02.1. [Openwrt Install instructions](https://openwrt.org/toh/tp-link/tl-wr1043nd)
|
||||
- or client side router "TP-Link Archer C20i AC750" with openwrt ver. 21.02.1. [Openwrt install instructions](https://openwrt.org/toh/tp-link/archer_c20i)
|
||||
see compatible device list at https://openwrt.org/toh/start . Theoretically, any of the devices on the list should work
|
||||
This guide explains how to configure an OpenWrt router as a WireGuard VPN client, allowing all devices connected to your network to route traffic through your Algo VPN automatically. This setup is ideal for devices that don't support VPN natively (smart TVs, IoT devices, game consoles) or when you want seamless VPN access for all network clients.
|
||||
|
||||
## Use Cases
|
||||
|
||||
- Connect devices without native VPN support (smart TVs, gaming consoles, IoT devices)
|
||||
- Automatically route all connected devices through the VPN
|
||||
- Create a secure connection when traveling with multiple devices
|
||||
- Configure VPN once at the router level instead of per-device
|
||||
|
||||
## Router setup
|
||||
Make sure that you have
|
||||
- router with openwrt installed,
|
||||
- router is connected to internet,
|
||||
- router and device in front of router do not have the same IP. By default, OpenWrt has 192.168.1.1 if so change it to something like 192.168.2.1
|
||||
### Install required packages(WebUI)
|
||||
- Open router web UI (mostly http://192.168.1.1)
|
||||
- Login. (by default username: root, password:<empty>
|
||||
- System -> Software, click "Update lists"
|
||||
- Install following packages wireguard-tools, kmod-wireguard, luci-app-wireguard, wireguard, kmod-crypto-sha256, kmod-crypto-sha1, kmod-crypto-md5
|
||||
- restart router
|
||||
## Prerequisites
|
||||
|
||||
### Alternative Install required packages(ssh)
|
||||
- Open router web UI (mostly http://192.168.1.1)
|
||||
- ssh root@192.168.1.1
|
||||
- opkg update
|
||||
- opkg install wireguard-tools, kmod-wireguard, luci-app-wireguard, wireguard, kmod-crypto-sha256, kmod-crypto-sha1, kmod-crypto-md5
|
||||
- reboot
|
||||
You'll need an OpenWrt-compatible router with sufficient RAM (minimum 64MB recommended) and OpenWrt 23.05 or later installed. Your Algo VPN server must be deployed and running, and you'll need the WireGuard configuration file from your Algo deployment.
|
||||
|
||||
### Create an Interface(WebUI)
|
||||
- Open router web UI
|
||||
- Navigate Network -> Interface
|
||||
- Click "Add new interface"
|
||||
- Give a Name. e.g. `AlgoVpn`
|
||||
- Select Protocol. `Wireguard VPN`
|
||||
- click `Create Interface`
|
||||
- In *General Settings* tab
|
||||
- `Bring up on boot` *checked*
|
||||
- Private key: `Interface -> Private Key` from algo config file
|
||||
- Ip Address: `Interface -> Address` from algo config file
|
||||
- In *Peers* tab
|
||||
- Click add
|
||||
- Name `algo`
|
||||
- Public key: `[Peer]->PublicKey` from algo config file
|
||||
- Preshared key: `[Peer]->PresharedKey` from algo config file
|
||||
- Allowed IPs: 0.0.0.0/0
|
||||
- Route Allowed IPs: checked
|
||||
- Endpoint Host: `[Peer]->Endpoint` ip from algo config file
|
||||
- Endpoint Port: `[Peer]->Endpoint` port from algo config file
|
||||
- Persistent Keep Alive: `25`
|
||||
- Click Save & Save Apply
|
||||
Ensure your router's LAN subnet doesn't conflict with the upstream network. The default OpenWrt LAN IP is `192.168.1.1`; change it to something like `192.168.2.1` if there is a conflict.
|
||||
|
||||
### Configure Firewall(WebUI)
|
||||
- Open router web UI
|
||||
- Navigate to Network -> Firewall
|
||||
- Click `Add configuration`:
|
||||
- Name: e.g. ivpn_fw
|
||||
- Input: Reject
|
||||
- Output: Accept
|
||||
- Forward: Reject
|
||||
- Masquerading: Checked
|
||||
- MSS clamping: Checked
|
||||
- Covered networks: Select created VPN interface
|
||||
- Allow forward to destination zones - Unspecified
|
||||
- Allow forward from source zones - lan
|
||||
- Click Save & Save Apply
|
||||
- Reboot router
|
||||
This configuration has been verified on TP-Link TL-WR1043ND and TP-Link Archer C20i AC750 with OpenWrt 23.05+. For compatibility with other devices, check the [OpenWrt Table of Hardware](https://openwrt.org/toh/start).
|
||||
|
||||
## Install Required Packages
|
||||
|
||||
There may be additional configuration required depending on environment like dns configuration.
|
||||
### Web Interface Method
|
||||
|
||||
You can also verify the configuration using ssh. /etc/config/network. It should look like
|
||||
1. Access your router's web interface (typically `http://192.168.1.1`)
|
||||
2. Login with your credentials (default: username `root`, no password)
|
||||
3. Navigate to System → Software
|
||||
4. Click "Update lists" to refresh the package database
|
||||
5. Search for and install these packages:
|
||||
- `wireguard-tools`
|
||||
- `kmod-wireguard`
|
||||
- `luci-app-wireguard`
|
||||
- `wireguard`
|
||||
- `kmod-crypto-sha256`
|
||||
- `kmod-crypto-sha1`
|
||||
- `kmod-crypto-md5`
|
||||
6. Restart the router after installation completes
|
||||
|
||||
### SSH Method
|
||||
|
||||
1. SSH into your router: `ssh root@192.168.1.1`
|
||||
2. Update the package list:
|
||||
```bash
|
||||
opkg update
|
||||
```
|
||||
3. Install required packages:
|
||||
```bash
|
||||
opkg install wireguard-tools kmod-wireguard luci-app-wireguard wireguard kmod-crypto-sha256 kmod-crypto-sha1 kmod-crypto-md5
|
||||
```
|
||||
4. Reboot the router:
|
||||
```bash
|
||||
reboot
|
||||
```
|
||||
|
||||
## Locate Your WireGuard Configuration
|
||||
|
||||
Before proceeding, locate your WireGuard configuration file from your Algo deployment. This file is typically located at:
|
||||
```
|
||||
configs/<server_ip>/wireguard/<username>.conf
|
||||
```
|
||||
config interface 'algo'
|
||||
option proto 'wireguard'
|
||||
list addresses '10.0.0.2/32'
|
||||
option private_key '......' # The private key generated by itself just now
|
||||
|
||||
config wireguard_wg0
|
||||
option public_key '......' # Server's public key
|
||||
Your configuration file should look similar to:
|
||||
```ini
|
||||
[Interface]
|
||||
PrivateKey = <your_private_key>
|
||||
Address = 10.49.0.2/16
|
||||
DNS = 172.16.0.1
|
||||
|
||||
[Peer]
|
||||
PublicKey = <server_public_key>
|
||||
PresharedKey = <preshared_key>
|
||||
AllowedIPs = 0.0.0.0/0, ::/0
|
||||
Endpoint = <server_ip>:51820
|
||||
PersistentKeepalive = 25
|
||||
```
|
||||
|
||||
## Configure WireGuard Interface
|
||||
|
||||
1. In the OpenWrt web interface, navigate to Network → Interfaces
|
||||
2. Click "Add new interface..."
|
||||
3. Set the name to `AlgoVPN` (or your preferred name) and select "WireGuard VPN" as the protocol
|
||||
4. Click "Create interface"
|
||||
|
||||
In the General Settings tab:
|
||||
- Check "Bring up on boot"
|
||||
- Enter your private key from the Algo config file
|
||||
- Add your IP address from the Algo config file (e.g., `10.49.0.2/16`)
|
||||
|
||||
Switch to the Peers tab and click "Add peer":
|
||||
- Description: `Algo Server`
|
||||
- Public Key: Copy from the `[Peer]` section of your config
|
||||
- Preshared Key: Copy from the `[Peer]` section of your config
|
||||
- Allowed IPs: `0.0.0.0/0, ::/0` (routes all traffic through VPN)
|
||||
- Route Allowed IPs: Check this box
|
||||
- Endpoint Host: Extract the IP address from the `Endpoint` line
|
||||
- Endpoint Port: Extract the port from the `Endpoint` line (typically `51820`)
|
||||
- Persistent Keep Alive: `25`
|
||||
|
||||
Click "Save & Apply".
|
||||
|
||||
## Configure Firewall Rules
|
||||
|
||||
1. Navigate to Network → Firewall
|
||||
2. Click "Add" to create a new zone
|
||||
3. Configure the firewall zone:
|
||||
- Name: `vpn`
|
||||
- Input: `Reject`
|
||||
- Output: `Accept`
|
||||
- Forward: `Reject`
|
||||
- Masquerading: Check this box
|
||||
- MSS clamping: Check this box
|
||||
- Covered networks: Select your WireGuard interface (`AlgoVPN`)
|
||||
|
||||
4. In the Inter-Zone Forwarding section:
|
||||
- Allow forward from source zones: Select `lan`
|
||||
- Allow forward to destination zones: Leave unspecified
|
||||
|
||||
5. Click "Save & Apply"
|
||||
6. Reboot your router to ensure all changes take effect
|
||||
|
||||
## Verification and Testing
|
||||
|
||||
Navigate to Network → Interfaces and verify your WireGuard interface shows as "Connected" with a green status. Check that it has received the correct IP address.
|
||||
|
||||
From a device connected to your router, visit https://whatismyipaddress.com/. Your public IP should match your Algo VPN server's IP address. Test DNS resolution to ensure it's working through the VPN.
|
||||
|
||||
For command line verification, SSH into your router and check:
|
||||
```bash
|
||||
# Check interface status
|
||||
wg show
|
||||
|
||||
# Check routing table
|
||||
ip route
|
||||
|
||||
# Test connectivity
|
||||
ping 8.8.8.8
|
||||
```
|
||||
|
||||
## Configuration File Reference
|
||||
|
||||
Your OpenWrt network configuration (`/etc/config/network`) should include sections similar to:
|
||||
|
||||
```uci
|
||||
config interface 'AlgoVPN'
|
||||
option proto 'wireguard'
|
||||
list addresses '10.49.0.2/16'
|
||||
option private_key '<your_private_key>'
|
||||
|
||||
config wireguard_AlgoVPN
|
||||
option public_key '<server_public_key>'
|
||||
option preshared_key '<preshared_key>'
|
||||
option route_allowed_ips '1'
|
||||
list allowed_ips '0.0.0.0/0'
|
||||
option endpoint_host '......' # Server's public ip address
|
||||
list allowed_ips '::/0'
|
||||
option endpoint_host '<server_ip>'
|
||||
option endpoint_port '51820'
|
||||
option persistent_keepalive '25'
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If the interface won't connect, verify all keys are correctly copied with no extra spaces or line breaks. Check that your Algo server is running and accessible, and confirm the endpoint IP and port are correct.
|
||||
|
||||
If you have no internet access after connecting, verify firewall rules allow forwarding from LAN to VPN zone. Check that masquerading is enabled on the VPN zone and ensure MSS clamping is enabled.
|
||||
|
||||
If some websites don't work, try disabling MSS clamping temporarily to test. Verify DNS is working by testing `nslookup google.com` and check that IPv6 is properly configured if used.
|
||||
|
||||
For DNS resolution issues, configure custom DNS servers in Network → DHCP and DNS. Consider using your Algo server's DNS (typically `172.16.0.1`).
|
||||
|
||||
Check system logs for WireGuard-related errors:
|
||||
```bash
|
||||
# View system logs
|
||||
logread | grep -i wireguard
|
||||
|
||||
# Check kernel messages
|
||||
dmesg | grep -i wireguard
|
||||
```
|
||||
|
||||
## Advanced Configuration
|
||||
|
||||
For split tunneling (routing only specific traffic through the VPN), change "Allowed IPs" in the peer configuration to specific subnets and add custom routing rules for desired traffic.
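As a rough sketch only, the same change can be made over SSH with `uci`. The section name `@wireguard_AlgoVPN[0]` assumes a single peer defined under the interface name used in this guide; verify against your own `/etc/config/network` before committing:

```bash
# Hypothetical split tunnel: replace 0.0.0.0/0 with specific subnets on the
# existing peer section ("@wireguard_AlgoVPN[0]" assumes one peer is defined).
uci delete network.@wireguard_AlgoVPN[0].allowed_ips
uci add_list network.@wireguard_AlgoVPN[0].allowed_ips='10.49.0.0/16'
uci add_list network.@wireguard_AlgoVPN[0].allowed_ips='192.0.2.0/24'
uci commit network
/etc/init.d/network reload
```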
|
||||
|
||||
If your Algo server supports IPv6, add the IPv6 address to your interface configuration and include `::/0` in "Allowed IPs" for the peer.
|
||||
|
||||
For optimal privacy, configure your router to use your Algo server's DNS by navigating to Network → DHCP and DNS and adding your Algo DNS server IP (typically `172.16.0.1`) to the DNS forwardings.
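A minimal sketch of the same change from the command line, assuming the stock dnsmasq configuration and the typical Algo resolver address:

```bash
# Forward DNS queries to the Algo resolver (172.16.0.1 is the typical
# local_service_ip; check your own deployment's value).
uci add_list dhcp.@dnsmasq[0].server='172.16.0.1'
uci commit dhcp
/etc/init.d/dnsmasq restart
```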
|
||||
|
||||
## Security Notes
|
||||
|
||||
Store your private keys securely and never share them. Keep OpenWrt and packages updated for security patches. Regularly check VPN connectivity to ensure ongoing protection, and save your configuration before making changes.
|
||||
|
||||
This configuration routes ALL traffic from your router through the VPN. If you need selective routing or have specific requirements, consider consulting the [OpenWrt WireGuard documentation](https://openwrt.org/docs/guide-user/services/vpn/wireguard/start) for advanced configurations.
|
95
docs/client-windows.md
Normal file
|
@ -0,0 +1,95 @@
|
|||
# Windows Client Setup
|
||||
|
||||
This guide will help you set up your Windows device to connect to your Algo VPN server.
|
||||
|
||||
## Supported Versions
|
||||
|
||||
- Windows 10 (all editions)
|
||||
- Windows 11 (all editions)
|
||||
- Windows Server 2016 and later
|
||||
|
||||
## WireGuard Setup (Recommended)
|
||||
|
||||
WireGuard is the recommended VPN protocol for Windows clients due to its simplicity and performance.
|
||||
|
||||
### Installation
|
||||
|
||||
1. Download and install the official [WireGuard client for Windows](https://www.wireguard.com/install/)
|
||||
2. Locate your configuration file: `configs/<server-ip>/wireguard/<username>.conf`
|
||||
3. In the WireGuard application, click "Import tunnel(s) from file"
|
||||
4. Select your `.conf` file and import it
|
||||
5. Click "Activate" to connect to your VPN
|
||||
|
||||
### Alternative Import Methods
|
||||
|
||||
- **QR Code**: If you have access to the QR code (`wireguard/<username>.png`), you can scan it using a mobile device first, then export the configuration
|
||||
- **Manual Entry**: You can create a new empty tunnel and paste the contents of your `.conf` file
|
||||
|
||||
## IPsec/IKEv2 Setup (Legacy)
|
||||
|
||||
While Algo supports IPsec/IKEv2, it requires PowerShell scripts for Windows setup. WireGuard is strongly recommended instead.
|
||||
|
||||
If you must use IPsec:
|
||||
1. Locate the PowerShell setup script in your configs directory
|
||||
2. Run PowerShell as Administrator
|
||||
3. Execute the setup script
|
||||
4. The VPN connection will appear in Settings → Network & Internet → VPN
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "The parameter is incorrect" Error
|
||||
|
||||
This is a common error that occurs when trying to connect. See the [troubleshooting guide](troubleshooting.md#windows-the-parameter-is-incorrect-error-when-connecting) for the solution.
|
||||
|
||||
### Connection Issues
|
||||
|
||||
1. **Check Windows Firewall**: Ensure Windows Firewall isn't blocking the VPN connection
|
||||
2. **Verify Server Address**: Make sure the server IP/domain in your configuration is correct
|
||||
3. **Check Date/Time**: Ensure your system date and time are correct
|
||||
4. **Disable Other VPNs**: Disconnect from any other VPN services before connecting
|
||||
|
||||
### WireGuard Specific Issues
|
||||
|
||||
- **DNS Not Working**: Check if "Block untunneled traffic (kill-switch)" is enabled in tunnel settings
|
||||
- **Slow Performance**: Try reducing the MTU in the tunnel configuration (default is 1420)
|
||||
- **Can't Import Config**: Ensure the configuration file has a `.conf` extension
|
||||
|
||||
### Performance Optimization
|
||||
|
||||
1. **Use WireGuard**: It's significantly faster than IPsec on Windows
|
||||
2. **Close Unnecessary Apps**: Some antivirus or firewall software can slow down VPN connections
|
||||
3. **Check Network Adapter**: Update your network adapter drivers to the latest version
|
||||
|
||||
## Advanced Configuration
|
||||
|
||||
### Split Tunneling
|
||||
|
||||
To exclude certain traffic from the VPN:
|
||||
1. Edit your WireGuard configuration file
|
||||
2. Modify the `AllowedIPs` line to exclude specific networks
|
||||
3. For example, to exclude local network: Remove `0.0.0.0/0` and add specific routes
|
||||
|
||||
### Automatic Connection
|
||||
|
||||
To connect automatically:
|
||||
1. Open WireGuard
|
||||
2. Select your tunnel
|
||||
3. Edit → Uncheck "On-demand activation"
|
||||
4. Windows will maintain the connection automatically
|
||||
|
||||
### Multiple Servers
|
||||
|
||||
You can import multiple `.conf` files for different Algo servers. Give each a descriptive name to distinguish them.
|
||||
|
||||
## Security Notes
|
||||
|
||||
- Keep your configuration files secure - they contain your private keys
|
||||
- Don't share your configuration with others
|
||||
- Each user should have their own unique configuration
|
||||
- Regularly update your WireGuard client for security patches
|
||||
|
||||
## Need More Help?
|
||||
|
||||
- Check the main [troubleshooting guide](troubleshooting.md)
|
||||
- Review [WireGuard documentation](https://www.wireguard.com/quickstart/)
|
||||
- [Create a discussion](https://github.com/trailofbits/algo/discussions) for help
|
|
@ -1,64 +1,81 @@
|
|||
# Amazon EC2 cloud setup
|
||||
# Amazon EC2 Cloud Setup
|
||||
|
||||
## AWS account creation
|
||||
This guide walks you through setting up Algo VPN on Amazon EC2, including account creation, permissions configuration, and deployment process.
|
||||
|
||||
Creating an Amazon AWS account requires giving Amazon a phone number that can receive a call and has a number pad to enter a PIN challenge displayed in the browser. This phone system prompt occasionally fails to correctly validate input, but try again (request a new PIN in the browser) until you succeed.
|
||||
## AWS Account Creation
|
||||
|
||||
### Select an EC2 plan
|
||||
Creating an Amazon AWS account requires providing a phone number that can receive automated calls with PIN verification. The phone verification system occasionally fails, but you can request a new PIN and try again until it succeeds.
|
||||
|
||||
The cheapest EC2 plan you can choose is the "Free Plan" a.k.a. the ["AWS Free Tier"](https://aws.amazon.com/free/). It is only available to new AWS customers, it has limits on usage, and it converts to standard pricing after 12 months (the "introductory period"). After you exceed the usage limits, after the 12 month period, or if you are an existing AWS customer, then you will pay standard pay-as-you-go service prices.
|
||||
## Choose Your EC2 Plan
|
||||
|
||||
*Note*: Your Algo instance will not stop working when you hit the bandwidth limit, you will just start accumulating service charges on your AWS account.
|
||||
### AWS Free Tier
|
||||
|
||||
As of the time of this writing (June 2024), the Free Tier limits include "750 hours of Amazon EC2 Linux t2.micro (some regions like the Middle East (Bahrain) region and the EU (Stockholm) region [do not offer t2.micro instances](https://aws.amazon.com/free/free-tier-faqs/)) or t3.micro instance usage" per month, [100 GB of bandwidth (outbound) per month](https://repost.aws/questions/QUAT1NfOeZSAK5z8KXXO9jgA/do-amazon-aws-ec2-free-tier-have-a-bandwidth-limit#ANNZSAFFk3T0Kv7ZHnZwf9Mw) from [November 2021](https://aws.amazon.com/blogs/aws/aws-free-tier-data-transfer-expansion-100-gb-from-regions-and-1-tb-from-amazon-cloudfront-per-month/), and 30 GB of cloud storage. Algo will not even use 1% of the storage limit, but you may have to monitor your bandwidth usage or keep an eye out for the email from Amazon when you are about to exceed the Free Tier limits.
|
||||
The most cost-effective option for new AWS customers is the [AWS Free Tier](https://aws.amazon.com/free/), which provides:
|
||||
|
||||
If you are not eligible for the free tier plan or have passed the 12 months of the introductory period, you can switch to [AWS Graviton](https://aws.amazon.com/ec2/graviton/) instances that are generally cheaper. To use the graviton instances, make the following changes in the ec2 section of your `config.cfg` file:
|
||||
* Set the `size` to `t4g.nano`
|
||||
* Set the `arch` to `arm64`
|
||||
- 750 hours of Amazon EC2 Linux t2.micro or t3.micro instance usage per month
|
||||
- 100 GB of outbound data transfer per month
|
||||
- 30 GB of cloud storage
|
||||
|
||||
> Currently, among all the instance sizes available on AWS, the t4g.nano instance is the least expensive option that does not require any promotional offers. However, AWS is currently running a promotion that provides a free trial of the `t4g.small` instance until December 31, 2023, which is available to all customers. For more information about this promotion, please refer to the [documentation](https://aws.amazon.com/ec2/faqs/#t4g-instances).
|
||||
The Free Tier is available for 12 months from account creation. Some regions like Middle East (Bahrain) and EU (Stockholm) don't offer t2.micro instances, but t3.micro is available as an alternative.
|
||||
|
||||
Additional configurations are documented in the [EC2 section of the deploy from ansible guide](https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md#amazon-ec2)
|
||||
Note that your Algo instance will continue working if you exceed bandwidth limits - you'll just start accruing standard charges on your AWS account.
|
||||
|
||||
### Create an AWS permissions policy
|
||||
### Cost-Effective Alternatives
|
||||
|
||||
In the AWS console, find the policies menu: click Services > IAM > Policies. Click Create Policy.
|
||||
If you're not eligible for the Free Tier or prefer more predictable costs, consider AWS Graviton instances. To use Graviton instances, modify your `config.cfg` file:
|
||||
|
||||
Here, you have the policy editor. Switch to the JSON tab and copy-paste over the existing empty policy with [the minimum required AWS policy needed for Algo deployment](https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md#minimum-required-iam-permissions-for-deployment).
|
||||
```yaml
|
||||
ec2:
|
||||
size: t4g.nano
|
||||
arch: arm64
|
||||
```
|
||||
|
||||
When prompted to name the policy, name it `AlgoVPN_Provisioning`.
|
||||
The t4g.nano instance is currently the least expensive option without promotional requirements. AWS is also running a promotion offering free t4g.small instances until December 31, 2025 - see the [AWS documentation](https://aws.amazon.com/ec2/faqs/#t4g-instances) for details.
|
||||
|
||||
For additional EC2 configuration options, see the [deploy from ansible guide](https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md#amazon-ec2).
|
||||
|
||||
## Set Up IAM Permissions
|
||||
|
||||
### Create IAM Policy
|
||||
|
||||
1. In the AWS console, navigate to Services → IAM → Policies
|
||||
2. Click "Create Policy"
|
||||
3. Switch to the JSON tab
|
||||
4. Replace the default content with the [minimum required AWS policy for Algo deployment](https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md#minimum-required-iam-permissions-for-deployment)
|
||||
5. Name the policy `AlgoVPN_Provisioning`
|
||||
|
||||

|
||||
|
||||
### Set up an AWS user
|
||||
### Create IAM User
|
||||
|
||||
In the AWS console, find the users (“Identity and Access Management”, a.k.a. IAM users) menu: click Services > IAM.
|
||||
|
||||
Activate multi-factor authentication (MFA) on your root account. The simplest choice is the mobile app "Google Authenticator." A hardware U2F token is ideal (less prone to a phishing attack), but a TOTP authenticator like this is good enough.
|
||||
1. Navigate to Services → IAM → Users
|
||||
2. Enable multi-factor authentication (MFA) on your root account using Google Authenticator or a hardware token
|
||||
3. Click "Add User" and create a username (e.g., `algovpn`)
|
||||
4. Select "Programmatic access"
|
||||
5. Click "Next: Permissions"
|
||||
|
||||

|
||||
|
||||
Now "Create individual IAM users" and click Add User. Create a user name. I chose “algovpn”. Then click the box next to Programmatic Access. Then click Next.
|
||||
|
||||

|
||||
|
||||
Next, click “Attach existing policies directly.” Type “Algo” in the search box to filter the policies. Find “AlgoVPN_Provisioning” (the policy you created) and click the checkbox next to that. Click Next when you’re done.
|
||||
6. Choose "Attach existing policies directly"
|
||||
7. Search for "Algo" and select the `AlgoVPN_Provisioning` policy you created
|
||||
8. Click "Next: Tags" (optional), then "Next: Review"
|
||||
|
||||

|
||||
|
||||
The user creation confirmation screen should look like this if you've done everything correctly.
|
||||
|
||||

|
||||
|
||||
On the final screen, click the Download CSV button. This file includes the AWS access keys you’ll need during the Algo set-up process. Click Close, and you’re all set.
|
||||
9. Review your settings and click "Create user"
|
||||
10. Download the CSV file containing your access credentials - you'll need these for Algo deployment
|
||||
|
||||

|
||||
|
||||
## Using EC2 during Algo setup
|
||||
Keep the CSV file secure as it contains sensitive credentials that grant access to your AWS account.
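The console flow above can also be approximated from the AWS CLI. A hedged sketch, with `<account-id>` standing in for your AWS account number:

```bash
# Create the user, attach the AlgoVPN_Provisioning policy, and issue keys.
aws iam create-user --user-name algovpn
aws iam attach-user-policy --user-name algovpn \
  --policy-arn arn:aws:iam::<account-id>:policy/AlgoVPN_Provisioning
aws iam create-access-key --user-name algovpn   # note the key ID and secret
```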
|
||||
|
||||
After you have downloaded Algo and installed its dependencies, the next step is running Algo to provision the VPN server on your AWS account.
|
||||
## Deploy with Algo
|
||||
|
||||
First, you will be asked which server type to setup. You would want to enter "3" to use Amazon EC2.
|
||||
Once you've installed Algo and its dependencies, you can deploy your VPN server to EC2.
|
||||
|
||||
### Provider Selection
|
||||
|
||||
Run `./algo` and select Amazon EC2 when prompted:
|
||||
|
||||
```
|
||||
$ ./algo
|
||||
|
@ -81,7 +98,15 @@ Enter the number of your desired provider
|
|||
: 3
|
||||
```
|
||||
|
||||
Next, you will be asked for the AWS Access Key (Access Key ID) and AWS Secret Key (Secret Access Key) that you received in the CSV file when you setup the account (don't worry if you don't see your text entered in the console; the key input is hidden here by Algo).
|
||||
### AWS Credentials
|
||||
|
||||
Algo will automatically detect AWS credentials in this order:
|
||||
|
||||
1. Command-line variables
|
||||
2. Environment variables (`AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`)
|
||||
3. AWS credentials file (`~/.aws/credentials`)
|
||||
|
||||
If no credentials are found, you'll be prompted to enter them manually:
|
||||
|
||||
```
|
||||
Enter your aws_access_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
|
||||
|
@ -94,14 +119,18 @@ Enter your aws_secret_key (http://docs.aws.amazon.com/general/latest/gr/managing
|
|||
[ABCD...]:
|
||||
```
|
||||
|
||||
You will be prompted for the server name to enter. Feel free to leave this as the default ("algo") if you are not certain how this will affect your setup. Here we chose to call it "algovpn".
|
||||
For detailed credential configuration options, see the [AWS Credentials guide](aws-credentials.md).
|
||||
|
||||
### Server Configuration
|
||||
|
||||
You'll be prompted to name your server (default is "algo"):
|
||||
|
||||
```
|
||||
Name the vpn server:
|
||||
[algo]: algovpn
|
||||
```
|
||||
|
||||
After entering the server name, the script ask which region you wish to setup your new Algo instance in. Enter the number next to name of the region.
|
||||
Next, select your preferred AWS region:
|
||||
|
||||
```
|
||||
What region should the server be located in?
|
||||
|
@ -128,8 +157,20 @@ Enter the number of your desired region
|
|||
:
|
||||
```
|
||||
|
||||
You will then be asked the remainder of the standard Algo setup questions.
|
||||
Choose a region close to your location for optimal performance, keeping in mind that some regions may have different pricing or instance availability.
|
||||
|
||||
## Cleanup
|
||||
After region selection, Algo will continue with the standard setup questions for user configuration and VPN options.
|
||||
|
||||
If you've installed Algo onto EC2 multiple times, your AWS account may become cluttered with unused or deleted resources e.g. instances, VPCs, subnets, etc. This may cause future installs to fail. The easiest way to clean up after you're done with a server is to go to "CloudFormation" from the console and delete the CloudFormation stack associated with that server. Please note that unless you've enabled termination protection on your instance, deleting the stack this way will delete your instance without warning, so be sure you are deleting the correct stack.
|
||||
## Resource Cleanup
|
||||
|
||||
If you deploy Algo to EC2 multiple times, unused resources (instances, VPCs, subnets) may accumulate and potentially cause future deployment issues.
|
||||
|
||||
The cleanest way to remove an Algo deployment is through CloudFormation:
|
||||
|
||||
1. Go to the AWS console and navigate to CloudFormation
|
||||
2. Find the stack associated with your Algo server
|
||||
3. Delete the entire stack
|
||||
|
||||
Warning: Deleting a CloudFormation stack will permanently delete your EC2 instance and all associated resources unless you've enabled termination protection. Make sure you're deleting the correct stack and have backed up any important data.
|
||||
|
||||
This approach ensures all related AWS resources are properly cleaned up, preventing resource conflicts in future deployments.
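If you'd rather not use the console, the stack can also be removed with the AWS CLI. This is a sketch that assumes your stack is named after the server you chose (e.g. `algovpn`):

```bash
# List remaining stacks, then delete the one that matches your Algo server.
aws cloudformation list-stacks \
  --query "StackSummaries[?StackStatus!='DELETE_COMPLETE'].StackName"
aws cloudformation delete-stack --stack-name algovpn
aws cloudformation wait stack-delete-complete --stack-name algovpn
```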
|
|
@ -1,10 +1,17 @@
|
|||
# Deploy from Google Cloud Shell
|
||||
|
||||
If you want to try Algo but don't wish to install the software on your own system, you can use the **free** [Google Cloud Shell](https://cloud.google.com/shell/) to deploy a VPN to any supported cloud provider. Note that you cannot choose `Install to existing Ubuntu server` to turn Google Cloud Shell into your VPN server.
|
||||
If you want to try Algo but don't wish to install anything on your own system, you can use the **free** [Google Cloud Shell](https://cloud.google.com/shell/) to deploy a VPN to any supported cloud provider. Note that you cannot choose `Install to existing Ubuntu server` to turn Google Cloud Shell into your VPN server.
|
||||
|
||||
1. See the [Cloud Shell documentation](https://cloud.google.com/shell/docs/) to start an instance of Cloud Shell in your browser.
|
||||
|
||||
2. Follow the [Algo installation instructions](https://github.com/trailofbits/algo#deploy-the-algo-server) as shown but skip step **3. Install Algo's core dependencies** as they are already installed. Run Algo to deploy to a supported cloud provider.
|
||||
2. Get Algo and run it:
|
||||
```bash
|
||||
git clone https://github.com/trailofbits/algo.git
|
||||
cd algo
|
||||
./algo
|
||||
```
|
||||
|
||||
The first time you run `./algo`, it will automatically install all required dependencies. Google Cloud Shell already has most tools available, making this even faster than on your local system.
|
||||
|
||||
3. Once Algo has completed, retrieve a copy of the configuration files that were created to your local system. While still in the Algo directory, run:
|
||||
```
|
||||
|
|
|
@ -1,66 +1,22 @@
|
|||
# Deploy from macOS
|
||||
|
||||
While you can't turn a macOS system in an AlgoVPN, you can install the Algo scripts on a macOS system and use them to deploy your AlgoVPN to a cloud provider.
|
||||
You can install the Algo scripts on a macOS system and use them to deploy your AlgoVPN to a cloud provider.
|
||||
|
||||
Algo uses [Ansible](https://www.ansible.com) which requires Python 3. macOS includes an obsolete version of Python 2 installed as `/usr/bin/python` which you should ignore.
|
||||
## Installation
|
||||
|
||||
## macOS 10.15 Catalina
|
||||
Algo handles all Python setup automatically. Simply:
|
||||
|
||||
Catalina comes with Python 3 installed as `/usr/bin/python3`. This file, and certain others like `/usr/bin/git`, start out as stub files that prompt you to install the Command Line Developer Tools package the first time you run them. This is the easiest way to install Python 3 on Catalina.
|
||||
1. Get Algo: `git clone https://github.com/trailofbits/algo.git && cd algo`
|
||||
2. Run Algo: `./algo`
|
||||
|
||||
Note that Python 3 from Command Line Developer Tools prior to the release for Xcode 11.5 on 2020-05-20 might not work with Algo. If Software Update does not offer to update an older version of the tools, you can download a newer version from [here](https://developer.apple.com/download/more/) (Apple ID login required).
|
||||
The first time you run `./algo`, it will automatically install the required Python environment (Python 3.11+) using [uv](https://docs.astral.sh/uv/), a fast Python package manager. This works on all macOS versions without any manual Python installation.
|
||||
|
||||
## macOS prior to 10.15 Catalina
|
||||
## What happens automatically
|
||||
|
||||
You'll need to install Python 3 before you can run Algo. Python 3 is available from different packagers, two of which are listed below.
|
||||
When you run `./algo` for the first time:
|
||||
- uv is installed automatically using curl
|
||||
- Python 3.11+ is installed and managed by uv
|
||||
- All required dependencies (Ansible, etc.) are installed
|
||||
- Your VPN deployment begins
|
||||
|
||||
### Ansible and SSL Validation
|
||||
|
||||
Ansible validates SSL network connections using OpenSSL but macOS includes LibreSSL which behaves differently. Therefore each version of Python below includes or depends on its own copy of OpenSSL.
|
||||
|
||||
OpenSSL needs access to a list of trusted CA certificates in order to validate SSL connections. Each packager handles initializing this certificate store differently. If you see the error `CERTIFICATE_VERIFY_FAILED` when running Algo make sure you've followed the packager-specific instructions correctly.
|
||||
|
||||
### Choose a packager and install Python 3
|
||||
|
||||
Choose one of the packagers below as your source for Python 3. Avoid installing versions from multiple packagers on the same Mac as you may encounter conflicts. In particular they might fight over creating symbolic links in `/usr/local/bin`.
|
||||
|
||||
#### Option 1: Install using the Homebrew package manager
|
||||
|
||||
If you're comfortable using the command line in Terminal the [Homebrew](https://brew.sh) project is a great source of software for macOS.
|
||||
|
||||
First install Homebrew using the instructions on the [Homebrew](https://brew.sh) page.
|
||||
|
||||
The install command below takes care of initializing the CA certificate store.
|
||||
|
||||
##### Installation
|
||||
```
|
||||
brew install python3
|
||||
```
|
||||
After installation open a new tab or window in Terminal and verify that the command `which python3` returns `/usr/local/bin/python3`.
|
||||
|
||||
##### Removal
|
||||
```
|
||||
brew uninstall python3
|
||||
```
|
||||
|
||||
#### Option 2: Install the package from Python.org
|
||||
|
||||
If you don't want to install a package manager, you can download the Python package for macOS from [python.org](https://www.python.org/downloads/mac-osx/).
|
||||
|
||||
##### Installation
|
||||
|
||||
Download the most recent version of Python and install it like any other macOS package. Then initialize the CA certificate store from Finder by double-clicking on the file `Install Certificates.command` found in the `/Applications/Python 3.8` folder.
|
||||
|
||||
When you double-click on `Install Certificates.command` a new Terminal window will open. If the window remains blank, then the command has not run correctly. This can happen if you've changed the default shell in Terminal Preferences. Try changing it back to the default and run `Install Certificates.command` again.
|
||||
|
||||
After installation open a new tab or window in Terminal and verify that the command `which python3` returns either `/usr/local/bin/python3` or `/Library/Frameworks/Python.framework/Versions/3.8/bin/python3`.
|
||||
|
||||
##### Removal
|
||||
|
||||
Unfortunately, the python.org package does not include an uninstaller and removing it requires several steps:
|
||||
|
||||
1. In Finder, delete the package folder found in `/Applications`.
|
||||
2. In Finder, delete the *rest* of the package found under ` /Library/Frameworks/Python.framework/Versions`.
|
||||
3. In Terminal, undo the changes to your `PATH` by running:
|
||||
```mv ~/.bash_profile.pysave ~/.bash_profile```
|
||||
4. In Terminal, remove the dozen or so symbolic links the package created in `/usr/local/bin`. Or just leave them because installing another version of Python will overwrite most of them.
|
||||
No manual Python installation, virtual environments, or dependency management required!
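If the automatic bootstrap ever fails (for example, behind a restrictive proxy), you can install uv yourself with its standard installer and re-run Algo:

```bash
# Install uv manually using its documented installer, then retry Algo.
curl -LsSf https://astral.sh/uv/install.sh | sh
./algo
```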
|
||||
|
|
|
@ -1,74 +1,107 @@
|
|||
# Deploy from Windows
|
||||
|
||||
The Algo scripts can't be run directly on Windows, but you can use the Windows Subsystem for Linux (WSL) to run a copy of Ubuntu Linux right on your Windows system. You can then run Algo to deploy a VPN server to a supported cloud provider, though you can't turn the instance of Ubuntu running under WSL into a VPN server.
|
||||
You have three options to run Algo on Windows:
|
||||
|
||||
To run WSL you will need:
|
||||
1. **PowerShell Script** (Recommended) - Automated WSL wrapper for easy use
|
||||
2. **Windows Subsystem for Linux (WSL)** - Direct Linux environment access
|
||||
3. **Git Bash/MSYS2** - Unix-like shell environment (limited compatibility)
|
||||
|
||||
* A 64-bit system
|
||||
* 64-bit Windows 10 (Anniversary update or later version)
|
||||
## Option 1: PowerShell Script (Recommended)
|
||||
|
||||
## Install WSL
|
||||
The PowerShell script provides the easiest Windows experience by automatically using WSL when needed:
|
||||
|
||||
Enable the 'Windows Subsystem for Linux':
|
||||
|
||||
1. Open 'Settings'
|
||||
2. Click 'Update & Security', then click the 'For developers' option on the left.
|
||||
3. Toggle the 'Developer mode' option, and accept any warnings Windows pops up.
|
||||
|
||||
Wait a minute for Windows to install a few things in the background (it will eventually let you know a restart may be required for changes to take effect—ignore that for now). Next, to install the actual Linux Subsystem, you have to jump over to 'Control Panel', and do the following:
|
||||
|
||||
1. Click on 'Programs'
|
||||
2. Click on 'Turn Windows features on or off'
|
||||
3. Scroll down and check 'Windows Subsystem for Linux', and then click OK.
|
||||
4. The subsystem will be installed, then Windows will require a restart.
|
||||
5. Restart Windows and then install [Ubuntu 20.04 LTS from the Windows Store](https://www.microsoft.com/p/ubuntu-2004-lts/9n6svws3rx71).
|
||||
6. Run Ubuntu from the Start menu. It will take a few minutes to install. It will have you create a separate user account for the Linux subsystem. Once that's done, you will finally have Ubuntu running somewhat integrated with Windows.
|
||||
|
||||
## Install Algo
|
||||
|
||||
Run these commands in the Ubuntu Terminal to install a prerequisite package and download the Algo scripts to your home directory. Note that when using WSL you should **not** install Algo in the `/mnt/c` directory due to problems with file permissions.
|
||||
|
||||
You may need to follow [these directions](https://devblogs.microsoft.com/commandline/copy-and-paste-arrives-for-linuxwsl-consoles/) in order to paste commands into the Ubuntu Terminal.
|
||||
|
||||
```shell
|
||||
cd
|
||||
umask 0002
|
||||
sudo apt update
|
||||
sudo apt install -y python3-virtualenv
|
||||
```powershell
|
||||
git clone https://github.com/trailofbits/algo
|
||||
cd algo
|
||||
.\algo.ps1
|
||||
```
|
||||
|
||||
## Post installation steps
|
||||
**How it works:**
|
||||
- Detects if you're already in WSL and uses the standard Unix approach
|
||||
- On native Windows, automatically runs Algo via WSL (since Ansible requires Unix)
|
||||
- Provides clear guidance if WSL isn't installed
|
||||
|
||||
These steps should be only if you clone the Algo repository to the host machine disk (C:, D:, etc.). WSL mount host system disks to `\mnt` directory.
|
||||
**Requirements:**
|
||||
- Windows Subsystem for Linux (WSL) with Ubuntu 22.04
|
||||
- If WSL isn't installed, the script will guide you through installation
|
||||
|
||||
### Allow git to change files metadata
|
||||
## Option 2: Windows Subsystem for Linux (WSL)
|
||||
|
||||
By default, git cannot change files metadata (using chmod for example) for files stored at host machine disks (https://docs.microsoft.com/en-us/windows/wsl/wsl-config#set-wsl-launch-settings). Allow it:
|
||||
For users who prefer a full Linux environment or need advanced features:
|
||||
|
||||
1. Start Ubuntu Terminal.
|
||||
2. Edit /etc/wsl.conf (create it if it doesn't exist). Add the following:
|
||||
### Prerequisites
|
||||
* 64-bit Windows 10/11 (Anniversary update or later)
|
||||
|
||||
### Setup WSL
|
||||
1. Install WSL from PowerShell (as Administrator):
|
||||
```powershell
|
||||
wsl --install -d Ubuntu-22.04
|
||||
```
|
||||
|
||||
2. After restart, open Ubuntu and create your user account
|
||||
|
||||
### Install Algo in WSL
|
||||
```bash
|
||||
cd ~
|
||||
git clone https://github.com/trailofbits/algo
|
||||
cd algo
|
||||
./algo
|
||||
```
|
||||
|
||||
**Important**: Don't install Algo in the `/mnt/c` directory due to file permission issues.
|
||||
|
||||
### WSL Configuration (if needed)
|
||||
|
||||
You may encounter permission issues if you clone Algo to a Windows drive (like `/mnt/c/`). Symptoms include:
|
||||
|
||||
- **Git errors**: "fatal: could not set 'core.filemode' to 'false'"
|
||||
- **Ansible errors**: "ERROR! Skipping, '/mnt/c/.../ansible.cfg' as it is not safe to use as a configuration file"
|
||||
- **SSH key errors**: "WARNING: UNPROTECTED PRIVATE KEY FILE!" or "Permissions 0777 for key are too open"
|
||||
|
||||
If you see these errors, configure WSL:
|
||||
|
||||
1. Edit `/etc/wsl.conf` to allow metadata:
|
||||
```ini
|
||||
[automount]
|
||||
options = "metadata"
|
||||
```
|
||||
3. Close all Ubuntu Terminals.
|
||||
4. Run powershell.
|
||||
5. Run `wsl --shutdown` in powershell.
|
||||
|
||||
### Allow run Ansible in a world writable directory
|
||||
2. Restart WSL completely:
|
||||
```powershell
|
||||
wsl --shutdown
|
||||
```
|
||||
|
||||
Ansible treats host machine directories as world writable directory and do not load .cfg from it by default (https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir). For fix run inside `algo` directory:
|
||||
|
||||
```shell
|
||||
3. Fix directory permissions for Ansible:
|
||||
```bash
|
||||
chmod 744 .
|
||||
```
|
||||
|
||||
Now you can continue by following the [README](https://github.com/trailofbits/algo#deploy-the-algo-server) from the 4th step to deploy your Algo server!
|
||||
**Why this happens**: Windows filesystems mounted in WSL (`/mnt/c/`) don't support Unix file permissions by default. Git can't set executable bits, and Ansible refuses to load configs from "world-writable" directories for security.
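A quick sanity check that the metadata option took effect after the restart (adjust the path to any writable folder on the Windows drive):

```bash
# From inside WSL: if metadata is active, chmod on /mnt/c files should stick.
touch /mnt/c/Users/Public/wsl-perm-test
chmod 600 /mnt/c/Users/Public/wsl-perm-test
ls -l /mnt/c/Users/Public/wsl-perm-test   # expect -rw------- if metadata works
rm /mnt/c/Users/Public/wsl-perm-test
```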
|
||||
|
||||
You'll be instructed to edit the file `config.cfg` in order to specify the Algo user accounts to be created. If you're new to Linux the simplest editor to use is `nano`. To edit the file while in the `algo` directory, run:
|
||||
```shell
|
||||
nano config.cfg
|
||||
After deployment, copy configs to Windows:
|
||||
```bash
|
||||
cp -r configs /mnt/c/Users/$USER/
|
||||
```
|
||||
Once `./algo` has finished you can use the `cp` command to copy the configuration files from the `configs` directory into your Windows directory under `/mnt/c/Users` for easier access.
|
||||
|
||||
## Option 3: Git Bash/MSYS2
|
||||
|
||||
If you have Git for Windows installed, you can use the included Git Bash terminal:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/trailofbits/algo
|
||||
cd algo
|
||||
./algo
|
||||
```
|
||||
|
||||
**Pros**:
|
||||
- Uses the standard Unix `./algo` script
|
||||
- No WSL setup required
|
||||
- Familiar Unix-like environment
|
||||
|
||||
**Cons**:
|
||||
- **Limited compatibility**: Ansible may not work properly due to Windows/Unix differences
|
||||
- **Not officially supported**: May encounter unpredictable issues
|
||||
- Less robust than WSL or PowerShell options
|
||||
- Requires Git for Windows installation
|
||||
|
||||
**Note**: This approach is not recommended due to Ansible's Unix requirements. Use WSL-based options instead.
|
||||
|
|
|
@ -1,32 +0,0 @@
|
|||
# FreeBSD / HardenedBSD server setup
|
||||
|
||||
FreeBSD server support is a work in progress. For now, it is only possible to install Algo on existing FreeBSD 11 systems.
|
||||
|
||||
## System preparation
|
||||
|
||||
Ensure that the following kernel options are enabled:
|
||||
|
||||
```
|
||||
# sysctl kern.conftxt | grep -iE "IPSEC|crypto"
|
||||
options IPSEC
|
||||
options IPSEC_NAT_T
|
||||
device crypto
|
||||
```
|
||||
|
||||
## Available roles
|
||||
|
||||
* vpn
|
||||
* ssh_tunneling
|
||||
* dns_adblocking
|
||||
|
||||
## Additional variables
|
||||
|
||||
* rebuild_kernel - set to `true` if you want to let Algo to rebuild your kernel if needed (takes a lot of time)
|
||||
|
||||
## Installation
|
||||
|
||||
```shell
|
||||
ansible-playbook main.yml -e "provider=local"
|
||||
```
|
||||
|
||||
And follow the instructions
|
|
@ -1,25 +1,34 @@
|
|||
# Local Installation
|
||||
|
||||
**PLEASE NOTE**: Algo is intended for use to create a _dedicated_ VPN server. No uninstallation option is provided. If you install Algo on an existing server any existing services might break. In particular, the firewall rules will be overwritten. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information.
|
||||
**IMPORTANT**: Algo is designed to create a dedicated VPN server. There is no uninstallation option. Installing Algo on an existing server may break existing services, especially since firewall rules will be overwritten. See [AlgoVPN and Firewalls](/docs/firewalls.md) for details.
|
||||
|
||||
------
|
||||
## Requirements
|
||||
|
||||
## Outbound VPN Server
|
||||
Algo currently supports **Ubuntu 22.04 LTS only**. Your target server must be running an unmodified installation of Ubuntu 22.04.
|
||||
|
||||
You can use Algo to configure a pre-existing server as an AlgoVPN rather than using it to create and configure a new server on a supported cloud provider. This is referred to as a **local** installation rather than a **cloud** deployment. If you're new to Algo or unfamiliar with Linux you'll find a cloud deployment to be easier.
|
||||
## Installation
|
||||
|
||||
To perform a local installation, install the Algo scripts following the normal installation instructions, then choose:
|
||||
You can install Algo on an existing Ubuntu server instead of creating a new cloud instance. This is called a **local** installation. If you're new to Algo or Linux, cloud deployment is easier.
|
||||
|
||||
```
|
||||
Install to existing Ubuntu latest LTS server (for more advanced users)
|
||||
1. Follow the normal Algo installation instructions
|
||||
2. When prompted, choose: `Install to existing Ubuntu latest LTS server (for advanced users)`
|
||||
3. The target can be:
|
||||
- The same system where you installed Algo (requires `sudo ./algo`)
|
||||
- A remote Ubuntu server accessible via SSH without password prompts (use `ssh-agent`)
|
||||
|
||||
For local installation on the same machine, you must run:
|
||||
```bash
|
||||
sudo ./algo
|
||||
```
|
||||
|
||||
Make sure your target server is running an unmodified copy of the operating system version specified. The target can be the same system where you've installed the Algo scripts, or a remote system that you are able to access as root via SSH without needing to enter the SSH key passphrase (such as when using `ssh-agent`).
|
||||
## Road Warrior Setup
|
||||
|
||||
## Inbound VPN Server (also called "Road Warrior" setup)
|
||||
A "road warrior" setup lets you securely access your home network and its resources when traveling. This involves installing Algo on a server within your home LAN.
|
||||
|
||||
Some may find it useful to set up an Algo server on an Ubuntu box on your home LAN, with the intention of being able to securely access your LAN and any resources on it when you're traveling elsewhere (the ["road warrior" setup](https://en.wikipedia.org/wiki/Road_warrior_(computing))). A few tips if you're doing so:
|
||||
**Network Configuration:**
|
||||
- Forward the necessary ports from your router to the Algo server (see [firewall documentation](/docs/firewalls.md#external-firewall))
|
||||
|
||||
- Make sure you forward any [relevant incoming ports](/docs/firewalls.md#external-firewall) to the Algo server from your router;
|
||||
- Change `BetweenClients_DROP` in `config.cfg` to `false`, and also consider changing `block_smb` and `block_netbios` to `false`;
|
||||
- If you want to use a DNS server on your LAN to resolve local domain names properly (e.g. a Pi-hole), set the `dns_encryption` flag in `config.cfg` to `false`, and change `dns_servers` to the local DNS server IP (i.e. `192.168.1.2`).
|
||||
**Algo Configuration** (edit `config.cfg` before deployment):
|
||||
- Set `BetweenClients_DROP` to `false` (allows VPN clients to reach your LAN)
|
||||
- Consider setting `block_smb` and `block_netbios` to `false` (enables SMB/NetBIOS traffic)
|
||||
- For local DNS resolution (e.g., Pi-hole), set `dns_encryption` to `false` and update `dns_servers` to your local DNS server IP
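Before deploying, you can quickly confirm the settings above in `config.cfg`; this is just a simple sanity check:

```bash
# Show the road-warrior related options as currently set in config.cfg
grep -E 'BetweenClients_DROP|block_smb|block_netbios|dns_encryption|dns_servers' config.cfg
```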
|
||||
|
|
|
@ -1,20 +1,81 @@
|
|||
# Unsupported Cloud Providers
|
||||
# Deploying to Unsupported Cloud Providers
|
||||
|
||||
Algo officially supports the [cloud providers listed here](https://github.com/trailofbits/algo/blob/master/README.md#deploy-the-algo-server). If you want to deploy Algo on another virtual hosting provider, that provider must support:
|
||||
Algo officially supports the [cloud providers listed in the README](https://github.com/trailofbits/algo/blob/master/README.md#deploy-the-algo-server). If you want to deploy Algo on another cloud provider, that provider must meet specific technical requirements for compatibility.
|
||||
|
||||
1. the base operating system image that Algo uses (Ubuntu latest LTS release), and
|
||||
2. a minimum of certain kernel modules required for the strongSwan IPsec server.
|
||||
## Technical Requirements
|
||||
|
||||
Please see the [Required Kernel Modules](https://wiki.strongswan.org/projects/strongswan/wiki/KernelModules) documentation from strongSwan for a list of the specific required modules and a script to check for them. As a first step, we recommend running their shell script to determine initial compatibility with your new hosting provider.
|
||||
Your cloud provider must support:
|
||||
|
||||
If you want Algo to officially support your new cloud provider then it must have an Ansible [cloud module](https://docs.ansible.com/ansible/list_of_cloud_modules.html) available. If no module is available for your provider, search Ansible's [open issues](https://github.com/ansible/ansible/issues) and [pull requests](https://github.com/ansible/ansible/pulls) for existing efforts to add it. If none are available, then you may want to develop the module yourself. Reference the [Ansible module developer documentation](https://docs.ansible.com/ansible/dev_guide/developing_modules.html) and the API documentation for your hosting provider.
|
||||
1. **Ubuntu 22.04 LTS** - Algo exclusively supports Ubuntu 22.04 LTS as the base operating system
|
||||
2. **Required kernel modules** - Specific modules needed for strongSwan IPsec and WireGuard VPN functionality
|
||||
3. **Network capabilities** - Full networking stack access, not containerized environments
|
||||
|
||||
## IPsec in userland
|
||||
## Compatibility Testing
|
||||
|
||||
Hosting providers that rely on OpenVZ or Docker cannot be used by Algo since they cannot load the required kernel modules or access the required network interfaces. For more information, see the strongSwan documentation on [Cloud Platforms](https://wiki.strongswan.org/projects/strongswan/wiki/Cloudplatforms).
|
||||
Before attempting to deploy Algo on an unsupported provider, test compatibility using strongSwan's kernel module checker:
|
||||
|
||||
In order to address this issue, strongSwan has developed the [kernel-libipsec](https://wiki.strongswan.org/projects/strongswan/wiki/Kernel-libipsec) plugin which provides an IPsec backend that works entirely in userland. `libipsec` bundles its own IPsec implementation and uses TUN devices to route packets. For example, `libipsec` is used by the Android strongSwan app to address Android's lack of a functional IPsec stack.
|
||||
1. Deploy a basic Ubuntu 22.04 LTS instance on your target provider
|
||||
2. Run the [kernel module compatibility script](https://wiki.strongswan.org/projects/strongswan/wiki/KernelModules) from strongSwan
|
||||
3. Verify all required modules are available and loadable
|
||||
|
||||
Use of `libipsec` is not supported by Algo. It has known performance issues since it buffers each packet in memory. On certain systems with insufficient processor power, such as many cloud hosting providers, using `libipsec` can lead to an out of memory condition, crash the charon daemon, or lock up the entire host.
|
||||
The script will identify any missing kernel modules that would prevent Algo from functioning properly.
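For a rough first pass before running the full strongSwan checker, a probe along these lines can be run on the test instance. The module list below is illustrative only; treat the strongSwan script as the authoritative check.

```bash
#!/bin/sh
# Quick probe for a few kernel modules Algo's VPNs rely on (illustrative list).
for mod in ah4 esp4 xfrm_user xfrm_algo tun wireguard; do
    if lsmod | grep -q "^${mod} " || modprobe -n "$mod" >/dev/null 2>&1; then
        echo "ok      $mod"
    else
        echo "MISSING $mod"
    fi
done
```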
|
||||
|
||||
Further, `libipsec` introduces unknown security risks. The code in `libipsec` has not been scrutinized to the same level as the code in the Linux or FreeBSD kernel that it replaces. This additional code introduces new complexity to the Algo server that we want to avoid at this time. We recommend moving to a hosting provider that does not require libipsec and can load the required kernel modules.
|
||||
## Adding Official Support
|
||||
|
||||
For Algo to officially support a new cloud provider, the provider must have:
|
||||
|
||||
- An available Ansible [cloud module](https://docs.ansible.com/ansible/list_of_cloud_modules.html)
|
||||
- Reliable API for programmatic instance management
|
||||
- Consistent Ubuntu 22.04 LTS image availability
|
||||
|
||||
If no Ansible module exists for your provider:
|
||||
|
||||
1. Check Ansible's [open issues](https://github.com/ansible/ansible/issues) and [pull requests](https://github.com/ansible/ansible/pulls) for existing development efforts
|
||||
2. Consider developing the module yourself using the [Ansible module developer documentation](https://docs.ansible.com/ansible/dev_guide/developing_modules.html)
|
||||
3. Reference your provider's API documentation for implementation details
|
||||
|
||||
## Unsupported Environments
|
||||
|
||||
### Container-Based Hosting
|
||||
|
||||
Providers using **OpenVZ**, **Docker containers**, or other **containerized environments** cannot run Algo because:
|
||||
|
||||
- Container environments don't provide access to kernel modules
|
||||
- VPN functionality requires low-level network interface access
|
||||
- IPsec and WireGuard need direct kernel interaction
|
||||
|
||||
For more details, see strongSwan's [Cloud Platforms documentation](https://wiki.strongswan.org/projects/strongswan/wiki/Cloudplatforms).
|
||||
|
||||
### Userland IPsec (libipsec)
|
||||
|
||||
Some providers attempt to work around kernel limitations using strongSwan's [kernel-libipsec](https://wiki.strongswan.org/projects/strongswan/wiki/Kernel-libipsec) plugin, which implements IPsec entirely in userspace.
|
||||
|
||||
**Algo does not support libipsec** for these reasons:
|
||||
|
||||
- **Performance issues** - Buffers each packet in memory, causing performance degradation
|
||||
- **Resource consumption** - Can cause out-of-memory conditions on resource-constrained systems
|
||||
- **Stability concerns** - May crash the charon daemon or lock up the host system
|
||||
- **Security implications** - Less thoroughly audited than kernel implementations
|
||||
- **Added complexity** - Introduces additional code paths that increase attack surface
|
||||
|
||||
We strongly recommend choosing a provider that supports native kernel modules rather than attempting workarounds.
|
||||
|
||||
## Alternative Deployment Options
|
||||
|
||||
If your preferred provider doesn't support Algo's requirements:
|
||||
|
||||
1. **Use a supported provider** - Deploy on AWS, DigitalOcean, Azure, GCP, or another [officially supported provider](https://github.com/trailofbits/algo/blob/master/README.md#deploy-the-algo-server)
|
||||
2. **Deploy locally** - Use the [Ubuntu server deployment option](deploy-to-ubuntu.md) on your own hardware
|
||||
3. **Hybrid approach** - Deploy the VPN server on a supported provider while using your preferred provider for other services
|
||||
|
||||
## Contributing Support
|
||||
|
||||
If you successfully deploy Algo on an unsupported provider and want to contribute official support:
|
||||
|
||||
1. Ensure the provider meets all technical requirements
|
||||
2. Verify consistent deployment success across multiple regions
|
||||
3. Create an Ansible module or verify existing module compatibility
|
||||
4. Document the deployment process and any provider-specific considerations
|
||||
5. Submit a pull request with your implementation
|
||||
|
||||
Community contributions to expand provider support are welcome, provided they meet Algo's security and reliability standards.
|
|
@ -6,7 +6,7 @@
|
|||
* [Why aren't you using Racoon, LibreSwan, or OpenSwan?](#why-arent-you-using-racoon-libreswan-or-openswan)
|
||||
* [Why aren't you using a memory-safe or verified IKE daemon?](#why-arent-you-using-a-memory-safe-or-verified-ike-daemon)
|
||||
* [Why aren't you using OpenVPN?](#why-arent-you-using-openvpn)
|
||||
* [Why aren't you using Alpine Linux, OpenBSD, or HardenedBSD?](#why-arent-you-using-alpine-linux-openbsd-or-hardenedbsd)
|
||||
* [Why aren't you using Alpine Linux or OpenBSD?](#why-arent-you-using-alpine-linux-or-openbsd)
|
||||
* [I deployed an Algo server. Can you update it with new features?](#i-deployed-an-algo-server-can-you-update-it-with-new-features)
|
||||
* [Where did the name "Algo" come from?](#where-did-the-name-algo-come-from)
|
||||
* [Can DNS filtering be disabled?](#can-dns-filtering-be-disabled)
|
||||
|
@ -40,9 +40,9 @@ I would, but I don't know of any [suitable ones](https://github.com/trailofbits/
|
|||
|
||||
OpenVPN does not have out-of-the-box client support on any major desktop or mobile operating system. This introduces user experience issues and requires the user to [update](https://www.exploit-db.com/exploits/34037/) and [maintain](https://www.exploit-db.com/exploits/20485/) the software themselves. OpenVPN depends on the security of [TLS](https://tools.ietf.org/html/rfc7457), both the [protocol](https://arstechnica.com/security/2016/08/new-attack-can-pluck-secrets-from-1-of-https-traffic-affects-top-sites/) and its [implementations](https://arstechnica.com/security/2014/04/confirmed-nasty-heartbleed-bug-exposes-openvpn-private-keys-too/), and we simply trust the server less due to [past](https://sweet32.info/) [security](https://github.com/ValdikSS/openvpn-fix-dns-leak-plugin/blob/master/README.md) [incidents](https://www.exploit-db.com/exploits/34879/).
|
||||
|
||||
## Why aren't you using Alpine Linux, OpenBSD, or HardenedBSD?
|
||||
## Why aren't you using Alpine Linux or OpenBSD?
|
||||
|
||||
Alpine Linux is not supported out-of-the-box by any major cloud provider. We are interested in supporting Free-, Open-, and HardenedBSD. Follow along or contribute to our BSD support in [this issue](https://github.com/trailofbits/algo/issues/35).
|
||||
Alpine Linux is not supported out-of-the-box by any major cloud provider. While we considered BSD variants in the past, Algo now focuses exclusively on Ubuntu LTS for consistency, security, and maintainability.
|
||||
|
||||
## I deployed an Algo server. Can you update it with new features?
|
||||
|
||||
|
|
|
@ -24,7 +24,6 @@
|
|||
- Configure [CloudStack](cloud-cloudstack.md)
|
||||
- Configure [Hetzner Cloud](cloud-hetzner.md)
|
||||
* Advanced Deployment
|
||||
- Deploy to your own [FreeBSD](deploy-to-freebsd.md) server
|
||||
- Deploy to your own [Ubuntu](deploy-to-ubuntu.md) server, and road warrior setup
|
||||
- Deploy to an [unsupported cloud provider](deploy-to-unsupported-cloud.md)
|
||||
* [FAQ](faq.md)
|
||||
|
|
|
@ -1,88 +0,0 @@
|
|||
# Linting and Code Quality
|
||||
|
||||
This document describes the linting and code quality checks used in the Algo VPN project.
|
||||
|
||||
## Overview
|
||||
|
||||
The project uses multiple linters to ensure code quality across different file types:
|
||||
- **Ansible** playbooks and roles
|
||||
- **Python** library modules and tests
|
||||
- **Shell** scripts
|
||||
- **YAML** configuration files
|
||||
|
||||
## Linters in Use
|
||||
|
||||
### 1. Ansible Linting
|
||||
- **Tool**: `ansible-lint`
|
||||
- **Config**: `.ansible-lint`
|
||||
- **Checks**: Best practices, security issues, deprecated syntax
|
||||
- **Key Rules**:
|
||||
- `no-log-password`: Ensure passwords aren't logged
|
||||
- `no-same-owner`: File ownership should be explicit
|
||||
- `partial-become`: Avoid unnecessary privilege escalation
|
||||
|
||||
### 2. Python Linting
|
||||
- **Tool**: `ruff` - Fast Python linter (replaces flake8, isort, etc.)
|
||||
- **Config**: `pyproject.toml`
|
||||
- **Style**: 120 character line length, Python 3.10+
|
||||
- **Checks**: Syntax errors, imports, code style
|
||||
|
||||
### 3. Shell Script Linting
|
||||
- **Tool**: `shellcheck`
|
||||
- **Checks**: All `.sh` files in the repository
|
||||
- **Catches**: Common shell scripting errors and pitfalls
|
||||
|
||||
### 4. YAML Linting
|
||||
- **Tool**: `yamllint`
|
||||
- **Config**: `.yamllint`
|
||||
- **Rules**: Extended from default with custom line length
|
||||
|
||||
### 5. GitHub Actions Security
|
||||
- **Tool**: `zizmor` - GitHub Actions security (run separately)
|
||||
|
||||
## CI/CD Integration
|
||||
|
||||
### Main Workflow (`main.yml`)
|
||||
- **syntax-check**: Validates Ansible playbook syntax
|
||||
- **basic-tests**: Runs unit tests including validation tests
|
||||
|
||||
### Lint Workflow (`lint.yml`)
|
||||
Separate workflow with parallel jobs:
|
||||
- **ansible-lint**: Ansible best practices
|
||||
- **yaml-lint**: YAML formatting
|
||||
- **python-lint**: Python code quality
|
||||
- **shellcheck**: Shell script validation
|
||||
|
||||
## Running Linters Locally
|
||||
|
||||
```bash
|
||||
# Ansible
|
||||
ansible-lint -v *.yml roles/{local,cloud-*}/*/*.yml
|
||||
|
||||
# Python
|
||||
ruff check .
|
||||
|
||||
# Shell
|
||||
find . -name "*.sh" -exec shellcheck {} \;
|
||||
|
||||
# YAML
|
||||
yamllint .
|
||||
```
|
||||
|
||||
## Current Status
|
||||
|
||||
Most linters are configured to warn rather than fail (`|| true`) to allow gradual adoption. As code quality improves, these should be changed to hard failures.
|
||||
|
||||
### Known Issues to Address:
|
||||
1. Python library modules need formatting updates
|
||||
2. Some Ansible tasks missing `changed_when` conditions
|
||||
3. YAML files have inconsistent indentation
|
||||
4. Shell scripts could use more error handling
|
||||
|
||||
## Contributing
|
||||
|
||||
When adding new code:
|
||||
1. Run relevant linters before committing
|
||||
2. Fix any errors (not just warnings)
|
||||
3. Add linting exceptions only with good justification
|
||||
4. Update linter configs if adding new file types
|
|
@ -1,14 +1,10 @@
|
|||
# Troubleshooting
|
||||
|
||||
First of all, check [this](https://github.com/trailofbits/algo#features) and ensure that you are deploying to the supported ubuntu version.
|
||||
First of all, check [this](https://github.com/trailofbits/algo#features) and ensure that you are deploying to Ubuntu 22.04 LTS, the only supported server platform.
|
||||
|
||||
* [Installation Problems](#installation-problems)
|
||||
* [Error: "You have not agreed to the Xcode license agreements"](#error-you-have-not-agreed-to-the-xcode-license-agreements)
|
||||
* [Error: checking whether the C compiler works... no](#error-checking-whether-the-c-compiler-works-no)
|
||||
* [Error: "fatal error: 'openssl/opensslv.h' file not found"](#error-fatal-error-opensslopensslvh-file-not-found)
|
||||
* [Error: "TypeError: must be str, not bytes"](#error-typeerror-must-be-str-not-bytes)
|
||||
* [Python version is not supported](#python-version-is-not-supported)
|
||||
* [Error: "ansible-playbook: command not found"](#error-ansible-playbook-command-not-found)
|
||||
* [Error: "Could not fetch URL ... TLSV1_ALERT_PROTOCOL_VERSION](#could-not-fetch-url--tlsv1_alert_protocol_version)
|
||||
* [Fatal: "Failed to validate the SSL certificate for ..."](#fatal-failed-to-validate-the-SSL-certificate)
|
||||
* [Bad owner or permissions on .ssh](#bad-owner-or-permissions-on-ssh)
|
||||
* [The region you want is not available](#the-region-you-want-is-not-available)
|
||||
|
@ -19,6 +15,7 @@ First of all, check [this](https://github.com/trailofbits/algo#features) and ens
|
|||
* [Azure: The client xxx with object id xxx does not have authorization to perform action Microsoft.Resources/subscriptions/resourcegroups/write' over scope](#azure-deployment-permissions-error)
|
||||
* [Windows: The value of parameter linuxConfiguration.ssh.publicKeys.keyData is invalid](#windows-the-value-of-parameter-linuxconfigurationsshpublickeyskeydata-is-invalid)
|
||||
* [Docker: Failed to connect to the host via ssh](#docker-failed-to-connect-to-the-host-via-ssh)
|
||||
* [Windows: "The parameter is incorrect" error when connecting](#windows-the-parameter-is-incorrect-error-when-connecting)
|
||||
* [Error: Failed to create symlinks for deploying to localhost](#error-failed-to-create-symlinks-for-deploying-to-localhost)
|
||||
* [Wireguard: Unable to find 'configs/...' in expected paths](#wireguard-unable-to-find-configs-in-expected-paths)
|
||||
* [Ubuntu Error: "unable to write 'random state'" when generating CA password](#ubuntu-error-unable-to-write-random-state-when-generating-ca-password)
|
||||
|
@ -30,7 +27,6 @@ First of all, check [this](https://github.com/trailofbits/algo#features) and ens
|
|||
* [Error: "The VPN Service payload could not be installed."](#error-the-vpn-service-payload-could-not-be-installed)
|
||||
* [Little Snitch is broken when connected to the VPN](#little-snitch-is-broken-when-connected-to-the-vpn)
|
||||
* [I can't get my router to connect to the Algo server](#i-cant-get-my-router-to-connect-to-the-algo-server)
|
||||
* [I can't get Network Manager to connect to the Algo server](#i-cant-get-network-manager-to-connect-to-the-algo-server)
|
||||
* [Various websites appear to be offline through the VPN](#various-websites-appear-to-be-offline-through-the-vpn)
|
||||
* [Clients appear stuck in a reconnection loop](#clients-appear-stuck-in-a-reconnection-loop)
|
||||
* [Wireguard: clients can connect on Wifi but not LTE](#wireguard-clients-can-connect-on-wifi-but-not-lte)
|
||||
|
@ -43,84 +39,13 @@ Look here if you have a problem running the installer to set up a new Algo serve
|
|||
|
||||
### Python version is not supported
|
||||
|
||||
The minimum Python version required to run Algo is 3.8. Most modern operation systems should have it by default, but if the OS you are using doesn't meet the requirements, you have to upgrade. See the official documentation for your OS, or manual download it from https://www.python.org/downloads/. Otherwise, you may [deploy from docker](deploy-from-docker.md)
|
||||
|
||||
### Error: "You have not agreed to the Xcode license agreements"
|
||||
|
||||
On macOS, you tried to install the dependencies with pip and encountered the following error:
|
||||
|
||||
```
|
||||
Downloading cffi-1.9.1.tar.gz (407kB): 407kB downloaded
|
||||
Running setup.py (path:/private/tmp/pip_build_root/cffi/setup.py) egg_info for package cffi
|
||||
|
||||
You have not agreed to the Xcode license agreements, please run 'xcodebuild -license' (for user-level acceptance) or 'sudo xcodebuild -license' (for system-wide acceptance) from within a Terminal window to review and agree to the Xcode license agreements.
|
||||
|
||||
No working compiler found, or bogus compiler options
|
||||
passed to the compiler from Python's distutils module.
|
||||
See the error messages above.
|
||||
|
||||
----------------------------------------
|
||||
Cleaning up...
|
||||
Command python setup.py egg_info failed with error code 1 in /private/tmp/pip_build_root/cffi
|
||||
Storing debug log for failure in /Users/algore/Library/Logs/pip.log
|
||||
```
|
||||
|
||||
The Xcode compiler is installed but requires you to accept its license agreement prior to using it. Run `xcodebuild -license` to agree and then retry installing the dependencies.
|
||||
|
||||
### Error: checking whether the C compiler works... no
|
||||
|
||||
On macOS, you tried to install the dependencies with pip and encountered the following error:
|
||||
|
||||
```
|
||||
Failed building wheel for pycrypto
|
||||
Running setup.py clean for pycrypto
|
||||
Failed to build pycrypto
|
||||
...
|
||||
copying lib/Crypto/Signature/PKCS1_v1_5.py -> build/lib.macosx-10.6-intel-2.7/Crypto/Signature
|
||||
running build_ext
|
||||
running build_configure
|
||||
checking for gcc... gcc
|
||||
checking whether the C compiler works... no
|
||||
configure: error: in '/private/var/folders/3f/q33hl6_x6_nfyjg29fcl9qdr0000gp/T/pip-build-DB5VZp/pycrypto': configure: error: C compiler cannot create executables See config.log for more details
|
||||
Traceback (most recent call last):
|
||||
File "", line 1, in
|
||||
...
|
||||
cmd_obj.run()
|
||||
File "/private/var/folders/3f/q33hl6_x6_nfyjg29fcl9qdr0000gp/T/pip-build-DB5VZp/pycrypto/setup.py", line 278, in run
|
||||
raise RuntimeError("autoconf error")
|
||||
RuntimeError: autoconf error
|
||||
```
|
||||
|
||||
You don't have a working compiler installed. You should install the XCode compiler by opening your terminal and running `xcode-select --install`.
|
||||
|
||||
### Error: "fatal error: 'openssl/opensslv.h' file not found"
|
||||
|
||||
On macOS, you tried to install `cryptography` and encountered the following error:
|
||||
|
||||
```
|
||||
build/temp.macosx-10.12-intel-2.7/_openssl.c:434:10: fatal error: 'openssl/opensslv.h' file not found
|
||||
|
||||
#include <openssl/opensslv.h>
|
||||
|
||||
^
|
||||
|
||||
1 error generated.
|
||||
|
||||
error: command 'cc' failed with exit status 1
|
||||
|
||||
----------------------------------------
|
||||
Cleaning up...
|
||||
Command /usr/bin/python -c "import setuptools, tokenize;__file__='/private/tmp/pip_build_root/cryptography/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record /tmp/pip-sREEE5-record/install-record.txt --single-version-externally-managed --compile failed with error code 1 in /private/tmp/pip_build_root/cryptography
|
||||
Storing debug log for failure in /Users/algore/Library/Logs/pip.log
|
||||
```
|
||||
|
||||
You are running an old version of `pip` that cannot download the binary `cryptography` dependency. Upgrade to a new version of `pip` by running `sudo python3 -m pip install -U pip`.
|
||||
The minimum Python version required to run Algo is 3.11. Most modern operating systems ship with it by default, but if yours doesn't meet the requirement, you have to upgrade. See the official documentation for your OS, or download it manually from https://www.python.org/downloads/. Otherwise, you may [deploy from docker](deploy-from-docker.md).
|
||||
|
||||
### Error: "ansible-playbook: command not found"
|
||||
|
||||
You tried to install Algo and you see an error that reads "ansible-playbook: command not found."
|
||||
|
||||
You did not finish step 4 in the installation instructions, "[Install Algo's remaining dependencies](https://github.com/trailofbits/algo#deploy-the-algo-server)." Algo depends on [Ansible](https://github.com/ansible/ansible), an automation framework, and this error indicates that you do not have Ansible installed. Ansible is installed by `pip` when you run `python3 -m pip install -r requirements.txt`. You must complete the installation instructions to run the Algo server deployment process.
|
||||
This indicates that Ansible is not installed or not available in your PATH. Algo automatically installs all dependencies (including Ansible) using uv when you run `./algo` for the first time. If you're seeing this error, try running `./algo` again - it should automatically install the required Python environment and dependencies. If the issue persists, ensure you're running `./algo` from the Algo project directory.
|
||||
|
||||
### Fatal: "Failed to validate the SSL certificate"
|
||||
|
||||
|
@ -129,23 +54,7 @@ You received a message like this:
|
|||
fatal: [localhost]: FAILED! => {"changed": false, "msg": "Failed to validate the SSL certificate for api.digitalocean.com:443. Make sure your managed systems have a valid CA certificate installed. You can use validate_certs=False if you do not need to confirm the servers identity but this is unsafe and not recommended. Paths checked for this platform: /etc/ssl/certs, /etc/ansible, /usr/local/etc/openssl. The exception msg was: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1076).", "status": -1, "url": "https://api.digitalocean.com/v2/regions"}
|
||||
```
|
||||
|
||||
Your local system does not have a CA certificate that can validate the cloud provider's API. Are you using MacPorts instead of Homebrew? The MacPorts openssl installation does not include a CA certificate, but you can fix this by installing the [curl-ca-bundle](https://andatche.com/articles/2012/02/fixing-ssl-ca-certificates-with-openssl-from-macports/) port with `port install curl-ca-bundle`. That should do the trick.
|
||||
|
||||
### Could not fetch URL ... TLSV1_ALERT_PROTOCOL_VERSION
|
||||
|
||||
You tried to install Algo and you received an error like this one:
|
||||
|
||||
```
|
||||
Could not fetch URL https://pypi.python.org/simple/secretstorage/: There was a problem confirming the ssl certificate: [SSL: TLSV1_ALERT_PROTOCOL_VERSION] tlsv1 alert protocol version (_ssl.c:590) - skipping
|
||||
Could not find a version that satisfies the requirement SecretStorage<3 (from -r requirements.txt (line 2)) (from versions: )
|
||||
No matching distribution found for SecretStorage<3 (from -r requirements.txt (line 2))
|
||||
```
|
||||
|
||||
It's time to upgrade your python.
|
||||
|
||||
`brew upgrade python3`
|
||||
|
||||
You can also download python 3.7.x from python.org.
|
||||
Your local system does not have a CA certificate that can validate the cloud provider's API. This typically occurs with custom Python installations. Try reinstalling Python using Homebrew (`brew install python3`) or ensure your system has proper CA certificates installed.
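For example (this assumes Homebrew is already installed; the `certifi` refresh is an optional extra step to try if the error persists):

```bash
brew install python3
python3 -m pip install --upgrade certifi
```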
|
||||
|
||||
### Bad owner or permissions on .ssh
|
||||
|
||||
|
@ -234,9 +143,9 @@ The error is caused because Digital Ocean changed its API to treat the tag argum
|
|||
An exception occurred during task execution. To see the full traceback, use -vvv.
|
||||
The error was: FileNotFoundError: [Errno 2] No such file or directory: '/home/ubuntu/.azure/azureProfile.json'
|
||||
fatal: [localhost]: FAILED! => {"changed": false, "module_stderr": "Traceback (most recent call last):
|
||||
File \"/usr/local/lib/python3.6/dist-packages/azure/cli/core/_session.py\", line 39, in load
|
||||
File \"/usr/local/lib/python3.11/dist-packages/azure/cli/core/_session.py\", line 39, in load
|
||||
with codecs_open(self.filename, 'r', encoding=self._encoding) as f:
|
||||
File \"/usr/lib/python3.6/codecs.py\", line 897, in open\n file = builtins.open(filename, mode, buffering)
|
||||
File \"/usr/lib/python3.11/codecs.py\", line 897, in open\n file = builtins.open(filename, mode, buffering)
|
||||
FileNotFoundError: [Errno 2] No such file or directory: '/home/ubuntu/.azure/azureProfile.json'
|
||||
", "module_stdout": "", "msg": "MODULE FAILURE
|
||||
See stdout/stderr for the exact error", "rc": 1}
|
||||
|
@ -294,6 +203,43 @@ You need to add the following to the ansible.cfg in repo root:
|
|||
control_path_dir=/dev/shm/ansible_control_path
|
||||
```
|
||||
|
||||
### Windows: "The parameter is incorrect" error when connecting
|
||||
|
||||
When trying to connect to your Algo VPN on Windows 10/11, you may receive an error stating "The parameter is incorrect". This is a common issue that can usually be resolved by resetting your Windows networking stack.
|
||||
|
||||
#### Solution
|
||||
|
||||
1. **Clear the networking caches**
|
||||
|
||||
Open Command Prompt as Administrator (right-click on Command Prompt and select "Run as Administrator") and run these commands:
|
||||
```cmd
|
||||
netsh int ip reset
|
||||
netsh int ipv6 reset
|
||||
netsh winsock reset
|
||||
```
|
||||
|
||||
Then restart your computer.
|
||||
|
||||
2. **Reset Device Manager network adapters** (if step 1 doesn't work)
|
||||
|
||||
- Open Device Manager
|
||||
- Find "Network Adapters"
|
||||
- Uninstall all WAN Miniport drivers (IKEv2, IP, IPv6, etc.)
|
||||
- Click Action → Scan for hardware changes
|
||||
- The adapters you just uninstalled should reinstall automatically
|
||||
|
||||
Try connecting to the VPN again.
|
||||
|
||||
#### What causes this issue?
|
||||
|
||||
This error typically occurs when:
|
||||
- Windows networking stack becomes corrupted
|
||||
- After Windows updates that affect network drivers
|
||||
- When switching between different VPN configurations
|
||||
- After network-related software installations/uninstallations
|
||||
|
||||
Note: This issue has been reported by many users and the above solution has proven effective in most cases.
|
||||
|
||||
### Error: Failed to create symlinks for deploying to localhost
|
||||
|
||||
You tried to run Algo and you received an error like this one:
|
||||
|
@ -339,7 +285,7 @@ TASK [wireguard : Generate public keys] ****************************************
|
|||
|
||||
fatal: [localhost]: FAILED! => {"msg": "An unhandled exception occurred while running the lookup plugin 'file'. Error was a <class 'ansible.errors.AnsibleError'>, original message: could not locate file in lookup: configs/xxx.xxx.xxx.xxx/wireguard//private/dan"}
|
||||
```
|
||||
This error is usually hit when using the local install option on a server that isn't Ubuntu 18.04 or later. You should upgrade your server to Ubuntu 18.04 or later. If this doesn't work, try removing files in /etc/wireguard/ and the configs directories as follows:
|
||||
This error is usually hit when using the local install option on an unsupported server; Algo requires Ubuntu 22.04 LTS, so upgrade the server to that release first. If this doesn't work, try removing files in /etc/wireguard/ and the configs directories as follows:
|
||||
|
||||
```ssh
|
||||
sudo rm -rf /etc/wireguard/*
|
||||
|
@ -418,10 +364,6 @@ Little Snitch is not compatible with IPSEC VPNs due to a known bug in macOS and
|
|||
|
||||
In order to connect to the Algo VPN server, your router must support IKEv2, ECC certificate-based authentication, and the cipher suite we use. See the ipsec.conf files we generate in the `config` folder for more information. Note that we do not officially support routers as clients for Algo VPN at this time, though patches and documentation for them are welcome (for example, see open issues for [Ubiquiti](https://github.com/trailofbits/algo/issues/307) and [pfSense](https://github.com/trailofbits/algo/issues/292)).
|
||||
|
||||
### I can't get Network Manager to connect to the Algo server
|
||||
|
||||
You're trying to connect Ubuntu or Debian to the Algo server through the Network Manager GUI but it's not working. Many versions of Ubuntu and some older versions of Debian bundle a [broken version of Network Manager](https://github.com/trailofbits/algo/issues/263) without support for modern standards or the strongSwan server. You must upgrade to Ubuntu 17.04 or Debian 9 Stretch, each of which contain the required minimum version of Network Manager.
|
||||
|
||||
### Various websites appear to be offline through the VPN
|
||||
|
||||
This issue appears occasionally due to issues with [MTU](https://en.wikipedia.org/wiki/Maximum_transmission_unit) size. Different networks may require the MTU to be within a specific range to correctly pass traffic. We made an effort to set the MTU to the most conservative, most compatible size by default but problems may still occur.
|
||||
|
@ -493,7 +435,7 @@ For IPsec on Linux you can change the MTU of your network interface to match the
|
|||
```
|
||||
sudo ifconfig eth0 mtu 1440
|
||||
```
|
||||
To make the change take affect after a reboot, on Ubuntu 18.04 and later edit the relevant file in the `/etc/netplan` directory (see `man netplan`).
|
||||
To make the change take effect after a reboot, on Ubuntu 22.04 LTS edit the relevant file in the `/etc/netplan` directory (see `man netplan`).
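A minimal sketch of such a netplan change, assuming the interface is `eth0` and the file is the common cloud-image default; your file and interface names will likely differ:

```yaml
# /etc/netplan/50-cloud-init.yaml (hypothetical) -- apply with `sudo netplan apply`
network:
  version: 2
  ethernets:
    eth0:
      mtu: 1440
```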
|
||||
|
||||
#### Note for WireGuard iOS users
|
||||
|
||||
|
|
60
files/cloud-init/README.md
Normal file
|
@ -0,0 +1,60 @@
|
|||
# Cloud-Init Files - Critical Format Requirements
|
||||
|
||||
## ⚠️ CRITICAL WARNING ⚠️
|
||||
|
||||
The files in this directory have **STRICT FORMAT REQUIREMENTS** that must not be changed by linters or automated formatting tools.
|
||||
|
||||
## Cloud-Config Header Format
|
||||
|
||||
The first line of `base.yml` **MUST** be exactly:
|
||||
```
|
||||
#cloud-config
|
||||
```
|
||||
|
||||
### ❌ DO NOT CHANGE TO:
|
||||
- `# cloud-config` (space after #) - **BREAKS CLOUD-INIT PARSING**
|
||||
- Add YAML document start `---` - **NOT ALLOWED IN CLOUD-INIT**
|
||||
|
||||
### Why This Matters
|
||||
|
||||
Cloud-init's YAML parser expects the exact string `#cloud-config` as the first line. Any deviation causes:
|
||||
|
||||
1. **Complete parsing failure** - All directives are skipped
|
||||
2. **SSH configuration not applied** - Servers remain on port 22 instead of 4160
|
||||
3. **Deployment timeouts** - Ansible cannot connect to configure the VPN
|
||||
4. **DigitalOcean specific impact** - Other providers may be more tolerant
|
||||
|
||||
## Historical Context
|
||||
|
||||
- **Working**: All versions before PR #14775 (August 2025)
|
||||
- **Broken**: PR #14775 "Apply ansible-lint improvements" added space by mistake
|
||||
- **Fixed**: PR #14801 restored correct format + added protections
|
||||
|
||||
See GitHub issue #14800 for full technical details.
|
||||
|
||||
## Linter Configuration
|
||||
|
||||
These files are **excluded** from:
|
||||
- `yamllint` (`.yamllint` config)
|
||||
- `ansible-lint` (`.ansible-lint` config)
|
||||
|
||||
This prevents automated tools from "fixing" the format and breaking deployments.
|
||||
|
||||
## Template Variables
|
||||
|
||||
The cloud-init files use Jinja2 templating:
|
||||
- `{{ ssh_port }}` - Configured SSH port (typically 4160)
|
||||
- `{{ lookup('file', '{{ SSH_keys.public }}') }}` - SSH public key
|
||||
|
||||
## Editing Guidelines
|
||||
|
||||
1. **Never** run automated formatters on these files
|
||||
2. **Test immediately** after any changes with real deployments
|
||||
3. **Check yamllint warnings** are expected (missing space in comment, missing ---)
|
||||
4. **Verify first line** remains exactly `#cloud-config`
|
||||
|
||||
## References
|
||||
|
||||
- [Cloud-init documentation](https://cloudinit.readthedocs.io/)
|
||||
- [Cloud-config examples](https://cloudinit.readthedocs.io/en/latest/reference/examples.html)
|
||||
- [GitHub Issue #14800](https://github.com/trailofbits/algo/issues/14800)
|
|
@ -1,4 +1,8 @@
|
|||
# cloud-config
|
||||
#cloud-config
|
||||
# CRITICAL: The above line MUST be exactly "#cloud-config" (no space after #)
|
||||
# This is required by cloud-init's YAML parser. Adding a space breaks parsing
|
||||
# and causes all cloud-init directives to be skipped, resulting in SSH timeouts.
|
||||
# See: https://github.com/trailofbits/algo/issues/14800
|
||||
output: {all: '| tee -a /var/log/cloud-init-output.log'}
|
||||
|
||||
package_update: true
|
||||
|
@ -6,6 +10,16 @@ package_upgrade: true
|
|||
|
||||
packages:
|
||||
- sudo
|
||||
{% if performance_preinstall_packages | default(false) %}
|
||||
# Universal tools always needed by Algo (performance optimization)
|
||||
- git
|
||||
- screen
|
||||
- apparmor-utils
|
||||
- uuid-runtime
|
||||
- coreutils
|
||||
- iptables-persistent
|
||||
- cgroup-tools
|
||||
{% endif %}
|
||||
|
||||
users:
|
||||
- default
|
||||
|
@ -21,7 +35,7 @@ users:
|
|||
write_files:
|
||||
- path: /etc/ssh/sshd_config
|
||||
content: |
|
||||
{{ lookup('template', 'files/cloud-init/sshd_config') | indent(width=6) }}
|
||||
{{ lookup('template', 'files/cloud-init/sshd_config') | indent(width=6, first=True) }}
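For context on the `first=True` change above: Jinja's `indent` filter leaves the first line untouched by default, so `first=True` is what makes every rendered line share the same indentation. A standalone sketch of the filter behaviour (not a rendering of this cloud-init template):

```python
# Demonstrates jinja2's indent filter semantics; requires jinja2.
from jinja2 import Environment

env = Environment()
text = "Port 4160\nPasswordAuthentication no"
print(env.from_string("{{ t | indent(width=6) }}").render(t=text))
# first line stays unindented, second line gains 6 spaces
print(env.from_string("{{ t | indent(width=6, first=True) }}").render(t=text))
# both lines gain 6 spaces
```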
|
||||
|
||||
runcmd:
|
||||
- set -x
|
||||
|
|
63
install.sh
|
@ -22,19 +22,20 @@ installRequirements() {
|
|||
export DEBIAN_FRONTEND=noninteractive
|
||||
apt-get update
|
||||
apt-get install \
|
||||
python3-virtualenv \
|
||||
curl \
|
||||
jq -y
|
||||
|
||||
# Install uv
|
||||
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
export PATH="$HOME/.local/bin:$HOME/.cargo/bin:$PATH"
|
||||
}
|
||||
|
||||
getAlgo() {
|
||||
[ ! -d "algo" ] && git clone "https://github.com/${REPO_SLUG}" -b "${REPO_BRANCH}" algo
|
||||
cd algo
|
||||
|
||||
python3 -m virtualenv --python="$(command -v python3)" .env
|
||||
# shellcheck source=/dev/null
|
||||
. .env/bin/activate
|
||||
python3 -m pip install -U pip virtualenv
|
||||
python3 -m pip install -r requirements.txt
|
||||
# uv handles all dependency installation automatically
|
||||
uv sync
|
||||
}
|
||||
|
||||
publicIpFromInterface() {
|
||||
|
@ -45,15 +46,47 @@ publicIpFromInterface() {
|
|||
echo "Using ${ENDPOINT} as the endpoint"
|
||||
}
|
||||
|
||||
tryGetMetadata() {
|
||||
# Helper function to fetch metadata with retry
|
||||
url="$1"
|
||||
headers="$2"
|
||||
response=""
|
||||
|
||||
# Try up to 2 times
|
||||
for attempt in 1 2; do
|
||||
if [ -n "$headers" ]; then
|
||||
response="$(curl -s --connect-timeout 5 --max-time "${METADATA_TIMEOUT}" -H "$headers" "$url" || true)"
|
||||
else
|
||||
response="$(curl -s --connect-timeout 5 --max-time "${METADATA_TIMEOUT}" "$url" || true)"
|
||||
fi
|
||||
|
||||
# If we got a response, return it
|
||||
if [ -n "$response" ]; then
|
||||
echo "$response"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Wait before retry (only on first attempt)
|
||||
[ $attempt -eq 1 ] && sleep 2
|
||||
done
|
||||
|
||||
# Return empty string if all attempts failed
|
||||
echo ""
|
||||
return 1
|
||||
}
|
||||
|
||||
publicIpFromMetadata() {
|
||||
if curl -s http://169.254.169.254/metadata/v1/vendor-data | grep DigitalOcean >/dev/null; then
|
||||
ENDPOINT="$(curl -s http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address)"
|
||||
elif test "$(curl -s http://169.254.169.254/latest/meta-data/services/domain)" = "amazonaws.com"; then
|
||||
ENDPOINT="$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)"
|
||||
# Set default timeout from environment or use 20 seconds
|
||||
METADATA_TIMEOUT="${METADATA_TIMEOUT:-20}"
|
||||
|
||||
if tryGetMetadata "http://169.254.169.254/metadata/v1/vendor-data" "" | grep DigitalOcean >/dev/null; then
|
||||
ENDPOINT="$(tryGetMetadata "http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address" "")"
|
||||
elif test "$(tryGetMetadata "http://169.254.169.254/latest/meta-data/services/domain" "")" = "amazonaws.com"; then
|
||||
ENDPOINT="$(tryGetMetadata "http://169.254.169.254/latest/meta-data/public-ipv4" "")"
|
||||
elif host -t A -W 10 metadata.google.internal 127.0.0.53 >/dev/null; then
|
||||
ENDPOINT="$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip")"
|
||||
elif test "$(curl -s -H Metadata:true 'http://169.254.169.254/metadata/instance/compute/publisher/?api-version=2017-04-02&format=text')" = "Canonical"; then
|
||||
ENDPOINT="$(curl -H Metadata:true 'http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/publicIpAddress?api-version=2017-04-02&format=text')"
|
||||
ENDPOINT="$(tryGetMetadata "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip" "Metadata-Flavor: Google")"
|
||||
elif test "$(tryGetMetadata "http://169.254.169.254/metadata/instance/compute/publisher/?api-version=2017-04-02&format=text" "Metadata:true")" = "Canonical"; then
|
||||
ENDPOINT="$(tryGetMetadata "http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/publicIpAddress?api-version=2017-04-02&format=text" "Metadata:true")"
|
||||
fi
|
||||
|
||||
if echo "${ENDPOINT}" | grep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b"; then
|
||||
|
@ -68,15 +101,13 @@ deployAlgo() {
|
|||
getAlgo
|
||||
|
||||
cd /opt/algo
|
||||
# shellcheck source=/dev/null
|
||||
. .env/bin/activate
|
||||
|
||||
export HOME=/root
|
||||
export ANSIBLE_LOCAL_TEMP=/root/.ansible/tmp
|
||||
export ANSIBLE_REMOTE_TEMP=/root/.ansible/tmp
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
ansible-playbook main.yml \
|
||||
uv run ansible-playbook main.yml \
|
||||
-e provider=local \
|
||||
-e "ondemand_cellular=${ONDEMAND_CELLULAR}" \
|
||||
-e "ondemand_wifi=${ONDEMAND_WIFI}" \
|
||||
|
|
|
@ -3,7 +3,11 @@
|
|||
# (c) 2015, Patrick F. Marques <patrickfmarques@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
import json
|
||||
import time
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, env_fallback
|
||||
from ansible.module_utils.digital_ocean import DigitalOceanHelper
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
|
@ -104,12 +108,6 @@ data:
|
|||
}
|
||||
'''
|
||||
|
||||
import json
|
||||
import time
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, env_fallback
|
||||
from ansible.module_utils.digital_ocean import DigitalOceanHelper
|
||||
|
||||
|
||||
class Response:
|
||||
|
||||
|
@ -138,9 +136,8 @@ def wait_action(module, rest, ip, action_id, timeout=10):
|
|||
end_time = time.time() + 10
|
||||
while time.time() < end_time:
|
||||
response = rest.get(f'floating_ips/{ip}/actions/{action_id}')
|
||||
status_code = response.status_code
|
||||
# status_code = response.status_code # TODO: check status_code == 200?
|
||||
status = response.json['action']['status']
|
||||
# TODO: check status_code == 200?
|
||||
if status == 'completed':
|
||||
return True
|
||||
elif status == 'errored':
|
||||
|
@ -150,7 +147,7 @@ def wait_action(module, rest, ip, action_id, timeout=10):
|
|||
|
||||
|
||||
def core(module):
|
||||
api_token = module.params['oauth_token']
|
||||
# api_token = module.params['oauth_token'] # unused for now
|
||||
state = module.params['state']
|
||||
ip = module.params['ip']
|
||||
droplet_id = module.params['droplet_id']
|
||||
|
@ -185,7 +182,7 @@ def get_floating_ip_details(module, rest):
|
|||
if status_code == 200:
|
||||
return json_data['floating_ip']
|
||||
else:
|
||||
module.fail_json(msg="Error assigning floating ip [{0}: {1}]".format(
|
||||
module.fail_json(msg="Error assigning floating ip [{}: {}]".format(
|
||||
status_code, json_data["message"]), region=module.params['region'])
|
||||
|
||||
|
||||
|
@ -205,7 +202,7 @@ def assign_floating_id_to_droplet(module, rest):
|
|||
|
||||
module.exit_json(changed=True, data=json_data)
|
||||
else:
|
||||
module.fail_json(msg="Error creating floating ip [{0}: {1}]".format(
|
||||
module.fail_json(msg="Error creating floating ip [{}: {}]".format(
|
||||
status_code, json_data["message"]), region=module.params['region'])
|
||||
|
||||
|
||||
|
@ -247,26 +244,26 @@ def create_floating_ips(module, rest):
|
|||
if status_code == 202:
|
||||
module.exit_json(changed=True, data=json_data)
|
||||
else:
|
||||
module.fail_json(msg="Error creating floating ip [{0}: {1}]".format(
|
||||
module.fail_json(msg="Error creating floating ip [{}: {}]".format(
|
||||
status_code, json_data["message"]), region=module.params['region'])
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
state=dict(choices=['present', 'absent'], default='present'),
|
||||
ip=dict(aliases=['id'], required=False),
|
||||
region=dict(required=False),
|
||||
droplet_id=dict(required=False, type='int'),
|
||||
oauth_token=dict(
|
||||
no_log=True,
|
||||
argument_spec={
|
||||
'state': {'choices': ['present', 'absent'], 'default': 'present'},
|
||||
'ip': {'aliases': ['id'], 'required': False},
|
||||
'region': {'required': False},
|
||||
'droplet_id': {'required': False, 'type': 'int'},
|
||||
'oauth_token': {
|
||||
'no_log': True,
|
||||
# Support environment variable for DigitalOcean OAuth Token
|
||||
fallback=(env_fallback, ['DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN']),
|
||||
required=True,
|
||||
),
|
||||
validate_certs=dict(type='bool', default=True),
|
||||
timeout=dict(type='int', default=30),
|
||||
),
|
||||
'fallback': (env_fallback, ['DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN']),
|
||||
'required': True,
|
||||
},
|
||||
'validate_certs': {'type': 'bool', 'default': True},
|
||||
'timeout': {'type': 'int', 'default': 30},
|
||||
},
|
||||
required_if=[
|
||||
('state', 'delete', ['ip'])
|
||||
],
|
||||
|
|
|
@ -2,26 +2,23 @@
|
|||
|
||||
|
||||
|
||||
import json
|
||||
|
||||
from ansible.module_utils.gcp_utils import GcpModule, GcpSession, navigate_hash
|
||||
|
||||
################################################################################
|
||||
# Documentation
|
||||
################################################################################
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
|
||||
|
||||
################################################################################
|
||||
# Imports
|
||||
################################################################################
|
||||
import json
|
||||
|
||||
from ansible.module_utils.gcp_utils import GcpModule, GcpSession, navigate_hash
|
||||
|
||||
################################################################################
|
||||
# Main
|
||||
################################################################################
|
||||
|
||||
|
||||
def main():
|
||||
module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), scope=dict(required=True, type='str')))
|
||||
module = GcpModule(argument_spec={'filters': {'type': 'list', 'elements': 'str'}, 'scope': {'required': True, 'type': 'str'}})
|
||||
|
||||
if module._name == 'gcp_compute_image_facts':
|
||||
module.deprecate("The 'gcp_compute_image_facts' module has been renamed to 'gcp_compute_regions_info'", version='2.13')
|
||||
|
@ -59,7 +56,7 @@ def query_options(filters):
|
|||
for f in filters:
|
||||
# For multiple queries, all queries should have ()
|
||||
if f[0] != '(' and f[-1] != ')':
|
||||
queries.append("(%s)" % ''.join(f))
|
||||
queries.append("({})".format(''.join(f)))
|
||||
else:
|
||||
queries.append(f)
|
||||
|
||||
|
@ -79,7 +76,7 @@ def return_if_object(module, response):
|
|||
module.raise_for_status(response)
|
||||
result = response.json()
|
||||
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
|
||||
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
|
||||
module.fail_json(msg=f"Invalid JSON response with error: {inst}")
|
||||
|
||||
if navigate_hash(result, ['error', 'errors']):
|
||||
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
|
||||
|
|
135
library/x25519_pubkey.py
Executable file
|
@ -0,0 +1,135 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# x25519_pubkey.py - Ansible module to derive a base64-encoded WireGuard-compatible public key
|
||||
# from a base64-encoded 32-byte X25519 private key.
|
||||
#
|
||||
# Why: community.crypto does not provide raw public key derivation for X25519 keys.
|
||||
|
||||
import base64
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from cryptography.hazmat.primitives import serialization
|
||||
from cryptography.hazmat.primitives.asymmetric import x25519
|
||||
|
||||
"""
|
||||
Ansible module to derive base64-encoded X25519 public keys from private keys.
|
||||
|
||||
Supports both base64-encoded strings and raw 32-byte key files.
|
||||
Used for WireGuard key generation where community.crypto lacks raw public key derivation.
|
||||
|
||||
Parameters:
|
||||
- private_key_b64: Base64-encoded X25519 private key string
|
||||
- private_key_path: Path to file containing X25519 private key (base64 or raw 32 bytes)
|
||||
- public_key_path: Path where the derived public key should be written
|
||||
|
||||
Returns:
|
||||
- public_key: Base64-encoded X25519 public key
|
||||
- changed: Whether the public key file was modified
|
||||
- public_key_path: Path where public key was written (if specified)
|
||||
"""
|
||||
|
||||
|
||||
def run_module():
|
||||
"""
|
||||
Main execution function for the x25519_pubkey Ansible module.
|
||||
|
||||
Handles parameter validation, private key processing, public key derivation,
|
||||
and optional file output with idempotent behavior.
|
||||
"""
|
||||
module_args = {
|
||||
'private_key_b64': {'type': 'str', 'required': False},
|
||||
'private_key_path': {'type': 'path', 'required': False},
|
||||
'public_key_path': {'type': 'path', 'required': False},
|
||||
}
|
||||
|
||||
result = {
|
||||
'changed': False,
|
||||
'public_key': '',
|
||||
}
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=module_args,
|
||||
required_one_of=[['private_key_b64', 'private_key_path']],
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
priv_b64 = None
|
||||
|
||||
if module.params['private_key_path']:
|
||||
try:
|
||||
with open(module.params['private_key_path'], 'rb') as f:
|
||||
data = f.read()
|
||||
try:
|
||||
# First attempt: assume file contains base64 text data
|
||||
# Strip whitespace from edges for text files (safe for base64 strings)
|
||||
stripped_data = data.strip()
|
||||
base64.b64decode(stripped_data, validate=True)
|
||||
priv_b64 = stripped_data.decode()
|
||||
except (base64.binascii.Error, ValueError):
|
||||
# Second attempt: assume file contains raw binary data
|
||||
# CRITICAL: Do NOT strip raw binary data - X25519 keys can contain
|
||||
# whitespace-like bytes (0x09, 0x0A, etc.) that must be preserved
|
||||
# Stripping would corrupt the key and cause "got 31 bytes" errors
|
||||
if len(data) != 32:
|
||||
module.fail_json(msg=f"Private key file must be either base64 or exactly 32 raw bytes, got {len(data)} bytes")
|
||||
priv_b64 = base64.b64encode(data).decode()
|
||||
except OSError as e:
|
||||
module.fail_json(msg=f"Failed to read private key file: {e}")
|
||||
else:
|
||||
priv_b64 = module.params['private_key_b64']
|
||||
|
||||
# Validate input parameters
|
||||
if not priv_b64:
|
||||
module.fail_json(msg="No private key provided")
|
||||
|
||||
try:
|
||||
priv_raw = base64.b64decode(priv_b64, validate=True)
|
||||
except Exception as e:
|
||||
module.fail_json(msg=f"Invalid base64 private key format: {e}")
|
||||
|
||||
if len(priv_raw) != 32:
|
||||
module.fail_json(msg=f"Private key must decode to exactly 32 bytes, got {len(priv_raw)}")
|
||||
|
||||
try:
|
||||
priv_key = x25519.X25519PrivateKey.from_private_bytes(priv_raw)
|
||||
pub_key = priv_key.public_key()
|
||||
pub_raw = pub_key.public_bytes(
|
||||
encoding=serialization.Encoding.Raw,
|
||||
format=serialization.PublicFormat.Raw
|
||||
)
|
||||
pub_b64 = base64.b64encode(pub_raw).decode()
|
||||
result['public_key'] = pub_b64
|
||||
|
||||
if module.params['public_key_path']:
|
||||
pub_path = module.params['public_key_path']
|
||||
existing = None
|
||||
|
||||
try:
|
||||
with open(pub_path) as f:
|
||||
existing = f.read().strip()
|
||||
except OSError:
|
||||
existing = None
|
||||
|
||||
if existing != pub_b64:
|
||||
try:
|
||||
with open(pub_path, 'w') as f:
|
||||
f.write(pub_b64)
|
||||
result['changed'] = True
|
||||
except OSError as e:
|
||||
module.fail_json(msg=f"Failed to write public key file: {e}")
|
||||
|
||||
result['public_key_path'] = pub_path
|
||||
|
||||
except Exception as e:
|
||||
module.fail_json(msg=f"Failed to derive public key: {e}")
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
def main():
|
||||
"""Entry point when module is executed directly."""
|
||||
run_module()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
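A hypothetical task snippet showing how a playbook might call this module (the paths are placeholders; the module is picked up automatically from `library/` when playbooks run from the repository root):

```yaml
- name: Derive the WireGuard public key from the stored private key
  x25519_pubkey:
    private_key_path: /etc/wireguard/example.private
    public_key_path: /etc/wireguard/example.public
  register: wg_pubkey

- name: Show the derived public key
  debug:
    msg: "{{ wg_pubkey.public_key }}"
```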
|
40
main.yml
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
- hosts: localhost
|
||||
- name: Algo VPN Setup
|
||||
hosts: localhost
|
||||
become: false
|
||||
tasks:
|
||||
- name: Playbook dir stat
|
||||
|
@ -16,39 +17,48 @@
|
|||
|
||||
- name: Ensure the requirements installed
|
||||
debug:
|
||||
msg: "{{ '' | ipaddr }}"
|
||||
msg: "{{ '192.168.1.1' | ansible.utils.ipaddr }}"
|
||||
ignore_errors: true
|
||||
no_log: true
|
||||
register: ipaddr
|
||||
|
||||
- name: Set required ansible version as a fact
|
||||
- name: Extract ansible version from pyproject.toml
|
||||
set_fact:
|
||||
required_ansible_version: "{{ item | regex_replace('^ansible[\\s+]?(?P<op>[=,>,<]+)[\\s+]?(?P<ver>\\d.\\d+(.\\d+)?)$', '{\"op\": \"\\g<op>\",\"ver\"\
|
||||
: \"\\g<ver>\" }') }}"
|
||||
when: '"ansible" in item'
|
||||
with_items: "{{ lookup('file', 'requirements.txt').splitlines() }}"
|
||||
ansible_requirement: "{{ lookup('file', 'pyproject.toml') | regex_search('ansible==[0-9]+\\.[0-9]+\\.[0-9]+') }}"
|
||||
|
||||
- name: Just get the list from default pip
|
||||
community.general.pip_package_info:
|
||||
register: pip_package_info
|
||||
- name: Parse ansible version requirement
|
||||
set_fact:
|
||||
required_ansible_version:
|
||||
op: "{{ ansible_requirement | regex_replace('^ansible\\s*([~>=<]+)\\s*.*$', '\\1') }}"
|
||||
ver: "{{ ansible_requirement | regex_replace('^ansible\\s*[~>=<]+\\s*(\\d+\\.\\d+(?:\\.\\d+)?).*$', '\\1') }}"
|
||||
when: ansible_requirement is defined
|
||||
|
||||
- name: Get current ansible package version
|
||||
command: uv pip list
|
||||
register: uv_package_list
|
||||
changed_when: false
|
||||
|
||||
- name: Extract ansible version from uv package list
|
||||
set_fact:
|
||||
current_ansible_version: "{{ uv_package_list.stdout | regex_search('ansible\\s+([0-9]+\\.[0-9]+\\.[0-9]+)', '\\1') | first }}"
|
||||
|
||||
- name: Verify Python meets Algo VPN requirements
|
||||
assert:
|
||||
that: (ansible_python.version.major|string + '.' + ansible_python.version.minor|string) is version('3.8', '>=')
|
||||
that: (ansible_python.version.major|string + '.' + ansible_python.version.minor|string) is version('3.11', '>=')
|
||||
msg: >
|
||||
Python version is not supported.
|
||||
You must upgrade to at least Python 3.8 to use this version of Algo.
|
||||
You must upgrade to at least Python 3.11 to use this version of Algo.
|
||||
For more details, see https://trailofbits.github.io/algo/troubleshooting.html#python-version-is-not-supported
|
||||
|
||||
- name: Verify Ansible meets Algo VPN requirements
|
||||
assert:
|
||||
that:
|
||||
- pip_package_info.packages.pip.ansible.0.version is version(required_ansible_version.ver, required_ansible_version.op)
|
||||
- current_ansible_version is version(required_ansible_version.ver, required_ansible_version.op)
|
||||
- not ipaddr.failed
|
||||
msg: >
|
||||
Ansible version is {{ pip_package_info.packages.pip.ansible.0.version }}.
|
||||
Ansible version is {{ current_ansible_version }}.
|
||||
You must update the requirements to use this version of Algo.
|
||||
Try to run python3 -m pip install -U -r requirements.txt
|
||||
Try to run: uv sync
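A worked example of what this chain of tasks produces, assuming pyproject.toml pins `ansible==11.8.0` (the `uv pip list` line is illustrative):

```yaml
# ansible_requirement       -> "ansible==11.8.0"            (regex_search on pyproject.toml)
# required_ansible_version  -> { op: "==", ver: "11.8.0" }
# `uv pip list` line        -> "ansible    11.8.0"
# current_ansible_version   -> "11.8.0"
# assert                    -> "11.8.0" is version("11.8.0", "==")  => passes
```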
|
||||
|
||||
- name: Include prompts playbook
|
||||
import_playbook: input.yml
|
||||
|
|
|
@ -49,7 +49,10 @@
|
|||
- debug:
|
||||
var: IP_subject_alt_name
|
||||
|
||||
- name: Wait 600 seconds for target connection to become reachable/usable
|
||||
- name: Wait for target connection to become reachable/usable
|
||||
wait_for_connection:
|
||||
delay: 10 # Wait 10 seconds before first attempt (conservative)
|
||||
timeout: 480 # Reduce from 600 to 480 seconds (8 minutes - safer)
|
||||
sleep: 10 # Check every 10 seconds (less aggressive polling)
|
||||
delegate_to: "{{ item }}"
|
||||
loop: "{{ groups['vpn-host'] }}"
|
||||
|
|
|
@ -16,28 +16,38 @@
|
|||
> /dev/tty || true
|
||||
tags: debug
|
||||
|
||||
- name: Install the requirements
|
||||
pip:
|
||||
state: present
|
||||
name:
|
||||
- pyOpenSSL>=0.15
|
||||
- segno
|
||||
tags:
|
||||
- always
|
||||
- skip_ansible_lint
|
||||
# Install cloud provider specific dependencies
|
||||
- name: Install cloud provider dependencies
|
||||
shell: uv pip install '.[{{ cloud_provider_extra }}]'
|
||||
vars:
|
||||
cloud_provider_extra: >-
|
||||
{%- if algo_provider in ['ec2', 'lightsail'] -%}aws
|
||||
{%- elif algo_provider == 'azure' -%}azure
|
||||
{%- elif algo_provider == 'gce' -%}gcp
|
||||
{%- elif algo_provider == 'hetzner' -%}hetzner
|
||||
{%- elif algo_provider == 'linode' -%}linode
|
||||
{%- elif algo_provider == 'openstack' -%}openstack
|
||||
{%- elif algo_provider == 'cloudstack' -%}cloudstack
|
||||
{%- else -%}{{ algo_provider }}
|
||||
{%- endif -%}
|
||||
when: algo_provider != "local"
|
||||
changed_when: false
|
||||
|
||||
# Note: pyOpenSSL and segno are now included in pyproject.toml dependencies
|
||||
# and installed automatically by uv sync
|
||||
delegate_to: localhost
|
||||
become: false
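To illustrate, the templated command above expands to something like the following for a few providers (the extras map to the `[project.optional-dependencies]` groups in pyproject.toml):

```bash
uv pip install '.[aws]'    # algo_provider is ec2 or lightsail
uv pip install '.[gcp]'    # algo_provider is gce
uv pip install '.[azure]'  # algo_provider is azure
```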
|
||||
|
||||
- block:
|
||||
- name: Generate the SSH private key
|
||||
openssl_privatekey:
|
||||
community.crypto.openssl_privatekey:
|
||||
path: "{{ SSH_keys.private }}"
|
||||
size: 4096
|
||||
mode: "0600"
|
||||
type: RSA
|
||||
|
||||
- name: Generate the SSH public key
|
||||
openssl_publickey:
|
||||
community.crypto.openssl_publickey:
|
||||
path: "{{ SSH_keys.public }}"
|
||||
privatekey_path: "{{ SSH_keys.private }}"
|
||||
format: OpenSSH
|
||||
|
|
|
@ -1,7 +1,62 @@
|
|||
[build-system]
|
||||
requires = ["setuptools>=68.0.0"]
|
||||
build-backend = "setuptools.build_meta"
|
||||
|
||||
[project]
|
||||
name = "algo"
|
||||
description = "Set up a personal IPSEC VPN in the cloud"
|
||||
version = "2.0.0-beta"
|
||||
requires-python = ">=3.11"
|
||||
dependencies = [
|
||||
"ansible==11.8.0",
|
||||
"jinja2>=3.1.6",
|
||||
"netaddr==1.3.0",
|
||||
"pyyaml>=6.0.2",
|
||||
"pyopenssl>=0.15",
|
||||
"segno>=1.6.0",
|
||||
]
|
||||
|
||||
[tool.setuptools]
|
||||
# Explicitly disable package discovery since Algo is not a Python package
|
||||
py-modules = []
|
||||
|
||||
[project.optional-dependencies]
|
||||
# Cloud provider dependencies (installed automatically based on provider selection)
|
||||
aws = [
|
||||
"boto3>=1.34.0",
|
||||
"boto>=2.49.0",
|
||||
]
|
||||
azure = [
|
||||
"azure-identity>=1.15.0",
|
||||
"azure-mgmt-compute>=30.0.0",
|
||||
"azure-mgmt-network>=25.0.0",
|
||||
"azure-mgmt-resource>=23.0.0",
|
||||
"msrestazure>=0.6.4",
|
||||
]
|
||||
gcp = [
|
||||
"google-auth>=2.28.0",
|
||||
"requests>=2.31.0",
|
||||
]
|
||||
hetzner = [
|
||||
"hcloud>=1.33.0",
|
||||
]
|
||||
linode = [
|
||||
"linode-api4>=5.15.0",
|
||||
]
|
||||
openstack = [
|
||||
"openstacksdk>=2.1.0",
|
||||
]
|
||||
cloudstack = [
|
||||
"cs>=3.0.0",
|
||||
"sshpubkeys>=3.3.1",
|
||||
]
|
||||
|
||||
[tool.ruff]
|
||||
# Ruff configuration
|
||||
target-version = "py310"
|
||||
target-version = "py311"
|
||||
line-length = 120
|
||||
|
||||
[tool.ruff.lint]
|
||||
select = [
|
||||
"E", # pycodestyle errors
|
||||
"W", # pycodestyle warnings
|
||||
|
@ -13,4 +68,31 @@ select = [
|
|||
]
|
||||
ignore = [
|
||||
"E501", # line too long (handled by formatter)
|
||||
]
|
||||
"B011", # assert False is acceptable in test code
|
||||
]
|
||||
|
||||
[tool.ruff.lint.per-file-ignores]
|
||||
"library/*" = ["ALL"] # Exclude Ansible library modules (external code)
|
||||
|
||||
[tool.uv]
|
||||
# Centralized uv version management
|
||||
dev-dependencies = [
|
||||
"pytest>=8.0.0",
|
||||
"pytest-xdist>=3.0.0", # Parallel test execution
|
||||
]
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
testpaths = ["tests"]
|
||||
python_files = ["test_*.py"]
|
||||
python_classes = ["Test*"]
|
||||
python_functions = ["test_*"]
|
||||
addopts = [
|
||||
"-v", # Verbose output
|
||||
"--strict-markers", # Strict marker validation
|
||||
"--strict-config", # Strict config validation
|
||||
"--tb=short", # Short traceback format
|
||||
]
|
||||
markers = [
|
||||
"slow: marks tests as slow (deselect with '-m \"not slow\"')",
|
||||
"integration: marks tests as integration tests",
|
||||
]
|
||||
|
|
|
@ -1,3 +0,0 @@
|
|||
ansible==9.1.0
|
||||
jinja2~=3.1.3
|
||||
netaddr
|
|
@ -1,6 +1,10 @@
|
|||
---
|
||||
collections:
|
||||
- name: ansible.posix
|
||||
version: "==2.1.0"
|
||||
- name: community.general
|
||||
version: "==11.1.0"
|
||||
- name: community.crypto
|
||||
- name: openstack.cloud
|
||||
version: "==3.0.3"
|
||||
- name: openstack.cloud
|
||||
version: "==2.4.1"
|
||||
|
|
|
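As a usage sketch (the requirements file path is assumed to be the repository's requirements.yml), the pinned collections above can be installed ahead of a run with a task along these lines:

- name: Install pinned Ansible collections (sketch)
  command: ansible-galaxy collection install -r requirements.yml
  delegate_to: localhost
  changed_when: false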
@ -1,6 +1,3 @@
|
|||
---
|
||||
- name: Install requirements
|
||||
pip:
|
||||
requirements: https://raw.githubusercontent.com/ansible-collections/azure/v3.7.0/requirements-azure.txt
|
||||
state: latest
|
||||
virtualenv_python: python3
|
||||
# Azure dependencies are now managed via pyproject.toml optional dependencies
|
||||
# They will be installed automatically when needed
|
||||
|
|
|
@ -32,8 +32,10 @@
|
|||
- set_fact:
|
||||
algo_cs_key: "{{ cs_key | default(_cs_key.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_KEY'), true) }}"
|
||||
algo_cs_token: "{{ cs_secret | default(_cs_secret.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_SECRET'), true) }}"
|
||||
algo_cs_url: "{{ cs_url | default(_cs_url.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_ENDPOINT'), true) | default('https://api.exoscale.com/compute',\
|
||||
\ true) }}"
|
||||
algo_cs_url: >-
|
||||
{{ cs_url | default(_cs_url.user_input|default(None)) |
|
||||
default(lookup('env', 'CLOUDSTACK_ENDPOINT'), true) |
|
||||
default('https://api.exoscale.com/compute', true) }}
|
||||
no_log: true
|
||||
|
||||
- name: Get zones on cloud
|
||||
|
|
|
@ -1,8 +1,3 @@
|
|||
---
|
||||
- name: Install requirements
|
||||
pip:
|
||||
name:
|
||||
- cs
|
||||
- sshpubkeys
|
||||
state: latest
|
||||
virtualenv_python: python3
|
||||
# CloudStack dependencies are now managed via pyproject.toml optional dependencies
|
||||
# They will be installed automatically when needed
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
unique_name: true
|
||||
ipv6: true
|
||||
ssh_keys: "{{ do_ssh_key.data.ssh_key.id }}"
|
||||
user_data: "{{ lookup('template', 'files/cloud-init/base.yml') }}"
|
||||
user_data: "{{ lookup('template', 'files/cloud-init/base.yml') | string }}"
|
||||
tags:
|
||||
- Environment:Algo
|
||||
register: digital_ocean_droplet
|
||||
|
|
|
@ -5,10 +5,8 @@ Parameters:
|
|||
InstanceTypeParameter:
|
||||
Type: String
|
||||
Default: t2.micro
|
||||
PublicSSHKeyParameter:
|
||||
Type: String
|
||||
ImageIdParameter:
|
||||
Type: String
|
||||
Type: AWS::EC2::Image::Id
|
||||
WireGuardPort:
|
||||
Type: String
|
||||
UseThisElasticIP:
|
||||
|
@ -83,8 +81,6 @@ Resources:
|
|||
Route:
|
||||
Type: AWS::EC2::Route
|
||||
DependsOn:
|
||||
- InternetGateway
|
||||
- RouteTable
|
||||
- VPCGatewayAttachment
|
||||
Properties:
|
||||
RouteTableId: !Ref RouteTable
|
||||
|
@ -94,8 +90,6 @@ Resources:
|
|||
RouteIPv6:
|
||||
Type: AWS::EC2::Route
|
||||
DependsOn:
|
||||
- InternetGateway
|
||||
- RouteTable
|
||||
- VPCGatewayAttachment
|
||||
Properties:
|
||||
RouteTableId: !Ref RouteTable
|
||||
|
@ -105,8 +99,6 @@ Resources:
|
|||
SubnetIPv6:
|
||||
Type: AWS::EC2::SubnetCidrBlock
|
||||
DependsOn:
|
||||
- RouteIPv6
|
||||
- VPC
|
||||
- VPCIPv6
|
||||
Properties:
|
||||
Ipv6CidrBlock:
|
||||
|
@ -118,10 +110,6 @@ Resources:
|
|||
|
||||
RouteSubnet:
|
||||
Type: "AWS::EC2::SubnetRouteTableAssociation"
|
||||
DependsOn:
|
||||
- RouteTable
|
||||
- Subnet
|
||||
- Route
|
||||
Properties:
|
||||
RouteTableId: !Ref RouteTable
|
||||
SubnetId: !Ref Subnet
|
||||
|
@ -167,8 +155,6 @@ Resources:
|
|||
Type: AWS::EC2::Instance
|
||||
DependsOn:
|
||||
- SubnetIPv6
|
||||
- Subnet
|
||||
- InstanceSecurityGroup
|
||||
Properties:
|
||||
InstanceType:
|
||||
Ref: InstanceTypeParameter
|
||||
|
@ -205,7 +191,6 @@ Resources:
|
|||
Domain: vpc
|
||||
InstanceId: !Ref EC2Instance
|
||||
DependsOn:
|
||||
- EC2Instance
|
||||
- VPCGatewayAttachment
|
||||
|
||||
ElasticIPAssociation:
|
||||
|
|
|
@ -3,13 +3,13 @@
|
|||
cloudformation:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
aws_session_token: "{{ session_token if session_token else omit }}"
|
||||
stack_name: "{{ stack_name }}"
|
||||
state: present
|
||||
region: "{{ algo_region }}"
|
||||
template: roles/cloud-ec2/files/stack.yaml
|
||||
template_parameters:
|
||||
InstanceTypeParameter: "{{ cloud_providers.ec2.size }}"
|
||||
PublicSSHKeyParameter: "{{ lookup('file', SSH_keys.public) }}"
|
||||
ImageIdParameter: "{{ ami_image }}"
|
||||
WireGuardPort: "{{ wireguard_port }}"
|
||||
UseThisElasticIP: "{{ existing_eip }}"
|
||||
|
|
|
@ -1,4 +1,30 @@
|
|||
---
|
||||
# Discover AWS credentials from standard locations
|
||||
- name: Set AWS credentials file path
|
||||
set_fact:
|
||||
aws_credentials_path: "{{ lookup('env', 'AWS_SHARED_CREDENTIALS_FILE') | default(lookup('env', 'HOME') + '/.aws/credentials', true) }}"
|
||||
aws_profile: "{{ lookup('env', 'AWS_PROFILE') | default('default', true) }}"
|
||||
|
||||
# Try to read credentials from file if not already provided
|
||||
- block:
|
||||
- name: Check if AWS credentials file exists
|
||||
stat:
|
||||
path: "{{ aws_credentials_path }}"
|
||||
register: aws_creds_file
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Read AWS credentials from file
|
||||
set_fact:
|
||||
_file_access_key: "{{ lookup('ini', 'aws_access_key_id', section=aws_profile, file=aws_credentials_path, errors='ignore') | default('', true) }}"
|
||||
_file_secret_key: "{{ lookup('ini', 'aws_secret_access_key', section=aws_profile, file=aws_credentials_path, errors='ignore') | default('', true) }}"
|
||||
_file_session_token: "{{ lookup('ini', 'aws_session_token', section=aws_profile, file=aws_credentials_path, errors='ignore') | default('', true) }}"
|
||||
when: aws_creds_file.stat.exists
|
||||
no_log: true
|
||||
when:
|
||||
- aws_access_key is undefined
|
||||
- lookup('env','AWS_ACCESS_KEY_ID')|length <= 0
|
||||
|
||||
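For context, the ini lookups above expect the standard AWS shared credentials layout; the section name comes from AWS_PROFILE (default 'default'). A minimal sketch, with invented values:

# ~/.aws/credentials (illustrative)
# [default]
# aws_access_key_id = AKIAEXAMPLEKEY
# aws_secret_access_key = exampleSecretValue
# aws_session_token = exampleSessionToken   # optional, only for temporary credentials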
# Prompt for credentials if still not available
|
||||
- pause:
|
||||
prompt: |
|
||||
Enter your AWS Access Key ID (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
|
||||
|
@ -8,6 +34,7 @@
|
|||
when:
|
||||
- aws_access_key is undefined
|
||||
- lookup('env','AWS_ACCESS_KEY_ID')|length <= 0
|
||||
- _file_access_key is undefined or _file_access_key|length <= 0
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
|
@ -17,10 +44,25 @@
|
|||
when:
|
||||
- aws_secret_key is undefined
|
||||
- lookup('env','AWS_SECRET_ACCESS_KEY')|length <= 0
|
||||
- _file_secret_key is undefined or _file_secret_key|length <= 0
|
||||
|
||||
# Set final credentials with proper precedence
|
||||
- set_fact:
|
||||
access_key: "{{ aws_access_key | default(_aws_access_key.user_input|default(None)) | default(lookup('env','AWS_ACCESS_KEY_ID'), true) }}"
|
||||
secret_key: "{{ aws_secret_key | default(_aws_secret_key.user_input|default(None)) | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}"
|
||||
access_key: >-
|
||||
{{ aws_access_key
|
||||
| default(lookup('env', 'AWS_ACCESS_KEY_ID'))
|
||||
| default(_file_access_key)
|
||||
| default(_aws_access_key.user_input | default(None)) }}
|
||||
secret_key: >-
|
||||
{{ aws_secret_key
|
||||
| default(lookup('env', 'AWS_SECRET_ACCESS_KEY'))
|
||||
| default(_file_secret_key)
|
||||
| default(_aws_secret_key.user_input | default(None)) }}
|
||||
session_token: >-
|
||||
{{ aws_session_token
|
||||
| default(lookup('env', 'AWS_SESSION_TOKEN'))
|
||||
| default(_file_session_token)
|
||||
| default('') }}
|
||||
no_log: true
|
||||
|
||||
- block:
|
||||
|
@ -28,6 +70,7 @@
|
|||
aws_region_info:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
aws_session_token: "{{ session_token if session_token else omit }}"
|
||||
region: us-east-1
|
||||
register: _aws_regions
|
||||
no_log: true
|
||||
|
@ -69,6 +112,7 @@
|
|||
ec2_eip_info:
|
||||
aws_access_key: "{{ access_key }}"
|
||||
aws_secret_key: "{{ secret_key }}"
|
||||
aws_session_token: "{{ session_token if session_token else omit }}"
|
||||
region: "{{ algo_region }}"
|
||||
register: raw_eip_addresses
|
||||
no_log: true
|
||||
|
|
|
@ -1,8 +1,3 @@
|
|||
---
|
||||
- name: Install requirements
|
||||
pip:
|
||||
name:
|
||||
- boto>=2.5
|
||||
- boto3
|
||||
state: latest
|
||||
virtualenv_python: python3
|
||||
# AWS dependencies are now managed via pyproject.toml optional dependencies
|
||||
# They will be installed automatically when needed
|
||||
|
|
|
@ -10,8 +10,9 @@
|
|||
no_log: true
|
||||
|
||||
- set_fact:
|
||||
credentials_file_path: "{{ gce_credentials_file | default(_gce_credentials_file.user_input|default(None)) | default(lookup('env','GCE_CREDENTIALS_FILE_PATH'),\
|
||||
\ true) }}"
|
||||
credentials_file_path: >-
|
||||
{{ gce_credentials_file | default(_gce_credentials_file.user_input|default(None)) |
|
||||
default(lookup('env','GCE_CREDENTIALS_FILE_PATH'), true) }}
|
||||
ssh_public_key_lookup: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
|
||||
no_log: true
|
||||
|
||||
|
|
|
@ -1,8 +1,3 @@
|
|||
---
|
||||
- name: Install requirements
|
||||
pip:
|
||||
name:
|
||||
- requests>=2.18.4
|
||||
- google-auth>=1.3.0
|
||||
state: latest
|
||||
virtualenv_python: python3
|
||||
# GCP dependencies are now managed via pyproject.toml optional dependencies
|
||||
# They will be installed automatically when needed
|
||||
|
|
|
@ -1,7 +1,3 @@
|
|||
---
|
||||
- name: Install requirements
|
||||
pip:
|
||||
name:
|
||||
- hcloud
|
||||
state: latest
|
||||
virtualenv_python: python3
|
||||
# Hetzner dependencies are now managed via pyproject.toml optional dependencies
|
||||
# They will be installed automatically when needed
|
||||
|
|
|
@ -1,8 +1,3 @@
|
|||
---
|
||||
- name: Install requirements
|
||||
pip:
|
||||
name:
|
||||
- boto>=2.5
|
||||
- boto3
|
||||
state: latest
|
||||
virtualenv_python: python3
|
||||
# AWS dependencies are now managed via pyproject.toml optional dependencies
|
||||
# They will be installed automatically when needed
|
||||
|
|
|
@ -1,7 +1,3 @@
|
|||
---
|
||||
- name: Install requirements
|
||||
pip:
|
||||
name:
|
||||
- linode_api4
|
||||
state: latest
|
||||
virtualenv_python: python3
|
||||
# Linode dependencies are now managed via pyproject.toml optional dependencies
|
||||
# They will be installed automatically when needed
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
---
|
||||
- fail:
|
||||
msg: "OpenStack credentials are not set. Download it from the OpenStack dashboard->Compute->API Access and source it in the shell (eg: source /tmp/dhc-openrc.sh)"
|
||||
msg: >-
|
||||
OpenStack credentials are not set. Download it from the OpenStack dashboard->Compute->API Access
|
||||
and source it in the shell (eg: source /tmp/dhc-openrc.sh)
|
||||
when: lookup('env', 'OS_AUTH_URL')|length <= 0
|
||||
|
||||
- name: Build python virtual environment
|
||||
|
|
|
@ -1,6 +1,3 @@
|
|||
---
|
||||
- name: Install requirements
|
||||
pip:
|
||||
name: shade
|
||||
state: latest
|
||||
virtualenv_python: python3
|
||||
# OpenStack dependencies are now managed via pyproject.toml optional dependencies
|
||||
# They will be installed automatically when needed
|
||||
|
|
|
@ -14,17 +14,23 @@
|
|||
algo_vultr_config: "{{ vultr_config | default(_vultr_config.user_input) | default(lookup('env','VULTR_API_CONFIG'), true) }}"
|
||||
no_log: true
|
||||
|
||||
- name: Set the Vultr API Key as a fact
|
||||
set_fact:
|
||||
vultr_api_key: "{{ lookup('ansible.builtin.ini', 'key', section='default', file=algo_vultr_config) }}"
|
||||
|
||||
- name: Get regions
|
||||
uri:
|
||||
url: https://api.vultr.com/v1/regions/list
|
||||
url: https://api.vultr.com/v2/regions
|
||||
method: GET
|
||||
status_code: 200
|
||||
headers:
|
||||
Authorization: "Bearer {{ vultr_api_key }}"
|
||||
register: _vultr_regions
|
||||
|
||||
- name: Format regions
|
||||
set_fact:
|
||||
regions: >-
|
||||
[ {% for k, v in _vultr_regions.json.items() %}
|
||||
[ {% for v in _vultr_regions.json['regions'] %}
|
||||
{{ v }}{% if not loop.last %},{% endif %}
|
||||
{% endfor %} ]
|
||||
|
||||
|
@ -34,17 +40,14 @@
|
|||
|
||||
- name: Set default region
|
||||
set_fact:
|
||||
default_region: >-
|
||||
{% for r in vultr_regions %}
|
||||
{%- if r['DCID'] == "1" %}{{ loop.index }}{% endif %}
|
||||
{%- endfor %}
|
||||
default_region: 1
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
What region should the server be located in?
|
||||
(https://www.vultr.com/locations/):
|
||||
{% for r in vultr_regions %}
|
||||
{{ loop.index }}. {{ r['name'] }} ({{ r['regioncode'] | lower }})
|
||||
{{ loop.index }}. {{ r['city'] }} ({{ r['id'] }})
|
||||
{% endfor %}
|
||||
|
||||
Enter the number of your desired region
|
||||
|
@ -56,5 +59,5 @@
|
|||
set_fact:
|
||||
algo_vultr_region: >-
|
||||
{% if region is defined %}{{ region }}
|
||||
{%- elif _algo_region.user_input %}{{ vultr_regions[_algo_region.user_input | int -1 ]['regioncode'] | lower }}
|
||||
{%- else %}{{ vultr_regions[default_region | int - 1]['regioncode'] | lower }}{% endif %}
|
||||
{%- elif _algo_region.user_input %}{{ vultr_regions[_algo_region.user_input | int -1 ]['id'] }}
|
||||
{%- else %}{{ vultr_regions[default_region | int - 1]['id'] }}{% endif %}
|
||||
|
|
|
@ -4,6 +4,6 @@ aip_supported_providers:
|
|||
- digitalocean
|
||||
snat_aipv4: false
|
||||
ipv6_default: "{{ ansible_default_ipv6.address + '/' + ansible_default_ipv6.prefix }}"
|
||||
ipv6_subnet_size: "{{ ipv6_default | ipaddr('size') }}"
|
||||
ipv6_subnet_size: "{{ ipv6_default | ansible.utils.ipaddr('size') }}"
|
||||
ipv6_egress_ip: >-
|
||||
{{ (ipv6_default | next_nth_usable(15 | random(seed=algo_server_name + ansible_fqdn))) + '/124' if ipv6_subnet_size|int > 1 else ipv6_default }}
|
||||
{{ (ipv6_default | ansible.utils.next_nth_usable(15 | random(seed=algo_server_name + ansible_fqdn))) + '/124' if ipv6_subnet_size|int > 1 else ipv6_default }}
|
||||
|
|
|
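A quick illustration of the fully qualified ansible.utils filters used above (results shown from memory, for orientation only):

# "2001:db8::1/64" | ansible.utils.ipaddr('size')        -> 18446744073709551616 (2^64 addresses)
# "192.168.122.1/24" | ansible.utils.next_nth_usable(2)  -> "192.168.122.3"
# "203.0.113.7" | ansible.utils.ipv4                      -> "203.0.113.7" (truthy); non-IPv4 input -> False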
@ -11,5 +11,5 @@
|
|||
|
||||
- name: Verify SNAT IPv4 found
|
||||
assert:
|
||||
that: snat_aipv4 | ipv4
|
||||
that: snat_aipv4 | ansible.utils.ipv4
|
||||
msg: The SNAT IPv4 address not found. Cannot proceed with the alternative ingress ip.
|
||||
|
|
roles/common/tasks/bsd_ipv6_facts.yml (new file, 56 lines)
|
@ -0,0 +1,56 @@
|
|||
---
|
||||
# BSD systems return IPv6 addresses in the order they were added to the interface,
|
||||
# not sorted by scope like Linux does. This means ansible_default_ipv6 often contains
|
||||
# a link-local address (fe80::) instead of a global address, which breaks certificate
|
||||
# generation due to the %interface suffix.
|
||||
#
|
||||
# This task file creates a fact with the first global IPv6 address found.
|
||||
|
||||
- name: Initialize all_ipv6_addresses as empty list
|
||||
set_fact:
|
||||
all_ipv6_addresses: []
|
||||
|
||||
- name: Get all IPv6 addresses for the default interface
|
||||
set_fact:
|
||||
all_ipv6_addresses: "{{ ansible_facts[ansible_default_ipv6.interface]['ipv6'] | default([]) }}"
|
||||
when:
|
||||
- ansible_default_ipv6 is defined
|
||||
- ansible_default_ipv6.interface is defined
|
||||
- ansible_facts[ansible_default_ipv6.interface] is defined
|
||||
|
||||
- name: Find first global IPv6 address from interface-specific addresses
|
||||
set_fact:
|
||||
global_ipv6_address: "{{ item.address }}"
|
||||
global_ipv6_prefix: "{{ item.prefix }}"
|
||||
loop: "{{ all_ipv6_addresses }}"
|
||||
when:
|
||||
- all_ipv6_addresses | length > 0
|
||||
- item.address is defined
|
||||
- not item.address.startswith('fe80:') # Filter out link-local addresses
|
||||
- "'%' not in item.address" # Ensure no interface suffix
|
||||
- global_ipv6_address is not defined # Only set once
|
||||
loop_control:
|
||||
label: "{{ item.address | default('no address') }}"
|
||||
|
||||
- name: Find first global IPv6 address from ansible_all_ipv6_addresses
|
||||
set_fact:
|
||||
global_ipv6_address: "{{ item | regex_replace('%.*', '') }}"
|
||||
global_ipv6_prefix: "128" # Assume /128 for addresses from this list
|
||||
loop: "{{ ansible_all_ipv6_addresses | default([]) }}"
|
||||
when:
|
||||
- global_ipv6_address is not defined
|
||||
- ansible_all_ipv6_addresses is defined
|
||||
- not item.startswith('fe80:')
|
||||
|
||||
- name: Override ansible_default_ipv6 with global address on BSD
|
||||
set_fact:
|
||||
ansible_default_ipv6: "{{ ansible_default_ipv6 | combine({'address': global_ipv6_address, 'prefix': global_ipv6_prefix}) }}"
|
||||
when:
|
||||
- global_ipv6_address is defined
|
||||
- ansible_default_ipv6 is defined
|
||||
- ansible_default_ipv6.address.startswith('fe80:') or '%' in ansible_default_ipv6.address
|
||||
|
||||
- name: Debug IPv6 address selection
|
||||
debug:
|
||||
msg: "Selected IPv6 address: {{ ansible_default_ipv6.address | default('none') }}"
|
||||
when: algo_debug | default(false) | bool
|
|
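To make the filtering above concrete, a sketch of the per-interface fact it walks (interface name and addresses invented; the first non-link-local entry wins):

ansible_facts:
  vtnet0:
    ipv6:
      - address: fe80::a00:27ff:fe00:1%vtnet0   # link-local, skipped
        prefix: "64"
      - address: 2001:db8:0:1::10               # first global address, selected
        prefix: "64"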
@ -1,78 +0,0 @@
|
|||
---
|
||||
- name: FreeBSD | Install prerequisites
|
||||
package:
|
||||
name:
|
||||
- python3
|
||||
- sudo
|
||||
vars:
|
||||
ansible_python_interpreter: /usr/local/bin/python2.7
|
||||
|
||||
- name: Set python3 as the interpreter to use
|
||||
set_fact:
|
||||
ansible_python_interpreter: /usr/local/bin/python3
|
||||
|
||||
- name: Gather facts
|
||||
setup:
|
||||
- name: Gather additional facts
|
||||
import_tasks: facts.yml
|
||||
|
||||
- name: Set OS specific facts
|
||||
set_fact:
|
||||
config_prefix: /usr/local/
|
||||
strongswan_shell: /usr/sbin/nologin
|
||||
strongswan_home: /var/empty
|
||||
root_group: wheel
|
||||
ssh_service_name: sshd
|
||||
apparmor_enabled: false
|
||||
strongswan_additional_plugins:
|
||||
- kernel-pfroute
|
||||
- kernel-pfkey
|
||||
tools:
|
||||
- git
|
||||
- subversion
|
||||
- screen
|
||||
- coreutils
|
||||
- openssl
|
||||
- bash
|
||||
- wget
|
||||
sysctl:
|
||||
- item: net.inet.ip.forwarding
|
||||
value: 1
|
||||
- item: "{{ 'net.inet6.ip6.forwarding' if ipv6_support else none }}"
|
||||
value: 1
|
||||
|
||||
- name: Install tools
|
||||
package: name="{{ item }}" state=present
|
||||
with_items:
|
||||
- "{{ tools|default([]) }}"
|
||||
|
||||
- name: Loopback included into the rc config
|
||||
blockinfile:
|
||||
dest: /etc/rc.conf
|
||||
create: true
|
||||
block: |
|
||||
cloned_interfaces="lo100"
|
||||
ifconfig_lo100="inet {{ local_service_ip }} netmask 255.255.255.255"
|
||||
ifconfig_lo100_ipv6="inet6 {{ local_service_ipv6 }}/128"
|
||||
notify:
|
||||
- restart loopback bsd
|
||||
|
||||
- name: Enable the gateway features
|
||||
lineinfile: dest=/etc/rc.conf regexp='^{{ item.param }}.*' line='{{ item.param }}={{ item.value }}'
|
||||
with_items:
|
||||
- { param: firewall_enable, value: '"YES"' }
|
||||
- { param: firewall_type, value: '"open"' }
|
||||
- { param: gateway_enable, value: '"YES"' }
|
||||
- { param: natd_enable, value: '"YES"' }
|
||||
- { param: natd_interface, value: '"{{ ansible_default_ipv4.device|default() }}"' }
|
||||
- { param: natd_flags, value: '"-dynamic -m"' }
|
||||
notify:
|
||||
- restart ipfw
|
||||
|
||||
- name: FreeBSD | Activate IPFW
|
||||
shell: >
|
||||
kldstat -n ipfw.ko || kldload ipfw ; sysctl net.inet.ip.fw.enable=0 &&
|
||||
bash /etc/rc.firewall && sysctl net.inet.ip.fw.enable=1
|
||||
changed_when: false
|
||||
|
||||
- meta: flush_handlers
|
|
@ -14,10 +14,6 @@
|
|||
tags:
|
||||
- update-users
|
||||
|
||||
- include_tasks: freebsd.yml
|
||||
when: '"FreeBSD" in OS.stdout'
|
||||
tags:
|
||||
- update-users
|
||||
|
||||
- name: Sysctl tuning
|
||||
sysctl: name="{{ item.item }}" value="{{ item.value }}"
|
||||
|
|
roles/common/tasks/packages.yml (new file, 63 lines)
|
@ -0,0 +1,63 @@
|
|||
---
|
||||
- name: Initialize package lists
|
||||
set_fact:
|
||||
algo_packages: "{{ tools | default([]) if not performance_preinstall_packages | default(false) else [] }}"
|
||||
algo_packages_optional: []
|
||||
when: performance_parallel_packages | default(true)
|
||||
|
||||
- name: Add StrongSwan packages
|
||||
set_fact:
|
||||
algo_packages: "{{ algo_packages + ['strongswan'] }}"
|
||||
when:
|
||||
- performance_parallel_packages | default(true)
|
||||
- ipsec_enabled | default(false)
|
||||
|
||||
- name: Add WireGuard packages
|
||||
set_fact:
|
||||
algo_packages: "{{ algo_packages + ['wireguard'] }}"
|
||||
when:
|
||||
- performance_parallel_packages | default(true)
|
||||
- wireguard_enabled | default(true)
|
||||
|
||||
- name: Add DNS packages
|
||||
set_fact:
|
||||
algo_packages: "{{ algo_packages + ['dnscrypt-proxy'] }}"
|
||||
when:
|
||||
- performance_parallel_packages | default(true)
|
||||
# dnscrypt-proxy handles both DNS ad-blocking and DNS-over-HTTPS/TLS encryption
|
||||
# Install if user wants either ad-blocking OR encrypted DNS (or both)
|
||||
- algo_dns_adblocking | default(false) or dns_encryption | default(false)
|
||||
|
||||
- name: Add kernel headers to optional packages
|
||||
set_fact:
|
||||
algo_packages_optional: "{{ algo_packages_optional + ['linux-headers-generic', 'linux-headers-' + ansible_kernel] }}"
|
||||
when:
|
||||
- performance_parallel_packages | default(true)
|
||||
- install_headers | default(false)
|
||||
|
||||
- name: Install all packages in batch (performance optimization)
|
||||
apt:
|
||||
name: "{{ algo_packages | unique }}"
|
||||
state: present
|
||||
update_cache: true
|
||||
install_recommends: true
|
||||
when:
|
||||
- performance_parallel_packages | default(true)
|
||||
- algo_packages | length > 0
|
||||
|
||||
- name: Install optional packages in batch
|
||||
apt:
|
||||
name: "{{ algo_packages_optional | unique }}"
|
||||
state: present
|
||||
when:
|
||||
- performance_parallel_packages | default(true)
|
||||
- algo_packages_optional | length > 0
|
||||
|
||||
- name: Debug - Show batched packages
|
||||
debug:
|
||||
msg:
|
||||
- "Batch installed {{ algo_packages | length }} main packages: {{ algo_packages | unique | join(', ') }}"
|
||||
- "Batch installed {{ algo_packages_optional | length }} optional packages: {{ algo_packages_optional | unique | join(', ') }}"
|
||||
when:
|
||||
- performance_parallel_packages | default(true)
|
||||
- (algo_packages | length > 0 or algo_packages_optional | length > 0)
|
|
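As an example of what the batching produces (everything below is invented for illustration: a couple of utility packages in tools, WireGuard and encrypted DNS enabled, IPsec disabled, no kernel headers requested):

algo_packages:
  - git            # from tools
  - screen         # from tools
  - wireguard
  - dnscrypt-proxy
algo_packages_optional: []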
@ -14,24 +14,50 @@
|
|||
delay: 10
|
||||
|
||||
- name: Check if reboot is required
|
||||
shell: >
|
||||
if [[ -e /var/run/reboot-required ]]; then echo "required"; else echo "no"; fi
|
||||
shell: |
|
||||
set -o pipefail
|
||||
if [[ -e /var/run/reboot-required ]]; then
|
||||
# Check if kernel was updated (most critical reboot reason)
|
||||
if grep -q "linux-image\|linux-generic\|linux-headers" /var/log/dpkg.log.1 /var/log/dpkg.log 2>/dev/null; then
|
||||
echo "kernel-updated"
|
||||
else
|
||||
echo "optional"
|
||||
fi
|
||||
else
|
||||
echo "no"
|
||||
fi
|
||||
args:
|
||||
executable: /bin/bash
|
||||
register: reboot_required
|
||||
|
||||
- name: Reboot
|
||||
- name: Reboot (kernel updated or performance optimization disabled)
|
||||
shell: sleep 2 && shutdown -r now "Ansible updates triggered"
|
||||
async: 1
|
||||
poll: 0
|
||||
when: reboot_required is defined and reboot_required.stdout == 'required'
|
||||
ignore_errors: true
|
||||
when: >
|
||||
reboot_required is defined and (
|
||||
reboot_required.stdout == 'kernel-updated' or
|
||||
(reboot_required.stdout == 'optional' and not performance_skip_optional_reboots|default(false))
|
||||
)
|
||||
failed_when: false
|
||||
|
||||
- name: Skip reboot (performance optimization enabled)
|
||||
debug:
|
||||
msg: "Skipping reboot - performance optimization enabled. No kernel updates detected."
|
||||
when: >
|
||||
reboot_required is defined and
|
||||
reboot_required.stdout == 'optional' and
|
||||
performance_skip_optional_reboots|default(false)
|
||||
|
||||
- name: Wait until the server becomes ready...
|
||||
wait_for_connection:
|
||||
delay: 20
|
||||
timeout: 320
|
||||
when: reboot_required is defined and reboot_required.stdout == 'required'
|
||||
when: >
|
||||
reboot_required is defined and (
|
||||
reboot_required.stdout == 'kernel-updated' or
|
||||
(reboot_required.stdout == 'optional' and not performance_skip_optional_reboots|default(false))
|
||||
)
|
||||
become: false
|
||||
when: algo_provider != "local"
|
||||
|
||||
|
@ -40,6 +66,7 @@
|
|||
|
||||
- name: Disable MOTD on login and SSHD
|
||||
replace: dest="{{ item.file }}" regexp="{{ item.regexp }}" replace="{{ item.line }}"
|
||||
become: true
|
||||
with_items:
|
||||
- { regexp: ^session.*optional.*pam_motd.so.*, line: "# MOTD DISABLED", file: /etc/pam.d/login }
|
||||
- { regexp: ^session.*optional.*pam_motd.so.*, line: "# MOTD DISABLED", file: /etc/pam.d/sshd }
|
||||
|
@ -107,19 +134,28 @@
|
|||
- item: "{{ 'net.ipv6.conf.all.forwarding' if ipv6_support else none }}"
|
||||
value: 1
|
||||
|
||||
- name: Install tools
|
||||
- name: Install packages (batch optimization)
|
||||
include_tasks: packages.yml
|
||||
when: performance_parallel_packages | default(true)
|
||||
|
||||
- name: Install tools (legacy method)
|
||||
apt:
|
||||
name: "{{ tools|default([]) }}"
|
||||
state: present
|
||||
update_cache: true
|
||||
when:
|
||||
- not performance_parallel_packages | default(true)
|
||||
- not performance_preinstall_packages | default(false)
|
||||
|
||||
- name: Install headers
|
||||
- name: Install headers (legacy method)
|
||||
apt:
|
||||
name:
|
||||
- linux-headers-generic
|
||||
- linux-headers-{{ ansible_kernel }}
|
||||
state: present
|
||||
when: install_headers | bool
|
||||
when:
|
||||
- not performance_parallel_packages | default(true)
|
||||
- install_headers | bool
|
||||
|
||||
- name: Configure the alternative ingress ip
|
||||
include_tasks: aip/main.yml
|
||||
|
|
|
@ -35,7 +35,7 @@ COMMIT
|
|||
-A PREROUTING --in-interface {{ ansible_default_ipv6['interface'] }} -p udp --dport {{ wireguard_port_avoid }} -j REDIRECT --to-port {{ wireguard_port_actual }}
|
||||
{% endif %}
|
||||
# Allow traffic from the VPN network to the outside world, and replies
|
||||
-A POSTROUTING -s {{ subnets|join(',') }} -m policy --pol none --dir out {{ '-j SNAT --to ' + ipv6_egress_ip | ipaddr('address') if alternative_ingress_ip else '-j MASQUERADE' }}
|
||||
-A POSTROUTING -s {{ subnets|join(',') }} -m policy --pol none --dir out {{ '-j SNAT --to ' + ipv6_egress_ip | ansible.utils.ipaddr('address') if alternative_ingress_ip else '-j MASQUERADE' }}
|
||||
|
||||
COMMIT
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
---
|
||||
- name: daemon reload
|
||||
- name: daemon-reload
|
||||
systemd:
|
||||
daemon_reload: true
|
||||
|
||||
|
@ -9,9 +9,3 @@
|
|||
state: restarted
|
||||
daemon_reload: true
|
||||
when: ansible_distribution == 'Ubuntu'
|
||||
|
||||
- name: restart dnscrypt-proxy
|
||||
service:
|
||||
name: dnscrypt-proxy
|
||||
state: restarted
|
||||
when: ansible_distribution == 'FreeBSD'
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
---
|
||||
- name: Install dnscrypt-proxy
|
||||
package:
|
||||
name: dnscrypt-proxy2
|
||||
|
||||
- name: Enable mac_portacl
|
||||
lineinfile:
|
||||
path: /etc/rc.conf
|
||||
line: dnscrypt_proxy_mac_portacl_enable="YES"
|
|
@ -3,9 +3,6 @@
|
|||
include_tasks: ubuntu.yml
|
||||
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
|
||||
|
||||
- name: Include tasks for FreeBSD
|
||||
include_tasks: freebsd.yml
|
||||
when: ansible_distribution == 'FreeBSD'
|
||||
|
||||
- name: dnscrypt-proxy ip-blacklist configured
|
||||
template:
|
||||
|
|
|
@ -19,11 +19,12 @@
|
|||
mode: 0644
|
||||
when: ansible_facts['distribution_version'] is version('20.04', '<')
|
||||
|
||||
- name: Install dnscrypt-proxy
|
||||
- name: Install dnscrypt-proxy (individual)
|
||||
apt:
|
||||
name: dnscrypt-proxy
|
||||
state: present
|
||||
update_cache: true
|
||||
when: not performance_parallel_packages | default(true)
|
||||
|
||||
- block:
|
||||
- name: Ubuntu | Configure AppArmor policy for dnscrypt-proxy
|
||||
|
@ -61,3 +62,38 @@
|
|||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
notify:
|
||||
- restart dnscrypt-proxy
|
||||
|
||||
- name: Ubuntu | Apply systemd security hardening for dnscrypt-proxy
|
||||
copy:
|
||||
dest: /etc/systemd/system/dnscrypt-proxy.service.d/90-security-hardening.conf
|
||||
content: |
|
||||
# Algo VPN systemd security hardening for dnscrypt-proxy
|
||||
# Additional hardening on top of comprehensive AppArmor
|
||||
[Service]
|
||||
# Privilege restrictions
|
||||
NoNewPrivileges=yes
|
||||
|
||||
# Filesystem isolation (complements AppArmor)
|
||||
ProtectSystem=strict
|
||||
ProtectHome=yes
|
||||
PrivateTmp=yes
|
||||
PrivateDevices=yes
|
||||
ProtectKernelTunables=yes
|
||||
ProtectControlGroups=yes
|
||||
|
||||
# Network restrictions
|
||||
RestrictAddressFamilies=AF_INET AF_INET6
|
||||
|
||||
# Allow access to dnscrypt-proxy cache (AppArmor also controls this)
|
||||
ReadWritePaths=/var/cache/dnscrypt-proxy
|
||||
|
||||
# System call filtering (complements AppArmor restrictions)
|
||||
SystemCallFilter=@system-service @network-io
|
||||
SystemCallFilter=~@debug @mount @swap @reboot @raw-io
|
||||
SystemCallErrorNumber=EPERM
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0644
|
||||
notify:
|
||||
- daemon-reload
|
||||
- restart dnscrypt-proxy
|
||||
|
|
|
@ -206,7 +206,7 @@ tls_disable_session_tickets = true
|
|||
## People in China may need to use 114.114.114.114:53 here.
|
||||
## Other popular options include 8.8.8.8 and 1.1.1.1.
|
||||
|
||||
fallback_resolver = '{% if ansible_distribution == "FreeBSD" %}{{ ansible_dns.nameservers.0 }}:53{% else %}127.0.0.53:53{% endif %}'
|
||||
fallback_resolver = '127.0.0.53:53'
|
||||
|
||||
|
||||
## Never let dnscrypt-proxy try to use the system DNS settings;
|
||||
|
|
|
@ -1,15 +1,14 @@
|
|||
---
|
||||
- pause:
|
||||
prompt: "{{ item }}"
|
||||
prompt: |
|
||||
https://trailofbits.github.io/algo/deploy-to-ubuntu.html
|
||||
|
||||
Local installation might break your server. Use at your own risk.
|
||||
|
||||
Proceed? Press ENTER to continue or CTRL+C and A to abort...
|
||||
when: not tests|default(false)|bool
|
||||
tags:
|
||||
- skip_ansible_lint
|
||||
with_items: |
|
||||
https://trailofbits.github.io/algo/deploy-to-ubuntu.html
|
||||
|
||||
Local installation might break your server. Use at your own risk.
|
||||
|
||||
Proceed? Press ENTER to continue or CTRL+C and A to abort...
|
||||
|
||||
- pause:
|
||||
prompt: |
|
||||
|
|
|
@ -32,6 +32,7 @@
|
|||
user:
|
||||
name: "{{ item }}"
|
||||
group: algo
|
||||
groups: algo
|
||||
home: /var/jail/{{ item }}
|
||||
createhome: true
|
||||
generate_ssh_key: false
|
||||
|
@ -66,7 +67,7 @@
|
|||
passphrase: "{{ p12_export_password }}"
|
||||
cipher: auto
|
||||
force: false
|
||||
no_log: "{{ no_log|bool }}"
|
||||
no_log: "{{ algo_no_log|bool }}"
|
||||
when: not item.stat.exists
|
||||
with_items: "{{ privatekey.results }}"
|
||||
register: openssl_privatekey
|
||||
|
@ -78,7 +79,7 @@
|
|||
privatekey_passphrase: "{{ p12_export_password }}"
|
||||
format: OpenSSH
|
||||
force: true
|
||||
no_log: "{{ no_log|bool }}"
|
||||
no_log: "{{ algo_no_log|bool }}"
|
||||
when: item.changed
|
||||
with_items: "{{ openssl_privatekey.results }}"
|
||||
|
||||
|
|
|
@ -11,7 +11,12 @@ algo_ondemand_wifi_exclude: _null
|
|||
algo_dns_adblocking: false
|
||||
ipv6_support: false
|
||||
dns_encryption: true
|
||||
# Random UUID for CA name constraints - prevents certificate reuse across different Algo deployments
|
||||
# This unique identifier ensures each CA can only issue certificates for its specific server instance
|
||||
openssl_constraint_random_id: "{{ IP_subject_alt_name | to_uuid }}.algo"
|
||||
# Subject Alternative Name (SAN) configuration - CRITICAL for client compatibility
|
||||
# Modern clients (especially macOS/iOS) REQUIRE SAN extension in server certificates
|
||||
# Without SAN, IKEv2 connections will fail with certificate validation errors
|
||||
subjectAltName_type: "{{ 'DNS' if IP_subject_alt_name|regex_search('[a-z]') else 'IP' }}"
|
||||
subjectAltName: >-
|
||||
{{ subjectAltName_type }}:{{ IP_subject_alt_name }}
|
||||
|
@ -21,14 +26,18 @@ nameConstraints: >-
|
|||
critical,permitted;{{ subjectAltName_type }}:{{ IP_subject_alt_name }}{{- '/255.255.255.255' if subjectAltName_type == 'IP' else '' -}}
|
||||
{%- if subjectAltName_type == 'IP' -%}
|
||||
,permitted;DNS:{{ openssl_constraint_random_id }}
|
||||
,excluded;DNS:.com,excluded;DNS:.org,excluded;DNS:.net,excluded;DNS:.gov,excluded;DNS:.edu,excluded;DNS:.mil,excluded;DNS:.int
|
||||
,excluded;IP:10.0.0.0/255.0.0.0,excluded;IP:172.16.0.0/255.240.0.0,excluded;IP:192.168.0.0/255.255.0.0
|
||||
{%- else -%}
|
||||
,excluded;IP:0.0.0.0/0.0.0.0
|
||||
{%- endif -%}
|
||||
,permitted;email:{{ openssl_constraint_random_id }}
|
||||
,excluded;email:.com,excluded;email:.org,excluded;email:.net,excluded;email:.gov,excluded;email:.edu,excluded;email:.mil,excluded;email:.int
|
||||
{%- if ipv6_support -%}
|
||||
,permitted;IP:{{ ansible_default_ipv6['address'] }}/ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
|
||||
,excluded;IP:fc00:0:0:0:0:0:0:0/fe00:0:0:0:0:0:0:0,excluded;IP:fe80:0:0:0:0:0:0:0/ffc0:0:0:0:0:0:0:0,excluded;IP:2001:db8:0:0:0:0:0:0/ffff:fff8:0:0:0:0:0:0
|
||||
{%- else -%}
|
||||
,excluded;IP:0:0:0:0:0:0:0:0/0:0:0:0:0:0:0:0
|
||||
,excluded;IP:::/0
|
||||
{%- endif -%}
|
||||
openssl_bin: openssl
|
||||
strongswan_enabled_plugins:
|
||||
|
|
|
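To make the template concrete, a heavily abbreviated sketch of how nameConstraints renders for an IPv4-only deployment at 203.0.113.10 (UUID shortened, excluded lists truncated, and broken across lines here for readability — the real rendered value is a single comma-separated string; illustrative only):

nameConstraints: >-
  critical,permitted;IP:203.0.113.10/255.255.255.255,
  permitted;DNS:<uuid>.algo,
  excluded;DNS:.com,excluded;DNS:.org,...,
  excluded;IP:10.0.0.0/255.0.0.0,excluded;IP:172.16.0.0/255.240.0.0,excluded;IP:192.168.0.0/255.255.0.0,
  permitted;email:<uuid>.algo,
  excluded;email:.com,...,
  excluded;IP:::/0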
@ -9,4 +9,28 @@
|
|||
service: name=apparmor state=restarted
|
||||
|
||||
- name: rereadcrls
|
||||
shell: ipsec rereadcrls; ipsec purgecrls
|
||||
shell: |
|
||||
# Check if StrongSwan is actually running
|
||||
if ! systemctl is-active --quiet strongswan-starter 2>/dev/null && \
|
||||
! systemctl is-active --quiet strongswan 2>/dev/null && \
|
||||
! service strongswan status >/dev/null 2>&1; then
|
||||
echo "StrongSwan is not running, skipping CRL reload"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# StrongSwan is running, wait a moment for it to stabilize
|
||||
sleep 2
|
||||
|
||||
# Try to reload CRLs with retries
|
||||
for attempt in 1 2 3; do
|
||||
if ipsec rereadcrls 2>/dev/null && ipsec purgecrls 2>/dev/null; then
|
||||
echo "Successfully reloaded CRLs"
|
||||
exit 0
|
||||
fi
|
||||
echo "Attempt $attempt failed, retrying..."
|
||||
sleep 2
|
||||
done
|
||||
|
||||
# If StrongSwan is running but we can't reload CRLs, that's a real problem
|
||||
echo "Failed to reload CRLs after 3 attempts"
|
||||
exit 1
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
with_together:
|
||||
- "{{ users }}"
|
||||
- "{{ PayloadContent.results }}"
|
||||
no_log: "{{ no_log|bool }}"
|
||||
no_log: "{{ algo_no_log|bool }}"
|
||||
|
||||
- name: Build the client ipsec config file
|
||||
template:
|
||||
|
@ -33,6 +33,7 @@
|
|||
with_items:
|
||||
- "{{ users }}"
|
||||
|
||||
|
||||
- name: Build the client ipsec secret file
|
||||
template:
|
||||
src: client_ipsec.secrets.j2
|
||||
|
|
|
@ -15,13 +15,9 @@
|
|||
recurse: true
|
||||
mode: "0700"
|
||||
with_items:
|
||||
- ecparams
|
||||
- certs
|
||||
- crl
|
||||
- newcerts
|
||||
- private
|
||||
- public
|
||||
- reqs
|
||||
|
||||
- name: Ensure the config directories exist
|
||||
file:
|
||||
|
@ -33,38 +29,69 @@
|
|||
- apple
|
||||
- manual
|
||||
|
||||
- name: Ensure the files exist
|
||||
file:
|
||||
dest: "{{ ipsec_pki_path }}/{{ item }}"
|
||||
state: touch
|
||||
with_items:
|
||||
- .rnd
|
||||
- private/.rnd
|
||||
- index.txt
|
||||
- index.txt.attr
|
||||
- serial
|
||||
- name: Create private key with password protection
|
||||
community.crypto.openssl_privatekey:
|
||||
path: "{{ ipsec_pki_path }}/private/cakey.pem"
|
||||
passphrase: "{{ CA_password }}"
|
||||
type: ECC
|
||||
curve: secp384r1
|
||||
mode: "0600"
|
||||
no_log: true
|
||||
|
||||
- name: Generate the openssl server configs
|
||||
template:
|
||||
src: openssl.cnf.j2
|
||||
dest: "{{ ipsec_pki_path }}/openssl.cnf"
|
||||
# CA certificate with name constraints to prevent certificate misuse (Issue #75)
|
||||
- name: Create certificate signing request (CSR) for CA certificate with security constraints
|
||||
community.crypto.openssl_csr_pipe:
|
||||
privatekey_path: "{{ ipsec_pki_path }}/private/cakey.pem"
|
||||
privatekey_passphrase: "{{ CA_password }}"
|
||||
common_name: "{{ IP_subject_alt_name }}"
|
||||
use_common_name_for_san: true
|
||||
# Generate Subject Key Identifier for proper Authority Key Identifier creation
|
||||
create_subject_key_identifier: true
|
||||
basic_constraints:
|
||||
- 'CA:TRUE'
|
||||
- 'pathlen:0' # Prevents sub-CA creation - limits certificate chain depth if CA key compromised
|
||||
basic_constraints_critical: true
|
||||
key_usage:
|
||||
- keyCertSign
|
||||
- cRLSign
|
||||
key_usage_critical: true
|
||||
# CA restricted to VPN certificate issuance only
|
||||
extended_key_usage:
|
||||
- '1.3.6.1.5.5.7.3.17' # IPsec End Entity OID - VPN-specific usage
|
||||
extended_key_usage_critical: true
|
||||
# Name Constraints: Defense-in-depth security restricting certificate scope to prevent misuse
|
||||
# Limits CA to only issue certificates for this specific VPN deployment's resources
|
||||
# Per-deployment UUID prevents cross-deployment reuse, unique email domain isolates certificate scope
|
||||
name_constraints_permitted: >-
|
||||
{{ [
|
||||
subjectAltName_type + ':' + IP_subject_alt_name + ('/255.255.255.255' if subjectAltName_type == 'IP' else ''),
|
||||
'DNS:' + openssl_constraint_random_id,
|
||||
'email:' + openssl_constraint_random_id
|
||||
] + (
|
||||
['IP:' + ansible_default_ipv6['address'] + '/128'] if ipv6_support else []
|
||||
) }}
|
||||
# Block public domains/networks to prevent certificate abuse for impersonation attacks
|
||||
# Public TLD exclusion, Email domain exclusion, RFC 1918: prevents lateral movement
|
||||
# IPv6: ULA/link-local/doc ranges or all
|
||||
name_constraints_excluded: >-
|
||||
{{ [
|
||||
'DNS:.com', 'DNS:.org', 'DNS:.net', 'DNS:.gov', 'DNS:.edu', 'DNS:.mil', 'DNS:.int',
|
||||
'email:.com', 'email:.org', 'email:.net', 'email:.gov', 'email:.edu', 'email:.mil', 'email:.int',
|
||||
'IP:10.0.0.0/255.0.0.0', 'IP:172.16.0.0/255.240.0.0', 'IP:192.168.0.0/255.255.0.0'
|
||||
] + (
|
||||
['IP:fc00::/7', 'IP:fe80::/10', 'IP:2001:db8::/32'] if ipv6_support else ['IP:::/0']
|
||||
) }}
|
||||
name_constraints_critical: true
|
||||
register: ca_csr
|
||||
|
||||
- name: Build the CA pair
|
||||
shell: >
|
||||
umask 077;
|
||||
{{ openssl_bin }} ecparam -name secp384r1 -out ecparams/secp384r1.pem &&
|
||||
{{ openssl_bin }} req -utf8 -new
|
||||
-newkey ec:ecparams/secp384r1.pem
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName }}"))
|
||||
-keyout private/cakey.pem
|
||||
-out cacert.pem -x509 -days 3650
|
||||
-batch
|
||||
-passout pass:"{{ CA_password }}" &&
|
||||
touch {{ IP_subject_alt_name }}_ca_generated
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
creates: "{{ IP_subject_alt_name }}_ca_generated"
|
||||
executable: bash
|
||||
- name: Create self-signed CA certificate from CSR
|
||||
community.crypto.x509_certificate:
|
||||
path: "{{ ipsec_pki_path }}/cacert.pem"
|
||||
csr_content: "{{ ca_csr.csr }}"
|
||||
privatekey_path: "{{ ipsec_pki_path }}/private/cakey.pem"
|
||||
privatekey_passphrase: "{{ CA_password }}"
|
||||
provider: selfsigned
|
||||
mode: "0644"
|
||||
no_log: true
|
||||
|
||||
- name: Copy the CA certificate
|
||||
|
@ -72,142 +99,114 @@
|
|||
src: "{{ ipsec_pki_path }}/cacert.pem"
|
||||
dest: "{{ ipsec_config_path }}/manual/cacert.pem"
|
||||
|
||||
- name: Generate the serial number
|
||||
shell: echo 01 > serial && touch serial_generated
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
creates: serial_generated
|
||||
- name: Create private keys for users and server
|
||||
community.crypto.openssl_privatekey:
|
||||
path: "{{ ipsec_pki_path }}/private/{{ item }}.key"
|
||||
type: ECC
|
||||
curve: secp384r1
|
||||
mode: "0600"
|
||||
with_items:
|
||||
- "{{ users }}"
|
||||
- "{{ IP_subject_alt_name }}"
|
||||
register: client_key_jobs
|
||||
|
||||
- name: Build the server pair
|
||||
shell: >
|
||||
umask 077;
|
||||
{{ openssl_bin }} req -utf8 -new
|
||||
-newkey ec:ecparams/secp384r1.pem
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName }}"))
|
||||
-keyout private/{{ IP_subject_alt_name }}.key
|
||||
-out reqs/{{ IP_subject_alt_name }}.req -nodes
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-subj "/CN={{ IP_subject_alt_name }}" -batch &&
|
||||
{{ openssl_bin }} ca -utf8
|
||||
-in reqs/{{ IP_subject_alt_name }}.req
|
||||
-out certs/{{ IP_subject_alt_name }}.crt
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName }}"))
|
||||
-days 3650 -batch
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-subj "/CN={{ IP_subject_alt_name }}" &&
|
||||
touch certs/{{ IP_subject_alt_name }}_crt_generated
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
creates: certs/{{ IP_subject_alt_name }}_crt_generated
|
||||
executable: bash
|
||||
no_log: true
|
||||
# Server certificate with SAN extension - required for modern Apple devices
|
||||
- name: Create CSRs for server certificate with SAN
|
||||
community.crypto.openssl_csr_pipe:
|
||||
privatekey_path: "{{ ipsec_pki_path }}/private/{{ IP_subject_alt_name }}.key"
|
||||
subject_alt_name: "{{ subjectAltName.split(',') }}"
|
||||
common_name: "{{ IP_subject_alt_name }}"
|
||||
# Add Basic Constraints to prevent certificate chain validation errors
|
||||
basic_constraints:
|
||||
- 'CA:FALSE'
|
||||
basic_constraints_critical: false
|
||||
key_usage:
|
||||
- digitalSignature
|
||||
- keyEncipherment
|
||||
key_usage_critical: false
|
||||
# Server auth EKU required for IKEv2 server certificates (Issue #75)
|
||||
# NOTE: clientAuth deliberately excluded to prevent role confusion attacks
|
||||
extended_key_usage:
|
||||
- serverAuth # Server Authentication (RFC 5280)
|
||||
- '1.3.6.1.5.5.7.3.17' # IPsec End Entity (RFC 4945)
|
||||
extended_key_usage_critical: false
|
||||
register: server_csr
|
||||
|
||||
- name: Build the client's pair
|
||||
shell: >
|
||||
umask 077;
|
||||
{{ openssl_bin }} req -utf8 -new
|
||||
-newkey ec:ecparams/secp384r1.pem
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName_USER }}"))
|
||||
-keyout private/{{ item }}.key
|
||||
-out reqs/{{ item }}.req -nodes
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-subj "/CN={{ item }}" -batch &&
|
||||
{{ openssl_bin }} ca -utf8
|
||||
-in reqs/{{ item }}.req
|
||||
-out certs/{{ item }}.crt
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName_USER }}"))
|
||||
-days 3650 -batch
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-subj "/CN={{ item }}" &&
|
||||
touch certs/{{ item }}_crt_generated
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
creates: certs/{{ item }}_crt_generated
|
||||
executable: bash
|
||||
with_items: "{{ users }}"
|
||||
no_log: true
|
||||
|
||||
- name: Build the tests pair
|
||||
shell: >
|
||||
umask 077;
|
||||
{{ openssl_bin }} req -utf8 -new
|
||||
-newkey ec:ecparams/secp384r1.pem
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName=DNS:google-algo-test-pair.com"))
|
||||
-keyout private/google-algo-test-pair.com.key
|
||||
-out reqs/google-algo-test-pair.com.req -nodes
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-subj "/CN=google-algo-test-pair.com" -batch &&
|
||||
{{ openssl_bin }} ca -utf8
|
||||
-in reqs/google-algo-test-pair.com.req
|
||||
-out certs/google-algo-test-pair.com.crt
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName=DNS:google-algo-test-pair.com"))
|
||||
-days 3650 -batch
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-subj "/CN=google-algo-test-pair.com" &&
|
||||
touch certs/google-algo-test-pair.com_crt_generated
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
creates: certs/google-algo-test-pair.com_crt_generated
|
||||
executable: bash
|
||||
when: tests|default(false)|bool
|
||||
no_log: true
|
||||
|
||||
- name: Build openssh public keys
|
||||
openssl_publickey:
|
||||
path: "{{ ipsec_pki_path }}/public/{{ item }}.pub"
|
||||
- name: Create CSRs for client certificates
|
||||
community.crypto.openssl_csr_pipe:
|
||||
privatekey_path: "{{ ipsec_pki_path }}/private/{{ item }}.key"
|
||||
format: OpenSSH
|
||||
subject_alt_name:
|
||||
- "email:{{ item }}@{{ openssl_constraint_random_id }}" # UUID domain prevents certificate reuse across deployments
|
||||
common_name: "{{ item }}"
|
||||
# Add Basic Constraints to client certificates for proper PKI validation
|
||||
basic_constraints:
|
||||
- 'CA:FALSE'
|
||||
basic_constraints_critical: false
|
||||
key_usage:
|
||||
- digitalSignature
|
||||
- keyEncipherment
|
||||
key_usage_critical: false
|
||||
# Client certs restricted to clientAuth only - prevents clients from impersonating the VPN server
|
||||
# NOTE: serverAuth deliberately excluded to prevent server impersonation attacks
|
||||
extended_key_usage:
|
||||
- clientAuth # Client Authentication (RFC 5280)
|
||||
- '1.3.6.1.5.5.7.3.17' # IPsec End Entity (RFC 4945)
|
||||
extended_key_usage_critical: false
|
||||
with_items: "{{ users }}"
|
||||
register: client_csr_jobs
|
||||
|
||||
- name: Get OpenSSL version
|
||||
shell: |
|
||||
set -o pipefail
|
||||
{{ openssl_bin }} version |
|
||||
cut -f 2 -d ' '
|
||||
args:
|
||||
executable: bash
|
||||
register: ssl_version
|
||||
run_once: true
|
||||
|
||||
- name: Set OpenSSL version fact
|
||||
set_fact:
|
||||
openssl_version: "{{ ssl_version.stdout }}"
|
||||
|
||||
- name: Build the client's p12
|
||||
shell: >
|
||||
umask 077;
|
||||
{{ openssl_bin }} pkcs12
|
||||
{{ (openssl_version is version('3', '>=')) | ternary('-legacy', '') }}
|
||||
-in certs/{{ item }}.crt
|
||||
-inkey private/{{ item }}.key
|
||||
-export
|
||||
-name {{ item }}
|
||||
-out private/{{ item }}.p12
|
||||
-passout pass:"{{ p12_export_password }}"
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
executable: bash
|
||||
with_items: "{{ users }}"
|
||||
register: p12
|
||||
- name: Sign server certificate with CA
|
||||
community.crypto.x509_certificate:
|
||||
csr_content: "{{ server_csr.csr }}"
|
||||
path: "{{ ipsec_pki_path }}/certs/{{ IP_subject_alt_name }}.crt"
|
||||
provider: ownca
|
||||
ownca_path: "{{ ipsec_pki_path }}/cacert.pem"
|
||||
ownca_privatekey_path: "{{ ipsec_pki_path }}/private/cakey.pem"
|
||||
ownca_privatekey_passphrase: "{{ CA_password }}"
|
||||
ownca_not_after: "+{{ certificate_validity_days }}d"
|
||||
ownca_not_before: "-1d"
|
||||
mode: "0644"
|
||||
no_log: true
|
||||
|
||||
- name: Build the client's p12 with the CA cert included
|
||||
shell: >
|
||||
umask 077;
|
||||
{{ openssl_bin }} pkcs12
|
||||
{{ (openssl_version is version('3', '>=')) | ternary('-legacy', '') }}
|
||||
-in certs/{{ item }}.crt
|
||||
-inkey private/{{ item }}.key
|
||||
-export
|
||||
-name {{ item }}
|
||||
-out private/{{ item }}_ca.p12
|
||||
-certfile cacert.pem
|
||||
-passout pass:"{{ p12_export_password }}"
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
executable: bash
|
||||
- name: Sign client certificates with CA
|
||||
community.crypto.x509_certificate:
|
||||
csr_content: "{{ item.csr }}"
|
||||
path: "{{ ipsec_pki_path }}/certs/{{ item.item }}.crt"
|
||||
provider: ownca
|
||||
ownca_path: "{{ ipsec_pki_path }}/cacert.pem"
|
||||
ownca_privatekey_path: "{{ ipsec_pki_path }}/private/cakey.pem"
|
||||
ownca_privatekey_passphrase: "{{ CA_password }}"
|
||||
ownca_not_after: "+{{ certificate_validity_days }}d"
|
||||
ownca_not_before: "-1d"
|
||||
mode: "0644"
|
||||
with_items: "{{ client_csr_jobs.results }}"
|
||||
register: client_sign_results
|
||||
no_log: true
|
||||
|
||||
- name: Generate p12 files
|
||||
community.crypto.openssl_pkcs12:
|
||||
path: "{{ ipsec_pki_path }}/private/{{ item }}.p12"
|
||||
friendly_name: "{{ item }}"
|
||||
privatekey_path: "{{ ipsec_pki_path }}/private/{{ item }}.key"
|
||||
certificate_path: "{{ ipsec_pki_path }}/certs/{{ item }}.crt"
|
||||
passphrase: "{{ p12_export_password }}"
|
||||
mode: "0600"
|
||||
encryption_level: "compatibility2022" # Apple device compatibility
|
||||
with_items: "{{ users }}"
|
||||
no_log: true
|
||||
|
||||
- name: Generate p12 files with CA certificate included
|
||||
community.crypto.openssl_pkcs12:
|
||||
path: "{{ ipsec_pki_path }}/private/{{ item }}_ca.p12"
|
||||
friendly_name: "{{ item }}"
|
||||
privatekey_path: "{{ ipsec_pki_path }}/private/{{ item }}.key"
|
||||
certificate_path: "{{ ipsec_pki_path }}/certs/{{ item }}.crt"
|
||||
other_certificates:
|
||||
- "{{ ipsec_pki_path }}/cacert.pem"
|
||||
passphrase: "{{ p12_export_password }}"
|
||||
mode: "0600"
|
||||
encryption_level: "compatibility2022" # Apple device compatibility
|
||||
with_items: "{{ users }}"
|
||||
register: p12
|
||||
no_log: true
|
||||
|
||||
- name: Copy the p12 certificates
|
||||
|
@ -217,56 +216,66 @@
|
|||
with_items:
|
||||
- "{{ users }}"
|
||||
|
||||
- name: Get active users
|
||||
shell: >
|
||||
grep ^V index.txt |
|
||||
grep -v "{{ IP_subject_alt_name }}" |
|
||||
awk '{print $5}' |
|
||||
sed 's/\/CN=//g'
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
register: valid_certs
|
||||
- name: Build openssh public keys
|
||||
community.crypto.openssl_publickey:
|
||||
path: "{{ ipsec_pki_path }}/public/{{ item }}.pub"
|
||||
privatekey_path: "{{ ipsec_pki_path }}/private/{{ item }}.key"
|
||||
format: OpenSSH
|
||||
with_items: "{{ users }}"
|
||||
|
||||
- name: Revoke non-existing users
|
||||
shell: >
|
||||
{{ openssl_bin }} ca -gencrl
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName_USER }}"))
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-revoke certs/{{ item }}.crt
|
||||
-out crl/{{ item }}.crt
|
||||
register: gencrl
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
creates: crl/{{ item }}.crt
|
||||
executable: bash
|
||||
when: item.split('@')[0] not in users
|
||||
with_items: "{{ valid_certs.stdout_lines }}"
|
||||
- name: Add all users to the file
|
||||
ansible.builtin.lineinfile:
|
||||
path: "{{ ipsec_pki_path }}/all-users"
|
||||
line: "{{ item }}"
|
||||
create: true
|
||||
with_items: "{{ users }}"
|
||||
register: users_file
|
||||
|
||||
- name: Set all users as a fact
|
||||
set_fact:
|
||||
all_users: "{{ lookup('file', ipsec_pki_path + '/all-users').splitlines() }}"
|
||||
|
||||
# Certificate Revocation List (CRL) for removed users
|
||||
- name: Calculate current timestamp for CRL
|
||||
set_fact:
|
||||
crl_timestamp: "{{ '%Y%m%d%H%M%SZ' | strftime(ansible_date_time.epoch | int) }}"
|
||||
|
||||
- name: Identify users whose certificates need revocation
|
||||
set_fact:
|
||||
users_to_revoke: "{{ all_users | difference(users) }}"
|
||||
|
||||
- name: Build revoked certificates list
|
||||
set_fact:
|
||||
revoked_certificates: >-
|
||||
{{ users_to_revoke | map('regex_replace', '^(.*)$',
|
||||
'{"path": "' + ipsec_pki_path + '/certs/\1.crt", "revocation_date": "' + crl_timestamp + '"}') | list }}
|
||||
|
||||
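For clarity, a sketch of what the two facts above are intended to produce when a user named phone has been removed from the config (timestamp invented; the path is left in template form):

users_to_revoke:
  - phone
revoked_certificates:
  - path: "{{ ipsec_pki_path }}/certs/phone.crt"
    revocation_date: "20250101120000Z"   # crl_timestamp at run time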
- name: Generate a CRL
|
||||
community.crypto.x509_crl:
|
||||
path: "{{ ipsec_pki_path }}/crl.pem"
|
||||
privatekey_path: "{{ ipsec_pki_path }}/private/cakey.pem"
|
||||
privatekey_passphrase: "{{ CA_password }}"
|
||||
last_update: "{{ '%Y%m%d%H%M%SZ' | strftime(ansible_date_time.epoch | int) }}"
|
||||
next_update: "{{ '%Y%m%d%H%M%SZ' | strftime((ansible_date_time.epoch | int) + (10 * 365 * 24 * 60 * 60)) }}"
|
||||
crl_mode: generate
|
||||
issuer:
|
||||
CN: "{{ IP_subject_alt_name }}"
|
||||
revoked_certificates: "{{ revoked_certificates }}"
|
||||
no_log: true
|
||||
|
||||
- name: Generate new CRL file
|
||||
shell: >
|
||||
{{ openssl_bin }} ca -gencrl
|
||||
-config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName=DNS:{{ IP_subject_alt_name }}"))
|
||||
-passin pass:"{{ CA_password }}"
|
||||
-out crl/algo.root.pem
|
||||
when:
|
||||
- gencrl is defined
|
||||
- gencrl.changed
|
||||
args:
|
||||
chdir: "{{ ipsec_pki_path }}"
|
||||
executable: bash
|
||||
no_log: true
|
||||
- name: Set CRL file permissions
|
||||
file:
|
||||
path: "{{ ipsec_pki_path }}/crl.pem"
|
||||
mode: "0644"
|
||||
delegate_to: localhost
|
||||
become: false
|
||||
vars:
|
||||
ansible_python_interpreter: "{{ ansible_playbook_python }}"
|
||||
certificate_validity_days: 3650 # 10 years - configurable certificate lifespan
|
||||
|
||||
- name: Copy the CRL to the vpn server
|
||||
copy:
|
||||
src: "{{ ipsec_pki_path }}/crl/algo.root.pem"
|
||||
src: "{{ ipsec_pki_path }}/crl.pem"
|
||||
dest: "{{ config_prefix|default('/') }}etc/ipsec.d/crls/algo.root.pem"
|
||||
when:
|
||||
- gencrl is defined
|
||||
- gencrl.changed
|
||||
notify:
|
||||
- rereadcrls
|
||||
|
|
|
@ -2,12 +2,13 @@
|
|||
- name: Set OS specific facts
|
||||
set_fact:
|
||||
strongswan_additional_plugins: []
|
||||
- name: Ubuntu | Install strongSwan
|
||||
- name: Ubuntu | Install strongSwan (individual)
|
||||
apt:
|
||||
name: strongswan
|
||||
state: present
|
||||
update_cache: true
|
||||
install_recommends: true
|
||||
when: not performance_parallel_packages | default(true)
|
||||
|
||||
- block:
|
||||
# https://bugs.launchpad.net/ubuntu/+source/strongswan/+bug/1826238
|
||||
|
|
|
@ -1,2 +1,24 @@
|
|||
# Algo VPN systemd security hardening for StrongSwan
|
||||
# Enhanced hardening on top of existing AppArmor
|
||||
[Service]
|
||||
MemoryLimit=16777216
|
||||
# Privilege restrictions
|
||||
NoNewPrivileges=yes
|
||||
|
||||
# Filesystem isolation (complements AppArmor)
|
||||
ProtectHome=yes
|
||||
PrivateTmp=yes
|
||||
ProtectKernelTunables=yes
|
||||
ProtectControlGroups=yes
|
||||
|
||||
# Network restrictions - include IPsec kernel communication requirements
|
||||
RestrictAddressFamilies=AF_INET AF_INET6 AF_NETLINK AF_PACKET
|
||||
|
||||
# Allow access to IPsec configuration, state, and kernel interfaces
|
||||
ReadWritePaths=/etc/ipsec.d /var/lib/strongswan
|
||||
ReadOnlyPaths=/proc/net/pfkey
|
||||
|
||||
# System call filtering (complements AppArmor restrictions)
|
||||
# Allow crypto operations, remove cpu-emulation restriction for crypto algorithms
|
||||
SystemCallFilter=@system-service @network-io
|
||||
SystemCallFilter=~@debug @mount @swap @reboot
|
||||
SystemCallErrorNumber=EPERM
|
||||
|
|
Some files were not shown because too many files have changed in this diff.