Compare commits


No commits in common. "dev" and "1.8.3" have entirely different histories.
dev ... 1.8.3

1276 changed files with 20800 additions and 242088 deletions

View file

@@ -1,6 +0,0 @@
CompileFlags:
Add:
- "-std=c++17"
- "-I../ext"
- "-I../ext/prometheus-cpp-lite-1.0/core/include"
- "-I../ext/prometheus-cpp-lite-1.0/simpleapi/include"

View file

@@ -1,256 +0,0 @@
//
// tweakables
//
local registry = "084037375216.dkr.ecr.us-east-2.amazonaws.com";
local build_channel = "zerotier-builds";
local release_channel = "zerotier-releases";
local targets = [
{ "os": "linux", distro: "redhat", "name": "el9", "isas": [ "amd64", "arm64", "ppc64le", "s390x" ], "events": [ "tag", "custom" ] },
{ "os": "linux", distro: "redhat", "name": "el8", "isas": [ "amd64", "arm64", "ppc64le", "s390x" ], "events": [ "tag" ] },
{ "os": "linux", distro: "redhat", "name": "el7", "isas": [ "386", "amd64", "ppc64le"], "events": [ "tag" ] },
{ "os": "linux", distro: "amazon", "name": "amzn2", "isas": [ "amd64", "arm64" ], "events": [ "tag" ] },
{ "os": "linux", distro: "amazon", "name": "amzn2022", "isas": [ "amd64", "arm64" ], "events": [ "tag" ] },
{ "os": "linux", distro: "fedora", "name": "fc38", "isas": [ "amd64", "arm64", "ppc64le", "s390x" ], "events": [ "tag" ] },
{ "os": "linux", distro: "fedora", "name": "fc37", "isas": [ "amd64", "arm64", "ppc64le", "s390x" ], "events": [ "tag" ] },
{ "os": "linux", distro: "fedora", "name": "fc36", "isas": [ "amd64", "arm64", "ppc64le", "s390x" ], "events": [ "tag" ] },
{ "os": "linux", distro: "ubuntu", "name": "jammy", "isas": [ "armv7", "amd64", "arm64", "ppc64le", "s390x", "riscv64" ], "events": [ "tag" ] },
{ "os": "linux", distro: "ubuntu", "name": "focal", "isas": [ "armv7", "amd64", "arm64", "ppc64le", "s390x", "riscv64" ], "events": [ "tag" ] },
{ "os": "linux", distro: "ubuntu", "name": "bionic", "isas": [ "386", "armv7", "amd64", "arm64", "ppc64le", "s390x" ], "events": [ "tag" ] },
{ "os": "linux", distro: "ubuntu", "name": "xenial", "isas": [ "386", "armv7", "amd64", "arm64", "ppc64le", "s390x" ], "events": [ "tag" ] },
{ "os": "linux", distro: "ubuntu", "name": "trusty", "isas": [ "386", "armv7", "amd64", "arm64" ], "events": [ "tag" ] },
{ "os": "linux", distro: "debian", "name": "bookworm", "isas": [ "386", "armv7", "amd64", "arm64", "mips64le", "ppc64le", "s390x" ], "events": [ "tag"] },
{ "os": "linux", distro: "debian", "name": "bullseye", "isas": [ "386", "armv7", "amd64", "arm64", "mips64le", "ppc64le", "s390x" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "debian", "name": "buster", "isas": [ "386", "armv7", "amd64", "arm64" ], "events": [ "tag" ] },
{ "os": "linux", distro: "debian", "name": "stretch", "isas": [ "386", "armv7", "amd64", "arm64" ], "events": [ "tag" ] },
{ "os": "linux", distro: "debian", "name": "jessie", "isas": [ "386", "armv7", "amd64" ], "events": [ "tag" ] },
// { "os": "windows", distro: "windows", "name": "windows", "isas": [ "amd64" ], "events": [ "push", "tag", "custom" ] },
// { "os": "darwin", distro: "darwin", "name": "darwin", "isas": [ "amd64" ], "events": [ "push", "tag", "custom" ] },
];
local less_targets = [
{ "os": "linux", distro: "redhat", "name": "el9", "isas": [ "amd64", "arm64" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "redhat", "name": "el8", "isas": [ "amd64", "arm64" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "ubuntu", "name": "jammy", "isas": [ "armv7", "amd64", "arm64" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "ubuntu", "name": "focal", "isas": [ "armv7", "amd64", "arm64" ], "events": [ "push", "tag", "custom" ] },
];
local native_targets = [
{ "os": "linux", distro: "debian", "name": "bullseye", "isas": [ "386", "armv7", "amd64", "arm64" ], "events": [ "push", "tag", "custom" ] },
];
local master_targets = [
//
// copypasta from here
//
{ "os": "linux", distro: "redhat", "name": "el9", "isas": [ "amd64", "arm64", "ppc64le", "s390x" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "redhat", "name": "el8", "isas": [ "amd64", "arm64", "ppc64le", "s390x" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "redhat", "name": "el7", "isas": [ "386", "amd64", "ppc64le"], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "amazon", "name": "amzn2", "isas": [ "amd64", "arm64" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "amazon", "name": "amzn2022", "isas": [ "amd64", "arm64" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "fedora", "name": "fc38", "isas": [ "amd64", "arm64", "ppc64le", "s390x" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "fedora", "name": "fc37", "isas": [ "amd64", "arm64", "ppc64le", "s390x" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "fedora", "name": "fc36", "isas": [ "amd64", "arm64", "ppc64le", "s390x" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "ubuntu", "name": "jammy", "isas": [ "armv7", "amd64", "arm64", "ppc64le", "s390x", "riscv64" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "ubuntu", "name": "focal", "isas": [ "armv7", "amd64", "arm64", "ppc64le", "s390x", "riscv64" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "ubuntu", "name": "bionic", "isas": [ "386", "armv7", "amd64", "arm64", "ppc64le", "s390x" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "ubuntu", "name": "xenial", "isas": [ "386", "armv7", "amd64", "arm64", "ppc64le", "s390x" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "ubuntu", "name": "trusty", "isas": [ "386", "armv7", "amd64", "arm64" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "debian", "name": "sid", "isas": [ "386", "armv7", "amd64", "arm64", "mips64le", "ppc64le", "s390x", "riscv64" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "debian", "name": "bookworm", "isas": [ "386", "armv7", "amd64", "arm64", "mips64le", "ppc64le", "s390x" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "debian", "name": "bullseye", "isas": [ "386", "armv7", "amd64", "arm64", "mips64le", "ppc64le", "s390x" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "debian", "name": "buster", "isas": [ "386", "armv7", "amd64", "arm64" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "debian", "name": "stretch", "isas": [ "386", "armv7", "amd64", "arm64" ], "events": [ "push", "tag", "custom" ] },
{ "os": "linux", distro: "debian", "name": "jessie", "isas": [ "386", "armv7", "amd64" ], "events": [ "push", "tag", "custom" ] },
{ "os": "windows", distro: "windows", "name": "win2k22", "isas": [ "amd64" ], "events": [ "push", "tag", "custom" ] }
];
//
// functions
//
local pipeline_type(os) = if os == "darwin" then "exec" else "docker";
local builder_image(os) = if os == "linux" then registry + "/honda-builder" else registry + "/windows-builder";
local tester_image(os) = if os == "linux" then registry + "/honda-builder" else registry + "/windows-tester";
local build_step_volumes(os) = if os == "linux" then [ { name: "zerotier-builds", path: "/zerotier-builds" } ] else [];
local release_step_volumes(os) = if os == "linux" then [ { name: "zerotier-releases", path: "/zerotier-releases" } ] else [];
local host_volumes(os) = if os == "linux" then [
{ name: "zerotier-builds", host: { path: "/zerotier-builds" } },
{ name: "zerotier-releases", host: { path: "/zerotier-releases" } },
] else [];
local index_image(distro) =
if distro == "debian" || distro == "ubuntu" then
registry + "/apt-builder"
else if distro == "redhat" || distro == "fedora" || distro == "amazon" then
registry + "/dnf-builder"
else if distro == "windows" then
registry + "/msi-builder"
;
local copy_commands(os, distro, name, isa, version) =
if os == "linux" then [
std.join(" ", [ "./ci/scripts/publish.sh", name, distro, isa, version, "${DRONE_BUILD_EVENT}" ])
]
else if os == "windows" then [
"C:\\scripts\\fix-ec2-metadata.ps1",
"Get-ChildItem windows",
// "aws s3 cp windows\\bytey-SetupFiles\\bytey.msi s3://zerotier-builds/windows/" + version + "/bytey.msi",
] else if os == "darwin" then [
"echo hello"
]
;
local index_commands(os, channel, distro, name, isas) =
if os == "linux" then
[ "/usr/local/bin/index " + channel + " " + distro + " " + name + " " + std.join(" ", isas) ]
else if os == "windows" then
[ "Get-ChildItem -Recurse windows" ]
;
local build_commands(os, distro, name, isa, version) =
if os == "linux" then
[ std.join(" ", [ "./ci/scripts/build.sh", name, distro, isa, version, "${DRONE_BUILD_EVENT}" ]) ]
else
if os == "windows" then
[ "windows/build.ps1", "windows/package.ps1" ]
else
if os == "darwin" then
[ "whoami" ]
;
local test_commands(os, distro, name, isa, version) =
if os == "linux" then
[ std.join(" ", [ "./ci/scripts/test.sh", name, distro, isa, version, "${DRONE_BUILD_EVENT}" ]) ]
else
if os == "windows" then
[ "windows/testpackage.ps1 " + version ]
;
//
// render
//
local Build(os, distro, name, isa, events) = {
"kind": "pipeline",
"type": pipeline_type(os),
"name": std.join(" ", [ name, isa, "build" ]),
"pull": "always",
"clone": { "depth": 1, [ if os == "darwin" then "disable" ]: true },
"steps": [
{
"name": "build",
"image": builder_image(os),
"commands": build_commands(os, distro, name, isa, "100.0.0+${DRONE_COMMIT_SHA:0:8}"),
"when": { "event": [ "push" ]},
},
{
"name": "release",
"image": builder_image(os),
"commands": build_commands(os, distro, name, isa, "${DRONE_TAG}"),
"when": { "event": [ "tag" ]},
},
{
"name": "copy build",
"image": builder_image(os),
"commands": copy_commands(os, distro, name, isa, "100.0.0+${DRONE_COMMIT_SHA:0:8}"),
"volumes": build_step_volumes(os),
"when": { "event": [ "push" ]},
},
{
"name": "copy relase",
"image": builder_image(os),
"commands": copy_commands(os, distro, name, isa, "${DRONE_TAG}"),
"volumes": release_step_volumes(os),
"when": { "event": [ "tag" ]},
},
],
"volumes": host_volumes(os),
"platform": { "os": os, [ if isa == "arm64" || isa == "armv7" then "arch" ]: "arm64" },
"trigger": { "event": events }
};
local Test(os, distro, name, isa, events) = {
"kind": "pipeline",
"type": pipeline_type(os),
"name": std.join(" ", [ name, isa, "test"]),
"pull": "always",
"clone": { "depth": 1 },
"steps": [
{
"name": "test build",
"image": tester_image(os),
"volumes": build_step_volumes(os),
"commands": test_commands(os, distro, name, isa, "100.0.0+${DRONE_COMMIT_SHA:0:8}"),
"when": { "event": [ "push" ]},
},
{
"name": "test release",
"image": tester_image(os),
"volumes": release_step_volumes(os),
"commands": test_commands(os, distro, name, isa, "${DRONE_TAG}"),
"when": { "event": [ "tag" ]},
},
],
"volumes": host_volumes(os),
"platform": { "os": os, [ if isa == "arm64" || isa == "armv7" then "arch" ]: "arm64" },
"depends_on": [ std.join(" ", [ name, "index" ]) ],
"trigger": { "event": events }
};
local Index(p) = {
"kind": "pipeline",
"type": pipeline_type(p.os),
"name": std.join(" ", [ p.name, "index" ]),
"pull": "always",
"clone": { "depth": 1 },
"steps": [
{
"name": "index build",
"image": index_image(p.distro),
"commands": index_commands(p.os, "zerotier-builds", p.distro, p.name, p.isas),
"volumes": build_step_volumes(p.os),
"environment":{ "GPG_PRIVATE_KEY": { from_secret: "gpg-private-key" }},
"when": { "event": [ "push" ]},
},
{
"name": "index release",
"image": index_image(p.distro),
"commands": index_commands(p.os, "zerotier-releases", p.distro, p.name, p.isas),
"volumes": release_step_volumes(p.os),
"environment":{ "GPG_PRIVATE_KEY": { from_secret: "gpg-private-key" }},
"when": { "event": [ "tag" ]},
},
],
"volumes": host_volumes(p.os),
"platform": { "os": p.os },
depends_on: std.flattenArrays([ [ std.join(" ", [ p.name, isa, "build" ]) ] for isa in p.isas ]),
"trigger": { "event": p.events }
};
//
// print
//
std.flattenArrays([
[
Build(p.os, p.distro, p.name, isa, p.events)
for isa in p.isas
] +
[
Index(p)
]
for p in native_targets
]) +
std.flattenArrays([
[
Test(p.os, p.distro, p.name, isa, p.events)
for isa in p.isas
]
for p in native_targets
])

View file

@@ -1,465 +0,0 @@
---
clone:
depth: 1
kind: pipeline
name: bullseye 386 build
platform:
os: linux
pull: always
steps:
- commands:
- ./ci/scripts/build.sh bullseye debian 386 100.0.0+${DRONE_COMMIT_SHA:0:8} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: build
when:
event:
- push
- commands:
- ./ci/scripts/build.sh bullseye debian 386 ${DRONE_TAG} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: release
when:
event:
- tag
- commands:
- ./ci/scripts/publish.sh bullseye debian 386 100.0.0+${DRONE_COMMIT_SHA:0:8} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: copy build
volumes:
- name: zerotier-builds
path: /zerotier-builds
when:
event:
- push
- commands:
- ./ci/scripts/publish.sh bullseye debian 386 ${DRONE_TAG} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: copy release
volumes:
- name: zerotier-releases
path: /zerotier-releases
when:
event:
- tag
trigger:
event:
- push
- tag
- custom
type: docker
volumes:
- host:
path: /zerotier-builds
name: zerotier-builds
- host:
path: /zerotier-releases
name: zerotier-releases
---
clone:
depth: 1
kind: pipeline
name: bullseye armv7 build
platform:
arch: arm64
os: linux
pull: always
steps:
- commands:
- ./ci/scripts/build.sh bullseye debian armv7 100.0.0+${DRONE_COMMIT_SHA:0:8} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: build
when:
event:
- push
- commands:
- ./ci/scripts/build.sh bullseye debian armv7 ${DRONE_TAG} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: release
when:
event:
- tag
- commands:
- ./ci/scripts/publish.sh bullseye debian armv7 100.0.0+${DRONE_COMMIT_SHA:0:8}
${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: copy build
volumes:
- name: zerotier-builds
path: /zerotier-builds
when:
event:
- push
- commands:
- ./ci/scripts/publish.sh bullseye debian armv7 ${DRONE_TAG} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: copy release
volumes:
- name: zerotier-releases
path: /zerotier-releases
when:
event:
- tag
trigger:
event:
- push
- tag
- custom
type: docker
volumes:
- host:
path: /zerotier-builds
name: zerotier-builds
- host:
path: /zerotier-releases
name: zerotier-releases
---
clone:
depth: 1
kind: pipeline
name: bullseye amd64 build
platform:
os: linux
pull: always
steps:
- commands:
- ./ci/scripts/build.sh bullseye debian amd64 100.0.0+${DRONE_COMMIT_SHA:0:8} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: build
when:
event:
- push
- commands:
- ./ci/scripts/build.sh bullseye debian amd64 ${DRONE_TAG} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: release
when:
event:
- tag
- commands:
- ./ci/scripts/publish.sh bullseye debian amd64 100.0.0+${DRONE_COMMIT_SHA:0:8}
${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: copy build
volumes:
- name: zerotier-builds
path: /zerotier-builds
when:
event:
- push
- commands:
- ./ci/scripts/publish.sh bullseye debian amd64 ${DRONE_TAG} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: copy release
volumes:
- name: zerotier-releases
path: /zerotier-releases
when:
event:
- tag
trigger:
event:
- push
- tag
- custom
type: docker
volumes:
- host:
path: /zerotier-builds
name: zerotier-builds
- host:
path: /zerotier-releases
name: zerotier-releases
---
clone:
depth: 1
kind: pipeline
name: bullseye arm64 build
platform:
arch: arm64
os: linux
pull: always
steps:
- commands:
- ./ci/scripts/build.sh bullseye debian arm64 100.0.0+${DRONE_COMMIT_SHA:0:8} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: build
when:
event:
- push
- commands:
- ./ci/scripts/build.sh bullseye debian arm64 ${DRONE_TAG} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: release
when:
event:
- tag
- commands:
- ./ci/scripts/publish.sh bullseye debian arm64 100.0.0+${DRONE_COMMIT_SHA:0:8}
${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: copy build
volumes:
- name: zerotier-builds
path: /zerotier-builds
when:
event:
- push
- commands:
- ./ci/scripts/publish.sh bullseye debian arm64 ${DRONE_TAG} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: copy release
volumes:
- name: zerotier-releases
path: /zerotier-releases
when:
event:
- tag
trigger:
event:
- push
- tag
- custom
type: docker
volumes:
- host:
path: /zerotier-builds
name: zerotier-builds
- host:
path: /zerotier-releases
name: zerotier-releases
---
clone:
depth: 1
depends_on:
- bullseye 386 build
- bullseye armv7 build
- bullseye amd64 build
- bullseye arm64 build
kind: pipeline
name: bullseye index
platform:
os: linux
pull: always
steps:
- commands:
- /usr/local/bin/index zerotier-builds debian bullseye 386 armv7 amd64 arm64
environment:
GPG_PRIVATE_KEY:
from_secret: gpg-private-key
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/apt-builder
name: index build
volumes:
- name: zerotier-builds
path: /zerotier-builds
when:
event:
- push
- commands:
- /usr/local/bin/index zerotier-releases debian bullseye 386 armv7 amd64 arm64
environment:
GPG_PRIVATE_KEY:
from_secret: gpg-private-key
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/apt-builder
name: index release
volumes:
- name: zerotier-releases
path: /zerotier-releases
when:
event:
- tag
trigger:
event:
- push
- tag
- custom
type: docker
volumes:
- host:
path: /zerotier-builds
name: zerotier-builds
- host:
path: /zerotier-releases
name: zerotier-releases
---
clone:
depth: 1
depends_on:
- bullseye index
kind: pipeline
name: bullseye 386 test
platform:
os: linux
pull: always
steps:
- commands:
- ./ci/scripts/test.sh bullseye debian 386 100.0.0+${DRONE_COMMIT_SHA:0:8} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: test build
volumes:
- name: zerotier-builds
path: /zerotier-builds
when:
event:
- push
- commands:
- ./ci/scripts/test.sh bullseye debian 386 ${DRONE_TAG} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: test release
volumes:
- name: zerotier-releases
path: /zerotier-releases
when:
event:
- tag
trigger:
event:
- push
- tag
- custom
type: docker
volumes:
- host:
path: /zerotier-builds
name: zerotier-builds
- host:
path: /zerotier-releases
name: zerotier-releases
---
clone:
depth: 1
depends_on:
- bullseye index
kind: pipeline
name: bullseye armv7 test
platform:
arch: arm64
os: linux
pull: always
steps:
- commands:
- ./ci/scripts/test.sh bullseye debian armv7 100.0.0+${DRONE_COMMIT_SHA:0:8} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: test build
volumes:
- name: zerotier-builds
path: /zerotier-builds
when:
event:
- push
- commands:
- ./ci/scripts/test.sh bullseye debian armv7 ${DRONE_TAG} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: test release
volumes:
- name: zerotier-releases
path: /zerotier-releases
when:
event:
- tag
trigger:
event:
- push
- tag
- custom
type: docker
volumes:
- host:
path: /zerotier-builds
name: zerotier-builds
- host:
path: /zerotier-releases
name: zerotier-releases
---
clone:
depth: 1
depends_on:
- bullseye index
kind: pipeline
name: bullseye amd64 test
platform:
os: linux
pull: always
steps:
- commands:
- ./ci/scripts/test.sh bullseye debian amd64 100.0.0+${DRONE_COMMIT_SHA:0:8} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: test build
volumes:
- name: zerotier-builds
path: /zerotier-builds
when:
event:
- push
- commands:
- ./ci/scripts/test.sh bullseye debian amd64 ${DRONE_TAG} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: test release
volumes:
- name: zerotier-releases
path: /zerotier-releases
when:
event:
- tag
trigger:
event:
- push
- tag
- custom
type: docker
volumes:
- host:
path: /zerotier-builds
name: zerotier-builds
- host:
path: /zerotier-releases
name: zerotier-releases
---
clone:
depth: 1
depends_on:
- bullseye index
kind: pipeline
name: bullseye arm64 test
platform:
arch: arm64
os: linux
pull: always
steps:
- commands:
- ./ci/scripts/test.sh bullseye debian arm64 100.0.0+${DRONE_COMMIT_SHA:0:8} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: test build
volumes:
- name: zerotier-builds
path: /zerotier-builds
when:
event:
- push
- commands:
- ./ci/scripts/test.sh bullseye debian arm64 ${DRONE_TAG} ${DRONE_BUILD_EVENT}
image: 084037375216.dkr.ecr.us-east-2.amazonaws.com/honda-builder
name: test release
volumes:
- name: zerotier-releases
path: /zerotier-releases
when:
event:
- tag
trigger:
event:
- push
- tag
- custom
type: docker
volumes:
- host:
path: /zerotier-builds
name: zerotier-builds
- host:
path: /zerotier-releases
name: zerotier-releases
---
kind: signature
hmac: 887a3ef78d3fe8f0149911e1e4876401dd7dd313b36eb893e791fa42f45d7768
...

View file

@@ -11,21 +11,39 @@ assignees: ''
_Using these will ensure you get quicker support, and make this space available for code-related issues. Thank you!_
- [Docs Site](https://docs.zerotier.com/zerotier/troubleshooting) => Troubleshooting, quickstarts, and more advanced topics.
- [Knowledge Base](https://zerotier.atlassian.net/wiki/spaces/SD/overview) => Guides and documentation on how to use ZeroTier.
- [Discuss Forum](https://discuss.zerotier.com/) => Our discussion forum for users and support to mutually resolve issues & suggest ideas.
- [Reddit](https://www.reddit.com/r/zerotier/) => Our subreddit, which we monitor regularly and is fairly active.
- [Knowledge Base](https://zerotier.atlassian.net/wiki/spaces/SD/overview) => Older wiki.
If you are having a connection issue, it's much easier to diagnose through the discussion forum or the ticket system.
# If you still want to file a Bug Report
## Please let us know
## Required
- What you expect to happen.
- What actually happens.
- Any steps to reproduce the error.
- Any relevant console output or screenshots.
- What operating system and ZeroTier version you are running. Please try the latest ZeroTier release.
- Any screenshots that would help us out.
## Additional information
**Desktop (please complete the following information):**
- OS: [e.g. Mac, Linux, Windows, BSD]
- OS/Distribution Version
- ZeroTier Version [e.g. 1.4.6]
**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- ZeroTier Version [e.g. 1.4.6]
**Embedded & NAS (please complete the following information):**
- Device: [e.g. Synology, Pi4]
- OS/Distribution (if applicable)
- ZeroTier Version [e.g. 1.4.6]
**Additional context**
- ZeroTier Network Configuration: IPv4 & IPv6 networks defined on your ZeroTier Central
- Router Config: are you permitting port 9993, uPnP, and NAT-PMP?
- Firewall Config: are you permitting port 9993 on your OS; setting it to "Private" on Windows?
- Are you using this at home, in an office, college, etc?
- Have you tried screaming into your router?

.github/workflows/badstrings.yml vendored Normal file
View file

@@ -0,0 +1,17 @@
name: badstrings
on: [pull_request, push]
jobs:
badstrings:
runs-on: ubuntu-latest
steps:
- name: scanning commit message for bad strings
run: |
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
message="${{ github.event.head_commit.message }}"
strings=($(curl -s https://raw.githubusercontent.com/someara/badstrings/main/strings.txt))
for i in ${strings[@]} ; do
echo "${message}" | grep -v "$i" &>/dev/null;
done

View file

@@ -1,126 +0,0 @@
on:
pull_request:
push:
workflow_dispatch:
jobs:
build_ubuntu:
runs-on: ubuntu-latest
steps:
- name: gitconfig
run: |
git config --global core.autocrlf input
# git config --global core.eol lf
- name: checkout
uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
targets: x86_64-unknown-linux-gnu
components: rustfmt, clippy
- name: Set up cargo cache
uses: Swatinem/rust-cache@v2
continue-on-error: false
with:
key: ${{ runner.os }}-cargo-${{ hashFiles('rustybits//Cargo.lock') }}
shared-key: ${{ runner.os }}-cargo-
workspaces: |
rustybits/
- name: make
run: make
- name: selftest
run: |
make selftest
./zerotier-selftest
- name: 'Tar files' # keeps permissions (execute)
run: tar -cvf zerotier-one.tar zerotier-one
- name: Archive production artifacts
uses: actions/upload-artifact@v4
with:
name: zerotier-one-ubuntu-x64
path: zerotier-one.tar
retention-days: 7
build_macos:
runs-on: macos-latest
steps:
- name: gitconfig
run: |
git config --global core.autocrlf input
# git config --global core.eol lf
- name: checkout
uses: actions/checkout@v4
- name: Install Rust aarch64
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
target: aarch64-apple-darwin
components: rustfmt, clippy
- name: Install Rust x86_64
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
target: x86_64-apple-darwin
components: rustfmt, clippy
- name: Set up cargo cache
uses: Swatinem/rust-cache@v2
continue-on-error: false
with:
key: ${{ runner.os }}-cargo-${{ hashFiles('rustybits//Cargo.lock') }}
shared-key: ${{ runner.os }}-cargo-
workspaces: |
rustybits/
- name: make
run: make
- name: selftest
run: |
make selftest
./zerotier-selftest
- name: 'Tar files' # keeps permissions (execute)
run: tar -cvf zerotier-one.tar zerotier-one
- name: Archive production artifacts
uses: actions/upload-artifact@v4
with:
name: zerotier-one-mac
path: zerotier-one.tar
retention-days: 7
build_windows:
runs-on: windows-latest
steps:
- name: gitconfig
run: |
git config --global core.autocrlf true
# git config --global core.eol lf
- name: checkout
uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
target: aarch64-apple-darwin
components: rustfmt, clippy
- name: Set up cargo cache
uses: Swatinem/rust-cache@v2
continue-on-error: false
with:
key: ${{ runner.os }}-cargo-${{ hashFiles('rustybits//Cargo.lock') }}
shared-key: ${{ runner.os }}-cargo-
workspaces: |
rustybits/
- name: setup msbuild
uses: microsoft/setup-msbuild@v2
- name: msbuild
run: |
msbuild windows\ZeroTierOne.sln /m /p:Configuration=Release /property:Platform=x64 /t:ZeroTierOne
- name: Archive production artifacts
uses: actions/upload-artifact@v4
with:
name: zerotier-one-windows
path: windows/Build
retention-days: 7

View file

@@ -1,497 +0,0 @@
#!/bin/bash
# This test script joins Earth and pokes some stuff
TEST_NETWORK=8056c2e21c000001
RUN_LENGTH=30
TEST_FINISHED=false
ZTO_VER=$(git describe --tags $(git rev-list --tags --max-count=1))
ZTO_COMMIT=$(git rev-parse HEAD)
ZTO_COMMIT_SHORT=$(git rev-parse --short HEAD)
TEST_DIR_PREFIX="$ZTO_VER-$ZTO_COMMIT_SHORT-test-results"
TEST_OK=0
TEST_FAIL=1
echo "Performing test on: $ZTO_VER-$ZTO_COMMIT_SHORT"
TEST_FILEPATH_PREFIX="$TEST_DIR_PREFIX/$ZTO_COMMIT_SHORT"
mkdir $TEST_DIR_PREFIX
# How long we will wait for ZT to come online before considering it a failure
MAX_WAIT_SECS=30
ZT_PORT_NODE_1=9996
ZT_PORT_NODE_2=9997
################################################################################
# Multi-node connectivity and performance test #
################################################################################
test() {
echo -e "\nPerforming pre-flight checks"
check_exit_on_invalid_identity
echo -e "\nRunning test for $RUN_LENGTH seconds"
export NS1="ip netns exec ns1"
export NS2="ip netns exec ns2"
export ZT1="$NS1 ./zerotier-cli -p9996 -D$(pwd)/node1"
# Specify custom port on one node to ensure that feature works
export ZT2="$NS2 ./zerotier-cli -p9997 -D$(pwd)/node2"
echo -e "\nSetting up network namespaces..."
echo "Setting up ns1"
ip netns add ns1
$NS1 ip link set dev lo up
ip link add veth0 type veth peer name veth1
ip link set veth1 netns ns1
ip addr add 192.168.0.1/24 dev veth0
ip link set dev veth0 up
$NS1 ip addr add 192.168.0.2/24 dev veth1
$NS1 ip link set dev veth1 up
# Add default route
$NS1 ip route add default via 192.168.0.1
iptables -t nat -A POSTROUTING -s 192.168.0.0/255.255.255.0 \
-o eth0 -j MASQUERADE
iptables -A FORWARD -i eth0 -o veth0 -j ACCEPT
iptables -A FORWARD -o eth0 -i veth0 -j ACCEPT
echo "Setting up ns2"
ip netns add ns2
$NS2 ip link set dev lo up
ip link add veth2 type veth peer name veth3
ip link set veth3 netns ns2
ip addr add 192.168.1.1/24 dev veth2
ip link set dev veth2 up
$NS2 ip addr add 192.168.1.2/24 dev veth3
$NS2 ip link set dev veth3 up
$NS2 ip route add default via 192.168.1.1
iptables -t nat -A POSTROUTING -s 192.168.1.0/255.255.255.0 \
-o eth0 -j MASQUERADE
iptables -A FORWARD -i eth0 -o veth2 -j ACCEPT
iptables -A FORWARD -o eth0 -i veth2 -j ACCEPT
# Allow forwarding
sysctl -w net.ipv4.ip_forward=1
################################################################################
# Memory Leak Check #
################################################################################
export FILENAME_MEMORY_LOG="$TEST_FILEPATH_PREFIX-memory.log"
echo -e "\nStarting a ZeroTier instance in each namespace..."
export time_test_start=$(date +%s)
# Spam the CLI as ZeroTier is starting
spam_cli 100
echo "Starting memory leak check"
$NS1 sudo valgrind --demangle=yes --exit-on-first-error=yes \
--error-exitcode=1 \
--xml=yes \
--xml-file=$FILENAME_MEMORY_LOG \
--leak-check=full \
./zerotier-one node1 -p$ZT_PORT_NODE_1 -U >>node_1.log 2>&1 &
# Second instance, not run in memory profiler
# Don't set up internet access until _after_ zerotier is running
# This has been a source of stuckness in the past.
$NS2 ip addr del 192.168.1.2/24 dev veth3
$NS2 sudo ./zerotier-one node2 -U -p$ZT_PORT_NODE_2 >>node_2.log 2>&1 &
sleep 10; # New HTTP control plane is a bit sluggish, so we delay here
check_bind_to_correct_ports $ZT_PORT_NODE_1
check_bind_to_correct_ports $ZT_PORT_NODE_2
$NS2 ip addr add 192.168.1.2/24 dev veth3
$NS2 ip route add default via 192.168.1.1
echo -e "\nPing from host to namespaces"
ping -c 3 192.168.0.1
ping -c 3 192.168.1.1
echo -e "\nPing from namespace to host"
$NS1 ping -c 3 192.168.0.1
$NS1 ping -c 3 192.168.0.1
$NS2 ping -c 3 192.168.0.2
$NS2 ping -c 3 192.168.0.2
echo -e "\nPing from ns1 to ns2"
$NS1 ping -c 3 192.168.0.1
echo -e "\nPing from ns2 to ns1"
$NS2 ping -c 3 192.168.0.1
################################################################################
# Online Check #
################################################################################
echo "Waiting for ZeroTier to come online before attempting test..."
node1_online=false
node2_online=false
both_instances_online=false
time_zt_node1_start=$(date +%s)
time_zt_node2_start=$(date +%s)
for ((s = 0; s <= $MAX_WAIT_SECS; s++)); do
node1_online="$($ZT1 -j info | jq '.online' 2>/dev/null)"
node2_online="$($ZT2 -j info | jq '.online' 2>/dev/null)"
echo "Checking for online status: try #$s, node1:$node1_online, node2:$node2_online"
if [[ "$node2_online" == "true" && "$node1_online" == "true" ]]; then
export both_instances_online=true
export time_to_both_nodes_online=$(date +%s)
break
fi
sleep 1
done
echo -e "\n\nContents of ZeroTier home paths:"
ls -lga node1
tree node1
ls -lga node2
tree node2
echo -e "\n\nRunning ZeroTier processes:"
echo -e "\nNode 1:\n"
$NS1 ps aux | grep zerotier-one
echo -e "\nNode 2:\n"
$NS2 ps aux | grep zerotier-one
echo -e "\n\nStatus of each instance:"
echo -e "\n\nNode 1:\n"
$ZT1 status
echo -e "\n\nNode 2:\n"
$ZT2 status
if [[ "$both_instances_online" != "true" ]]; then
exit_test_and_generate_report $TEST_FAIL "one or more nodes failed to come online"
fi
echo -e "\nJoining networks"
$ZT1 join $TEST_NETWORK
$ZT2 join $TEST_NETWORK
sleep 10
node1_ip4=$($ZT1 get $TEST_NETWORK ip4)
node2_ip4=$($ZT2 get $TEST_NETWORK ip4)
echo "node1_ip4=$node1_ip4"
echo "node2_ip4=$node2_ip4"
echo -e "\nPinging each node"
PING12_FILENAME="$TEST_FILEPATH_PREFIX-ping-1-to-2.txt"
PING21_FILENAME="$TEST_FILEPATH_PREFIX-ping-2-to-1.txt"
$NS1 ping -c 16 $node2_ip4 >$PING12_FILENAME
$NS2 ping -c 16 $node1_ip4 >$PING21_FILENAME
ping_loss_percent_1_to_2=$(cat $PING12_FILENAME |
grep "packet loss" | awk '{print $6}' | sed 's/%//')
ping_loss_percent_2_to_1=$(cat $PING21_FILENAME |
grep "packet loss" | awk '{print $6}' | sed 's/%//')
# Normalize loss value
export ping_loss_percent_1_to_2=$(echo "scale=2; $ping_loss_percent_1_to_2/100.0" | bc)
export ping_loss_percent_2_to_1=$(echo "scale=2; $ping_loss_percent_2_to_1/100.0" | bc)
################################################################################
# CLI Check #
################################################################################
echo "Testing basic CLI functionality..."
spam_cli 10
$ZT1 join $TEST_NETWORK
$ZT1 -h
$ZT1 -v
$ZT1 status
$ZT1 info
$ZT1 listnetworks
$ZT1 peers
$ZT1 listpeers
$ZT1 -j status
$ZT1 -j info
$ZT1 -j listnetworks
$ZT1 -j peers
$ZT1 -j listpeers
$ZT1 dump
$ZT1 get $TEST_NETWORK allowDNS
$ZT1 get $TEST_NETWORK allowDefault
$ZT1 get $TEST_NETWORK allowGlobal
$ZT1 get $TEST_NETWORK allowManaged
$ZT1 get $TEST_NETWORK bridge
$ZT1 get $TEST_NETWORK broadcastEnabled
$ZT1 get $TEST_NETWORK dhcp
$ZT1 get $TEST_NETWORK id
$ZT1 get $TEST_NETWORK mac
$ZT1 get $TEST_NETWORK mtu
$ZT1 get $TEST_NETWORK name
$ZT1 get $TEST_NETWORK netconfRevision
$ZT1 get $TEST_NETWORK nwid
$ZT1 get $TEST_NETWORK portDeviceName
$ZT1 get $TEST_NETWORK portError
$ZT1 get $TEST_NETWORK status
$ZT1 get $TEST_NETWORK type
# Test an invalid command
$ZT1 get $TEST_NETWORK derpderp
# TODO: Validate JSON
# Performance Test
export FILENAME_PERF_JSON="$TEST_FILEPATH_PREFIX-iperf.json"
echo -e "\nBeginning performance test:"
echo -e "\nStarting server:"
echo "$NS1 iperf3 -s &"
sleep 1
echo -e "\nStarting client:"
sleep 1
echo "$NS2 iperf3 --json -c $node1_ip4 > $FILENAME_PERF_JSON"
cat $FILENAME_PERF_JSON
# Let ZeroTier idle long enough for various timers
echo -e "\nIdling ZeroTier for $RUN_LENGTH seconds..."
sleep $RUN_LENGTH
echo -e "\nLeaving networks"
$ZT1 leave $TEST_NETWORK
$ZT2 leave $TEST_NETWORK
sleep 5
exit_test_and_generate_report $TEST_OK "completed test"
}
################################################################################
# Generate report #
################################################################################
exit_test_and_generate_report() {
echo -e "\nStopping memory check..."
sudo pkill -15 -f valgrind
sleep 10
time_test_end=$(date +%s)
echo "Exiting test with reason: $2 ($1)"
# Collect ZeroTier dump files
echo -e "\nCollecting ZeroTier dump files"
node1_id=$($ZT1 -j status | jq -r .address)
node2_id=$($ZT2 -j status | jq -r .address)
$ZT1 dump
mv zerotier_dump.txt "$TEST_FILEPATH_PREFIX-node-dump-$node1_id.txt"
$ZT2 dump
mv zerotier_dump.txt "$TEST_FILEPATH_PREFIX-node-dump-$node2_id.txt"
# Copy ZeroTier stdout/stderr logs
cp node_1.log "$TEST_FILEPATH_PREFIX-node-log-$node1_id.txt"
cp node_2.log "$TEST_FILEPATH_PREFIX-node-log-$node2_id.txt"
# Generate report
cat $FILENAME_MEMORY_LOG
DEFINITELY_LOST=$(xmlstarlet sel -t -v '/valgrindoutput/error/xwhat' \
$FILENAME_MEMORY_LOG | grep "definitely" | awk '{print $1;}')
POSSIBLY_LOST=$(xmlstarlet sel -t -v '/valgrindoutput/error/xwhat' \
$FILENAME_MEMORY_LOG | grep "possibly" | awk '{print $1;}')
# Generate coverage report artifact and summary
FILENAME_COVERAGE_JSON="$TEST_FILEPATH_PREFIX-coverage.json"
FILENAME_COVERAGE_HTML="$TEST_FILEPATH_PREFIX-coverage.html"
echo -e "\nGenerating coverage test report..."
gcovr -r . --exclude ext --json-summary $FILENAME_COVERAGE_JSON \
--html >$FILENAME_COVERAGE_HTML
cat $FILENAME_COVERAGE_JSON
COVERAGE_LINE_COVERED=$(cat $FILENAME_COVERAGE_JSON | jq .line_covered)
COVERAGE_LINE_TOTAL=$(cat $FILENAME_COVERAGE_JSON | jq .line_total)
COVERAGE_LINE_PERCENT=$(cat $FILENAME_COVERAGE_JSON | jq .line_percent)
COVERAGE_LINE_COVERED="${COVERAGE_LINE_COVERED:-0}"
COVERAGE_LINE_TOTAL="${COVERAGE_LINE_TOTAL:-0}"
COVERAGE_LINE_PERCENT="${COVERAGE_LINE_PERCENT:-0}"
# Default values
DEFINITELY_LOST="${DEFINITELY_LOST:-0}"
POSSIBLY_LOST="${POSSIBLY_LOST:-0}"
ping_loss_percent_1_to_2="${ping_loss_percent_1_to_2:-100.0}"
ping_loss_percent_2_to_1="${ping_loss_percent_2_to_1:-100.0}"
time_to_both_nodes_online="${time_to_both_nodes_online:--1}"
# Summarize and emit json for trend reporting
FILENAME_SUMMARY="$TEST_FILEPATH_PREFIX-summary.json"
time_length_test=$((time_test_end - time_test_start))
if [[ $time_to_both_nodes_online != -1 ]];
then
time_to_both_nodes_online=$((time_to_both_nodes_online - time_test_start))
fi
#time_length_zt_join=$((time_zt_join_end-time_zt_join_start))
#time_length_zt_leave=$((time_zt_leave_end-time_zt_leave_start))
#time_length_zt_can_still_ping=$((time_zt_can_still_ping-time_zt_leave_start))
summary=$(
cat <<EOF
{
"version":"$ZTO_VER",
"commit":"$ZTO_COMMIT",
"arch_m":"$(uname -m)",
"arch_a":"$(uname -a)",
"binary_size":"$(stat -c %s zerotier-one)",
"time_length_test":$time_length_test,
"time_to_both_nodes_online":$time_to_both_nodes_online,
"num_possible_bytes_lost": $POSSIBLY_LOST,
"num_definite_bytes_lost": $DEFINITELY_LOST,
"num_bad_formattings": $POSSIBLY_LOST,
"coverage_lines_covered": $COVERAGE_LINE_COVERED,
"coverage_lines_total": $COVERAGE_LINE_TOTAL,
"coverage_lines_percent": $COVERAGE_LINE_PERCENT,
"ping_loss_percent_1_to_2": $ping_loss_percent_1_to_2,
"ping_loss_percent_2_to_1": $ping_loss_percent_2_to_1,
"test_exit_code": $1,
"test_exit_reason":"$2"
}
EOF
)
echo $summary >$FILENAME_SUMMARY
cat $FILENAME_SUMMARY
exit 0
}
################################################################################
# CLI Check #
################################################################################
spam_cli() {
echo "Spamming CLI..."
# Rapidly spam the CLI with joins/leaves
MAX_TRIES="${1:-10}"
for ((s = 0; s <= MAX_TRIES; s++)); do
$ZT1 status
$ZT2 status
sleep 0.1
done
SPAM_TRIES=128
for ((s = 0; s <= SPAM_TRIES; s++)); do
$ZT1 join $TEST_NETWORK
done
for ((s = 0; s <= SPAM_TRIES; s++)); do
$ZT1 leave $TEST_NETWORK
done
for ((s = 0; s <= SPAM_TRIES; s++)); do
$ZT1 leave $TEST_NETWORK
$ZT1 join $TEST_NETWORK
done
}
################################################################################
# Check for proper exit on load of invalid identity #
################################################################################
check_exit_on_invalid_identity() {
echo "Checking ZeroTier exits on invalid identity..."
mkdir -p $(pwd)/exit_test
ZT1="sudo ./zerotier-one -p9999 $(pwd)/exit_test"
echo "asdfasdfasdfasdf" > $(pwd)/exit_test/identity.secret
echo "asdfasdfasdfasdf" > $(pwd)/exit_test/authtoken.secret
echo "Launch ZeroTier with an invalid identity"
$ZT1 &
my_pid=$!
echo "Waiting 5 seconds"
sleep 5
# check if process is running
kill -0 $my_pid
if [ $? -eq 0 ]; then
exit_test_and_generate_report $TEST_FAIL "Exit test FAILED: Process still running after being fed an invalid identity"
fi
}
################################################################################
# Check that we're binding to the primary port for TCP/TCP6/UDP #
################################################################################
check_bind_to_correct_ports() {
PORT_NUMBER=$1
echo "Checking bound ports:"
sudo netstat -anp | grep "$PORT_NUMBER" | grep "zerotier"
if [[ $(sudo netstat -anp | grep "$PORT_NUMBER" | grep "zerotier" | grep "tcp") ]];
then
:
else
exit_test_and_generate_report $TEST_FAIL "ZeroTier did not bind to tcp/$1"
fi
if [[ $(sudo netstat -anp | grep "$PORT_NUMBER" | grep "zerotier" | grep "tcp6") ]];
then
:
else
exit_test_and_generate_report $TEST_FAIL "ZeroTier did not bind to tcp6/$1"
fi
if [[ $(sudo netstat -anp | grep "$PORT_NUMBER" | grep "zerotier" | grep "udp") ]];
then
:
else
exit_test_and_generate_report $TEST_FAIL "ZeroTier did not bind to udp/$1"
fi
}
test "$@"

View file

@@ -1,24 +0,0 @@
#!/bin/bash
################################################################################
# Set exit code depending on tool reports #
################################################################################
DEFINITELY_LOST=$(cat *test-results/*summary.json | jq .num_definite_bytes_lost)
EXIT_CODE=$(cat *test-results/*summary.json | jq .exit_code)
EXIT_REASON=$(cat *test-results/*summary.json | jq .exit_reason)
cat *test-results/*summary.json
echo -e "\nBytes of memory definitely lost: $DEFINITELY_LOST"
if [[ "$DEFINITELY_LOST" -gt 0 ]]; then
exit 1
fi
# Catch-all for other non-zero exit codes
if [[ "$EXIT_CODE" -gt 0 ]]; then
echo "Test failed: $EXIT_REASON"
exit 1
fi

View file

@@ -1,57 +0,0 @@
on:
pull_request:
push:
workflow_dispatch:
jobs:
build_ubuntu:
runs-on: ubuntu-latest
steps:
- name: gitconfig
run: |
git config --global core.autocrlf input
- name: checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
target: x86_64-unknown-linux-gnu
override: true
components: rustfmt, clippy
- name: Set up cargo cache
uses: Swatinem/rust-cache@v2
continue-on-error: false
with:
key: ${{ runner.os }}-cargo-${{ hashFiles('zeroidc//Cargo.lock') }}
shared-key: ${{ runner.os }}-cargo-
workspaces: |
zeroidc/
- name: validate-1m-linux
env:
CC: 'gcc'
CXX: 'g++'
BRANCH: ${{ github.ref_name }}
run: |
sudo apt install -y valgrind xmlstarlet gcovr iperf3 tree
make one ZT_COVERAGE=1 ZT_TRACE=1
sudo chmod +x ./.github/workflows/validate-linux.sh
sudo ./.github/workflows/validate-linux.sh
- name: Archive test results
uses: actions/upload-artifact@v4
with:
name: ${{github.sha}}-test-results
path: "*test-results*"
- name: final-report
run: |
sudo chmod +x ./.github/workflows/validate-report.sh
sudo ./.github/workflows/validate-report.sh

.gitignore vendored
View file

@@ -6,11 +6,6 @@
/zerotier
/nltest
# IDE stuff
/.idea
/.nova
/compile_commands.json
# OS-created garbage files from various platforms
.DS_Store
.Apple*
@@ -35,6 +30,7 @@ Thumbs.db
/windows/WebUIWrapper/obj
/windows/lib
/ext/installfiles/windows/ZeroTier One-SetupFiles
/ext/installfiles/windows/Prerequisites
/ext/installfiles/windows/*-cache
/ZeroTier One.msi
*.vcxproj.backup
@@ -62,6 +58,7 @@ zt1-src.tar.gz
*.opensdf
*.user
*.cache
*.obj
*.tlog
*.pid
*.pkg
@@ -106,6 +103,7 @@ windows/ZeroTierOne/Debug/
*.swp
*~.nib
DerivedData/
build/
*.pbxuser
*.mode1v3
*.mode2v3
@@ -116,6 +114,7 @@ DerivedData/
!default.perspectivev3
*.xccheckout
xcuserdata/
ext/librethinkdbxx/build
.vscode
__pycache__
*~
@@ -123,8 +122,6 @@ attic/world/*.c25519
attic/world/mkworld
workspace/
workspace2/
zeroidc/target/
tcp-proxy/target
#snapcraft specifics
/parts/
@@ -138,5 +135,3 @@ __pycache__
*.pyc
*_source.tar.bz2
snap/.snapcraft
tcp-proxy/tcp-proxy
rustybits/target

.kick
View file

@@ -1,14 +0,0 @@
kick
kick
kick
kick
kick
kick
kick
kick
kick
kick
kick
kick
kick
kick

View file

@@ -1,23 +1,24 @@
# vim: ft=dockerfile
FROM debian:bookworm
FROM debian:buster as stage
ARG PACKAGE_BASEURL=https://download.zerotier.com/debian/buster/pool/main/z/zerotier-one/
ARG ARCH=amd64
ARG VERSION
RUN apt-get update -qq && apt-get install curl gpg -y
RUN mkdir -p /usr/share/zerotier && \
curl -o /usr/share/zerotier/tmp.asc "https://download.zerotier.com/contact%40zerotier.com.gpg" && \
gpg --no-default-keyring --keyring /usr/share/zerotier/zerotier.gpg --import /usr/share/zerotier/tmp.asc && \
rm -f /usr/share/zerotier/tmp.asc && \
echo "deb [signed-by=/usr/share/zerotier/zerotier.gpg] http://download.zerotier.com/debian/bookworm bookworm main" > /etc/apt/sources.list.d/zerotier.list
RUN apt-get update -qq && apt-get install curl -y
RUN curl -sSL -o zerotier-one.deb "${PACKAGE_BASEURL}/zerotier-one_${VERSION}_${ARCH}.deb"
RUN apt-get update -qq && apt-get install zerotier-one=${VERSION} curl iproute2 net-tools iputils-ping openssl libssl3 -y
FROM debian:buster
COPY --from=stage zerotier-one.deb .
RUN dpkg -i zerotier-one.deb && rm -f zerotier-one.deb
RUN echo "${VERSION}" >/etc/zerotier-version
RUN rm -rf /var/lib/zerotier-one
COPY entrypoint.sh.release /entrypoint.sh
RUN chmod 755 /entrypoint.sh
HEALTHCHECK --interval=1s CMD bash /healthcheck.sh
CMD []
ENTRYPOINT ["/entrypoint.sh"]

Jenkinsfile vendored Normal file
View file

@@ -0,0 +1,365 @@
pipeline {
options {
disableConcurrentBuilds()
preserveStashes(buildCount: 10)
timestamps()
}
parameters {
booleanParam(name: "BUILD_ALL", defaultValue: false, description: "Build all supported platform/architecture combos. Defaults to x86/x64 only")
}
agent none
stages {
stage ("Build") {
steps {
script {
def tasks = [:]
tasks << buildStaticBinaries()
tasks << buildDebianNative()
tasks << buildCentosNative()
parallel tasks
}
}
}
stage ("Package Static") {
steps {
script {
parallel packageStatic()
}
}
}
}
}
def buildStaticBinaries() {
def tasks = [:]
def dist = ["alpine"]
def archs = []
if (params.BUILD_ALL == true) {
archs = ["arm64", "amd64", "i386", "armhf", "armel", "ppc64le", "s390x"]
} else {
archs = ["amd64", "i386"]
}
tasks << getTasks(dist, archs, { distro, platform ->
def myNode = {
node ('linux-build') {
dir ("build") {
checkout scm
}
sh "echo ${distro}-${platform}"
def runtime = docker.image("ztbuild/${distro}-${platform}:latest")
runtime.inside {
dir("build") {
sh 'make -j8 ZT_STATIC=1 all'
sh "file ./zerotier-one"
sh "mv zerotier-one zerotier-one-static-${platform}"
stash includes: 'zerotier-one-static-*', name: "static-${platform}"
}
cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true
}
}
}
return myNode
})
return tasks
}
def getTasks(axisDistro, axisPlatform, task) {
def tasks = [:]
for(int i=0; i< axisDistro.size(); i++) {
def axisDistroValue = axisDistro[i]
for(int j=0; j< axisPlatform.size(); j++) {
def axisPlatformValue = axisPlatform[j]
tasks["${axisDistroValue}/${axisPlatformValue}"] = task(axisDistroValue, axisPlatformValue)
}
}
return tasks
}
def packageStatic() {
def tasks = [:]
def centos6 = ["centos6"]
def centos6Arch = ["i386", "amd64"]
tasks << getTasks(centos6, centos6Arch, { distro, arch ->
def myNode = {
node ('linux-build') {
dir ("build") {
checkout scm
}
def runtime = docker.image("ztbuild/${distro}-${arch}:latest")
runtime.inside {
dir("build") {
unstash "static-${arch}"
sh "mv zerotier-one-static-${arch} zerotier-one && chmod +x zerotier-one"
sh "make redhat"
sh "mkdir -p ${distro}"
sh "cp -av `find ~/rpmbuild/ -type f -name \"*.rpm\"` ${distro}/"
archiveArtifacts artifacts: "${distro}/*.rpm", onlyIfSuccessful: true
}
}
cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true
}
}
return myNode
})
def centos7 = ["centos7"]
def centos7Arch = ["i386"]
tasks << getTasks(centos7, centos7Arch, { distro, arch ->
def myNode = {
node ('linux-build') {
dir ("build") {
checkout scm
}
def runtime = docker.image("ztbuild/${distro}-${arch}:latest")
runtime.inside {
dir("build") {
unstash "static-${arch}"
sh "mv zerotier-one-static-${arch} zerotier-one && chmod +x zerotier-one"
sh "make redhat"
sh "mkdir -p ${distro}"
sh "cp -av `find ~/rpmbuild/ -type f -name \"*.rpm\"` ${distro}/"
archiveArtifacts artifacts: "${distro}/*.rpm", onlyIfSuccessful: true
}
}
cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true
}
}
return myNode
})
if (params.BUILD_ALL == true) {
def clefos7 = ["clefos"]
def clefos7Arch = ["s390x"]
tasks << getTasks(clefos7, clefos7Arch, { distro, arch ->
def myNode = {
node ('linux-build') {
dir ("build") {
checkout scm
}
def runtime = docker.image("ztbuild/${distro}-${arch}:latest")
runtime.inside {
dir("build/") {
unstash "static-${arch}"
sh "mv zerotier-one-static-${arch} zerotier-one && chmod +x zerotier-one"
sh "make redhat"
sh "mkdir -p ${distro}"
sh "cp -av `find ~/rpmbuild/ -type f -name \"*.rpm\"` ${distro}/"
archiveArtifacts artifacts: "${distro}/*.rpm", onlyIfSuccessful: true
}
}
cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true
}
}
return myNode
})
}
def debianJessie = ["debian-jessie"]
def debianJessieArchs = []
if (params.BUILD_ALL == true) {
debianJessieArch = ["armhf", "armel", "amd64", "i386"]
} else {
debianJessieArch = ["amd64", "i386"]
}
tasks << getTasks(debianJessie, debianJessieArch, { distro, arch ->
def myNode = {
node ('linux-build') {
dir ("build") {
checkout scm
}
def runtime = docker.image("ztbuild/${distro}-${arch}:latest")
runtime.inside {
sh "ls -la ."
dir('build/') {
sh "ls -la ."
unstash "static-${arch}"
sh "pwd"
sh "mv zerotier-one-static-${arch} zerotier-one && chmod +x zerotier-one && file ./zerotier-one"
sh "mv -f debian/rules.static debian/rules"
sh "make debian"
}
sh "mkdir -p ${distro}"
sh "mv *.deb ${distro}"
archiveArtifacts artifacts: "${distro}/*.deb", onlyIfSuccessful: true
}
cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true
}
}
return myNode
})
def ubuntuTrusty = ["ubuntu-trusty"]
def ubuntuTrustyArch = []
if (params.BUILD_ALL == true) {
ubuntuTrustyArch = ["i386", "amd64", "armhf", "arm64", "ppc64le"]
} else {
ubuntuTrustyArch = ["i386", "amd64"]
}
tasks << getTasks(ubuntuTrusty, ubuntuTrustyArch, { distro, arch ->
def myNode = {
node ('linux-build') {
dir ("build") {
checkout scm
}
def runtime = docker.image("ztbuild/${distro}-${arch}:latest")
runtime.inside {
sh "ls -la ."
dir('build/') {
sh "ls -la ."
unstash "static-${arch}"
sh "pwd"
sh "mv zerotier-one-static-${arch} zerotier-one && chmod +x zerotier-one && file ./zerotier-one"
sh "mv -f debian/rules.static debian/rules"
sh "make debian"
}
sh "mkdir -p ${distro}"
sh "mv *.deb ${distro}"
archiveArtifacts artifacts: "${distro}/*.deb", onlyIfSuccessful: true
}
cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true
}
}
return myNode
})
def debianWheezy = ["debian-wheezy"]
def debianWheezyArchs = []
if (params.BUILD_ALL == true) {
debianWheezyArchs = ["armhf", "armel", "amd64", "i386"]
} else {
debianWheezyArchs = ["amd64", "i386"]
}
tasks << getTasks(debianJessie, debianJessieArch, { distro, arch ->
def myNode = {
node ('linux-build') {
dir ("build") {
checkout scm
}
def runtime = docker.image("ztbuild/${distro}-${arch}:latest")
runtime.inside {
dir('build/') {
unstash "static-${arch}"
sh "mv zerotier-one-static-${arch} zerotier-one && chmod +x zerotier-one && file ./zerotier-one"
sh "mv -f debian/rules.wheezy.static debian/rules"
sh "mv -f debian/control.wheezy debian/control"
sh "make debian"
}
sh "mkdir -p ${distro}"
sh "mv *.deb ${distro}"
archiveArtifacts artifacts: "${distro}/*.deb", onlyIfSuccessful: true
}
cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true
}
}
return myNode
})
return tasks
}
def buildDebianNative() {
def tasks = [:]
def buster = ["debian-buster", "debian-stretch", "debian-bullseye", "debian-sid"]
def busterArchs = []
if (params.BUILD_ALL) {
busterArchs = ["s390x", "ppc64le", "i386", "armhf", "armel", "arm64", "amd64"]
} else {
busterArchs = ["amd64", "i386"]
}
def build = { distro, arch ->
def myNode = {
node ('linux-build') {
dir ("build") {
checkout scm
}
def runtime = docker.image("ztbuild/${distro}-${arch}:latest")
runtime.inside {
dir("build") {
sh 'make debian'
}
sh "mkdir -p ${distro}"
sh "mv *.deb ${distro}"
archiveArtifacts artifacts: "${distro}/*.deb", onlyIfSuccessful: true
cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true
}
}
}
return myNode
}
tasks << getTasks(buster, busterArchs, build)
// bash is broken when running under QEMU-s390x on Xenial
def xenial = ["ubuntu-xenial"]
def xenialArchs = []
if (params.BUILD_ALL == true) {
xenialArchs = ["i386", "amd64", "armhf", "arm64", "ppc64le"]
} else {
xenialArchs = ["i386", "amd64"]
}
tasks << getTasks(xenial, xenialArchs, build)
def ubuntu = ["ubuntu-bionic", "ubuntu-eoan"]
def ubuntuArchs = []
if (params.BUILD_ALL == true) {
ubuntuArchs = ["i386", "amd64", "armhf", "arm64", "ppc64le", "s390x"]
} else {
ubuntuArchs = ["i386", "amd64"]
}
tasks << getTasks(ubuntu, ubuntuArchs, build)
def kali = ["kali-rolling"]
def kaliArchs = ["amd64"]
tasks << getTasks(kali, kaliArchs, build)
return tasks
}
def buildCentosNative() {
def tasks = [:]
def build = { distro, arch ->
def myNode = {
node ('linux-build') {
dir ("build") {
checkout scm
}
def runtime = docker.image("ztbuild/${distro}-${arch}:latest")
runtime.inside {
dir("build") {
sh 'make -j4'
sh 'make redhat'
sh "mkdir -p ${distro}"
sh "cp -av `find ~/rpmbuild/ -type f -name \"*.rpm\"` ${distro}/"
archiveArtifacts artifacts: "${distro}/*.rpm", onlyIfSuccessful: true
}
cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true
}
}
}
return myNode
}
def centos8 = ["centos8"]
def centos8Archs = []
if (params.BUILD_ALL == true) {
centos8Archs = ["amd64", "arm64", "ppc64le"]
} else {
centos8Archs = ["amd64"]
}
tasks << getTasks(centos8, centos8Archs, build)
def centos7 = ["centos7"]
def centos7Archs = ["amd64"]
tasks << getTasks(centos7, centos7Archs, build)
return tasks
}

View file

@@ -26,7 +26,7 @@ Additional Use Grant: You may make use of the Licensed Work, provided you
ZeroTier behind the scenes to operate a service not
related to ZeroTier network administration.
* Create Non-Open-Source Commercial Derivative Works
* Create Non-Open-Source Commercial Derviative Works
(2) Link or directly include the Licensed Work in a
commercial or for-profit application or other product
@@ -47,7 +47,7 @@ Additional Use Grant: You may make use of the Licensed Work, provided you
services, social welfare, senior care, child care, and
the care of persons with disabilities.
Change Date: 2026-01-01
Change Date: 2025-01-01
Change License: Apache License version 2.0 as published by the Apache
Software Foundation

View file

@@ -26,8 +26,3 @@ endif
ifeq ($(OSTYPE),NetBSD)
include make-netbsd.mk
endif
drone:
@echo "rendering .drone.yaml from .drone.jsonnet"
drone jsonnet --format --stream
drone sign zerotier/ZeroTierOne --save

View file

@@ -14,7 +14,6 @@ The version must be incremented in all of the following files:
/debian/changelog
/ext/installfiles/mac/ZeroTier One.pkgproj
/ext/installfiles/windows/ZeroTier One.aip
../DesktopUI/mac-app-template/ZeroTier.app/Contents/Info.plist
The final .AIP file can only be edited on Windows with [Advanced Installer Enterprise](http://www.advancedinstaller.com/). In addition to incrementing the version be sure that a new product code is generated. (The "upgrade code" GUID on the other hand must never change.)
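As a pre-release sanity check, a short shell loop can confirm the new version string actually appears in each of the files listed above. This is only an illustrative sketch: it assumes you run it from the repository root, and the file list mirrors the excerpt above (a real tree may have additional version-bearing files).

```sh
#!/usr/bin/env bash
# Sketch: verify a release version string appears in the version-bearing files listed above.
set -euo pipefail

VERSION="${1:?usage: check-version.sh <version>}"   # e.g. 1.8.3

# File list taken from the excerpt above; extend as needed.
files=(
  "debian/changelog"
  "ext/installfiles/mac/ZeroTier One.pkgproj"
  "ext/installfiles/windows/ZeroTier One.aip"
)

for f in "${files[@]}"; do
  if grep -q "$VERSION" "$f"; then
    echo "OK      $f"
  else
    echo "MISSING $f (no occurrence of $VERSION)" >&2
  fi
done
```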

View file

@@ -60,11 +60,9 @@ To ensure you have a network available before trying to listen on it. Without pr
You can control a few settings including the identity used and the authtoken used to interact with the control socket (which you can forward and access through `localhost:9993`).
- `ZEROTIER_JOIN_NETWORKS`: additional way to set networks to join.
- `ZEROTIER_API_SECRET`: replaces the `authtoken.secret` before booting and allows you to manage the control socket's authentication key.
- `ZEROTIER_IDENTITY_PUBLIC`: the `identity.public` file for zerotier-one. Use `zerotier-idtool` to generate one of these for you.
- `ZEROTIER_IDENTITY_SECRET`: the `identity.secret` file for zerotier-one. Use `zerotier-idtool` to generate one of these for you.
- `ZEROTIER_LOCAL_CONF`: Sets the `local.conf` file content for zerotier-one
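A minimal usage sketch for the variables above. The image name (`zerotier/zerotier`) and the network ID (Earth, `8056c2e21c000001`, the same network the test script earlier in this diff joins) are placeholders, not confirmed values; substitute your own. Identities can be generated with `zerotier-idtool`.

```sh
# Sketch only: image tag and network ID below are placeholders.
zerotier-idtool generate identity.secret identity.public   # create an identity pair

docker run -d --name zerotier \
  --device /dev/net/tun --cap-add NET_ADMIN \
  -e ZEROTIER_JOIN_NETWORKS="8056c2e21c000001" \
  -e ZEROTIER_API_SECRET="$(openssl rand -hex 16)" \
  -e ZEROTIER_IDENTITY_PUBLIC="$(cat identity.public)" \
  -e ZEROTIER_IDENTITY_SECRET="$(cat identity.secret)" \
  -e ZEROTIER_LOCAL_CONF='{"settings":{"primaryPort":9993}}' \
  zerotier/zerotier:latest
```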
### Tips

README.md
View file

@@ -1,7 +1,6 @@
ZeroTier - Global Area Networking
======
*This document is written for a software developer audience. For information on using ZeroTier, see the: [Website](https://www.zerotier.com), [Documentation Site](https://docs.zerotier.com), and [Discussion Forum](https://discuss.zerotier.com).*
This document is written for a software developer audience. For information on using ZeroTier, see the: [Website](https://www.zerotier.com), [Documentation Site](https://docs.zerotier.com), and [Discussion Forum](https://discuss.zerotier.com)
ZeroTier is a smart programmable Ethernet switch for planet Earth. It allows all networked devices, VMs, containers, and applications to communicate as if they all reside in the same physical data center or cloud region.
@@ -37,41 +36,30 @@ The base path contains the ZeroTier One service main entry point (`one.cpp`), se
- `ext/`: third party libraries, binaries that we ship for convenience on some platforms (Mac and Windows), and installation support files.
- `include/`: include files for the ZeroTier core.
- `java/`: a JNI wrapper used with our Android mobile app. (The whole Android app is not open source but may be made so in the future.)
- `macui/`: a Macintosh menu-bar app for controlling ZeroTier One, written in Objective C.
- `node/`: the ZeroTier virtual Ethernet switch core, which is designed to be entirely separate from the rest of the code and able to be built as a stand-alone OS-independent library. Note to developers: do not use C++11 features in here, since we want this to build on old embedded platforms that lack C++11 support. C++11 can be used elsewhere.
- `osdep/`: code to support and integrate with OSes, including platform-specific stuff only built for certain targets.
- `rule-compiler/`: JavaScript rules language compiler for defining network-level rules.
- `service/`: the ZeroTier One service, which wraps the ZeroTier core and provides VPN-like connectivity to virtual networks for desktops, laptops, servers, VMs, and containers.
- `windows/`: Visual Studio solution files, Windows service code, and the Windows task bar app UI.
- `zeroidc/`: OIDC implementation used by ZeroTier service to log into SSO-enabled networks. (This part is written in Rust, and more Rust will be appearing in this repository in the future.)
### Contributing
Please do pull requests off of the `dev` branch.
Releases are done by merging `dev` into `main` and then tagging and doing builds.
### Build and Platform Notes
To build on Mac and Linux just type `make`. On FreeBSD and OpenBSD `gmake` (GNU make) is required and can be installed from packages or ports. For Windows there is a Visual Studio solution in `windows/`.
- **Mac**
- Xcode command line tools for macOS 10.13 or newer are required.
- Rust for x86_64 and ARM64 targets *if SSO is enabled in the build*.
- Xcode command line tools for OSX 10.8 or newer are required.
- **Linux**
- The minimum compiler versions required are GCC/G++ 8.x or CLANG/CLANG++ 5.x.
- The minimum compiler versions required are GCC/G++ 4.9.3 or CLANG/CLANG++ 3.4.2. (Install `clang` on CentOS 7 as G++ is too old.)
- Linux makefiles automatically detect and prefer clang/clang++ if present as it produces smaller and slightly faster binaries in most cases. You can override by supplying CC and CXX variables on the make command line.
- Rust for x86_64 and ARM64 targets *if SSO is enabled in the build*.
- **Windows**
- Visual Studio 2022 on Windows 10 or newer.
- Rust for x86_64 and ARM64 targets *if SSO is enabled in the build*.
- Windows 7 or newer is supported. This *may* work on Vista but isn't officially supported there. It will not work on Windows XP.
- We build with Visual Studio 2017. Older versions may not work. Clang or MinGW will also probably work but may require some makefile hacking.
- **FreeBSD**
- GNU make is required. Type `gmake` to build.
- `binutils` is required. Type `pkg install binutils` to install.
- Rust for x86_64 and ARM64 targets *if SSO is enabled in the build*.
- **OpenBSD**
- There is a limit of four network memberships on OpenBSD as there are only four tap devices (`/dev/tap0` through `/dev/tap3`).
- GNU make is required. Type `gmake` to build.
- Rust for x86_64 and ARM64 targets *if SSO is enabled in the build*.
Typing `make selftest` will build a *zerotier-selftest* binary which unit tests various internals and reports on a few aspects of the build environment. It's a good idea to try this on novel platforms or architectures.
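A quick sketch of that workflow on Linux (macOS is similar, minus `nproc`):
``` sh
# Build the service, then build and run the self-test binary.
make -j"$(nproc)"
make selftest
./zerotier-selftest   # unit tests various internals and reports on the build environment
```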
@ -87,14 +75,14 @@ On most distributions, macOS, and Windows, the installer will start the service
A home folder for your system will automatically be created.
The service is controlled via the JSON API, which by default is available at `127.0.0.1:9993`. It also listens on `0.0.0.0:9993` which is only usable if `allowManagementFrom` is properly configured in `local.conf`. We include a *zerotier-cli* command line utility to make API calls for standard things like joining and leaving networks. The *authtoken.secret* file in the home folder contains the secret token for accessing this API. See [service/README.md](service/README.md) for API documentation.
The service is controlled via the JSON API, which by default is available at 127.0.0.1 port 9993. We include a *zerotier-cli* command line utility to make API calls for standard things like joining and leaving networks. The *authtoken.secret* file in the home folder contains the secret token for accessing this API. See [service/README.md](service/README.md) for API documentation.
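As a rough sketch of what *zerotier-cli* does under the hood, the same API can be queried directly with `curl`, sending the contents of *authtoken.secret* in the `X-ZT1-Auth` header (paths below assume a Linux install):
``` sh
# Query node status and the list of joined networks from the local JSON API.
TOKEN="$(sudo cat /var/lib/zerotier-one/authtoken.secret)"
curl -s -H "X-ZT1-Auth: ${TOKEN}" http://127.0.0.1:9993/status
curl -s -H "X-ZT1-Auth: ${TOKEN}" http://127.0.0.1:9993/network
```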
Here's where home folders live (by default) on each OS:
* **Linux**: `/var/lib/zerotier-one`
* **FreeBSD** / **OpenBSD**: `/var/db/zerotier-one`
* **Mac**: `/Library/Application Support/ZeroTier/One`
* **Windows**: `\ProgramData\ZeroTier\One` (That's the default. The base 'shared app data' folder might be different if Windows is installed with a non-standard drive letter assignment or layout.)
* **Windows**: `\ProgramData\ZeroTier\One` (That's for Windows 7. The base 'shared app data' folder might be different on different Windows versions.)
### Basic Troubleshooting
@ -110,88 +98,8 @@ On CentOS check `/etc/sysconfig/iptables` for IPTables rules. For other distribu
ZeroTier One peers will automatically locate each other and communicate directly over a local wired LAN *if UDP port 9993 inbound is open*. If that port is filtered, they won't be able to see each others' LAN announcement packets. If you're experiencing poor performance between devices on the same physical network, check their firewall settings. Without LAN auto-location peers must attempt "loopback" NAT traversal, which sometimes fails and in any case requires that every packet traverse your external router twice.
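If you need to open that port on a Linux host, a hedged example for two common firewall frontends (adjust zones and interfaces for your setup):
``` sh
# ufw (Debian/Ubuntu)
sudo ufw allow 9993/udp

# firewalld (RHEL/Fedora)
sudo firewall-cmd --permanent --add-port=9993/udp
sudo firewall-cmd --reload
```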
Users behind certain types of firewalls and "symmetric" NAT devices may not be able to connect to external peers directly at all. ZeroTier has limited support for port prediction and will *attempt* to traverse symmetric NATs, but this doesn't always work. If P2P connectivity fails you'll be bouncing UDP packets off our relay servers resulting in slower performance. Some NAT router(s) have a configurable NAT mode, and setting this to "full cone" will eliminate this problem. If you do this you may also see a magical improvement for things like VoIP phones, Skype, BitTorrent, WebRTC, certain games, etc., since all of these use NAT traversal techniques similar to ours.
Users behind certain types of firewalls and "symmetric" NAT devices may not able able to connect to external peers directly at all. ZeroTier has limited support for port prediction and will *attempt* to traverse symmetric NATs, but this doesn't always work. If P2P connectivity fails you'll be bouncing UDP packets off our relay servers resulting in slower performance. Some NAT router(s) have a configurable NAT mode, and setting this to "full cone" will eliminate this problem. If you do this you may also see a magical improvement for things like VoIP phones, Skype, BitTorrent, WebRTC, certain games, etc., since all of these use NAT traversal techniques similar to ours.
If a firewall between you and the Internet blocks ZeroTier's UDP traffic, you will fall back to last-resort TCP tunneling to rootservers over port 443 (https impersonation). This will work almost anywhere but is *very slow* compared to UDP or direct peer to peer connectivity.
Additional help can be found in our [knowledge base](https://zerotier.atlassian.net/wiki/spaces/SD/overview).
### Prometheus Metrics
Prometheus metrics are available at the `/metrics` API endpoint. This endpoint is protected by an API key stored in `metricstoken.secret` to prevent unwanted information leakage; information that could be gleaned from the metrics includes the networks you have joined and the peers your instance is talking to.
Access control is via the ZeroTier control interface itself and `metricstoken.secret`. This can be sent as a bearer auth token, via the `X-ZT1-Auth` HTTP header field, or appended to the URL as `?auth=<token>`. You can see the current metrics via `cURL` with the following command:
// Linux
curl -H "X-ZT1-Auth: $(sudo cat /var/lib/zerotier-one/metricstoken.secret)" http://localhost:9993/metrics
// macOS
curl -H "X-ZT1-Auth: $(sudo cat /Library/Application\ Support/ZeroTier/One/metricstoken.secret)" http://localhost:9993/metrics
// Windows PowerShell (Admin)
Invoke-RestMethod -Headers @{'X-ZT1-Auth' = "$(Get-Content C:\ProgramData\ZeroTier\One\metricstoken.secret)"; } -Uri http://localhost:9993/metrics
To configure a scrape job in Prometheus on the machine ZeroTier is running on, add this to your Prometheus `scrape_config`:
- job_name: zerotier-one
honor_labels: true
scrape_interval: 15s
metrics_path: /metrics
static_configs:
- targets:
- 127.0.0.1:9993
labels:
group: zerotier-one
node_id: $YOUR_10_CHARACTER_NODE_ID
authorization:
credentials: $YOUR_METRICS_TOKEN_SECRET
If neither of these methods is desirable, it is probably possible to distribute metrics via [Prometheus Proxy](https://github.com/pambrose/prometheus-proxy) or some other tool. Note: we have not tested this internally, but it will probably work with the correct configuration.
Metrics are also available on disk in ZeroTier's working directory (one way to consume this file is sketched after the paths below):
// Linux
/var/lib/zerotier-one/metrics.prom
// macOS
/Library/Application\ Support/ZeroTier/One/metrics.prom
// Windows
C:\ProgramData\ZeroTier\One\metrics.prom
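One untested way to consume the on-disk copy, assuming Prometheus `node_exporter` with the textfile collector is already running, is to copy it into the collector's directory on a timer:
``` sh
# Untested sketch: the textfile collector directory path is an assumption;
# run this from cron or a systemd timer as root.
cp /var/lib/zerotier-one/metrics.prom \
   /var/lib/node_exporter/textfile_collector/zerotier.prom
```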
#### Available Metrics
| Metric Name | Labels | Metric Type | Description |
| --- | --- | --- | --- |
| zt_packet | packet_type, direction | Counter | ZeroTier packet type counts |
| zt_packet_error | error_type, direction | Counter | ZeroTier packet errors |
| zt_data | protocol, direction | Counter | number of bytes ZeroTier has transmitted or received |
| zt_num_networks | | Gauge | number of networks this instance is joined to |
| zt_network_multicast_groups_subscribed | network_id | Gauge | number of multicast groups networks are subscribed to |
| zt_network_packets | network_id, direction | Counter | number of incoming/outgoing packets per network |
| zt_peer_latency | node_id | Histogram | peer latency (ms) |
| zt_peer_path_count | node_id, status | Gauge | number of paths to peer |
| zt_peer_packets | node_id, direction | Counter | number of packets to/from a peer |
| zt_peer_packet_errors | node_id | Counter | number of incoming packet errors from a peer |
If there are other metrics you'd like to see tracked, ask us in an Issue or send us a Pull Request!
### HTTP / App server
There is a static http file server suitable for hosting Single Page Apps at http://localhost:9993/app/<app-path>
Use `zerotier-cli info -j` to find your zerotier-one service's homeDir
``` sh
cd $ZT_HOME
sudo mkdir -p app/app1
sudo mkdir -p app/app2
echo '<html><meta charset=utf-8><title>app1</title><body><h1>hello world 1' | sudo tee app/app1/index.html
echo '<html><meta charset=utf-8><title>app2</title><body><h1>hello world 2' | sudo tee app/app2/index.html
curl -sL http://localhost:9993/app/app1 http://localhost:9993/app/app2
```
Then visit [http://localhost:9993/app/app1/](http://localhost:9993/app/app1/) and [http://localhost:9993/app/app2/](http://localhost:9993/app/app2/)
Requests to paths that don't exist return the app root index.html, as is customary for SPAs.
If you want, you can write some javascript that talks to the service or controller [api](https://docs.zerotier.com/service/v1).
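For example, a page served from `/app/` would issue requests like the following (shown with `curl` for brevity; browser JavaScript would use `fetch` with the same header, or append `?auth=<token>`):
``` sh
# Sketch: list networks defined on a local controller via the controller API.
curl -s -H "X-ZT1-Auth: $(sudo cat /var/lib/zerotier-one/authtoken.secret)" \
  http://localhost:9993/controller/network
```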

View file

@ -1,168 +1,7 @@
ZeroTier Release Notes
======
# 2024-10-23 -- Version 1.14.2
* Fix for missing entitlement on macOS Sequoia.
* Fix for a problem correctly parsing local.conf to enable low bandwidth mode.
* Increment versions of some dependent libraries.
* Other fixes.
# 2024-09-12 -- Version 1.14.1
* Multithreaded packet I/O support! Currently this is just for Linux and must
be enabled in local.conf. It will likely make the largest difference on small
multi-core devices where CPU is a bottleneck and high throughput is desired.
It may be enabled by default in the future but we want it to be thoroughly
tested. It's a little harder than it seems at first glance due to the need
to keep packets in sequence and balance load.
* Several multipath bug fixes.
* Updated the versions on a number of libraries related to OIDC support and HTTP.
* MacOS .app now shows the correct version in its Info.plist manifest.
* Sanitize MAC addresses in JSON format rules parser.
* Some basic information about the platform (OS, CPU architecture) is now reported
to network controllers when networks are joined so it can be displayed to
network admins and in the future used in policy checking and inventory operations.
# 2024-05-02 -- Version 1.14.0
* Linux I/O performance improvements under heavy load
* Improvements to multipath
* Fix for port rebinding "coma" bug after periods offline (some laptop users)
* Fixed a rules engine quirk/ambiguity (GitHub Issue #2200)
* Controller API enhancements: node names and other node meta-data
* Other bug fixes
# 2023-09-12 -- Version 1.12.2
* More improvements to macOS full tunnel mode.
* Faster recovery after changes to physical network settings.
# 2023-08-25 -- Version 1.12.1
* Minor release to fix a port binding issue in Linux.
* Update Debian dependencies.
* No changes for other platforms.
# 2023-08-23 -- Version 1.12.0
* Experimental Windows ARM64 support
* Fix numerous sleep/wake issues on macOS and other platforms
* Faster recovery after changes to physical network settings
* Prometheus compatible metrics support!
* Fix full tunnel mode on recent macOS versions
* Numerous macOS DNS fixes
* 10-30% speed improvement on Linux
# 2023-03-23 -- Version 1.10.6
* Prevent binding temporary ipv6 addresses on macos (#1910)
* Prevent path-learning loops (#1914)
* Prevent infinite loop of UAC prompts in tray app
# 2023-03-10 -- Version 1.10.5
* Fix for high CPU usage bug on Windows
# 2023-03-07 -- Version 1.10.4
* SECURITY FIX (Windows): this version fixes a file permission problem on
Windows that could allow non-privileged users on a Windows system to read
privileged files in the ZeroTier service's working directory. This could
allow an unprivileged local Windows user to administrate the local ZeroTier
instance without appropriate local permissions. This issue is not remotely
exploitable unless a remote user can read arbitrary local files, and does
not impact other operating systems.
* Fix a bug in the handling of multiple IP address assignments to virtual
interfaces on macOS.
# 2023-02-15 -- Version 1.10.3
* Fix for duplicate paths in client. Could cause connectivity issues. Affects all platforms.
* Fix for Ethernet Tap MTU setting, would not properly apply on Linux.
* Fix default route bugs (macOS.)
* Enable Ping automatically for ZeroTier Adapters (Windows.)
* SSO updates and minor bugfixes.
* Add low-bandwidth mode.
* Add forceTcpRelay mode (optionally enabled.)
* Fix bug that prevented setting of custom TCP relay address.
* Build script improvements and bug fixes.
# 2022-11-01 -- Version 1.10.2
* Fix another SSO "stuck client" issue in zeroidc.
* Expose root-reported external IP/port information via the local JSON API for better diagnostics.
* Multipath: CLI output improvement for inspecting bonds
* Multipath: balance-aware mode
* Multipath: Custom policies
* Multipath: Link quality measurement improvements
Note that releases are coming few and far between because most of our dev effort is going into version 2.
# 2022-06-27 -- Version 1.10.1
* Fix an issue that could cause SSO clients to get "stuck" on stale auth URLs.
* A few other SSO related bug fixes.
# 2022-06-07 -- Version 1.10.0
* Fix formatting problem in `zerotier-cli` when using SSO networks.
* Fix a few other minor bugs in SSO signin to prepare for general availability.
* Remove requirement for webview in desktop UI and instead just make everything available via the tray pulldown/menu. Use [libui-ng](https://github.com/libui-ng/libui-ng) for minor prompt dialogs. Saves space and eliminates installation headaches on Windows.
* Fix SSO "spam" bug in desktop UI.
* Use system default browser for SSO login so all your plugins, MFA devices, password managers, etc. will work as you have them configured.
* Minor fix for bonding/multipath.
# 2022-05-10 -- Version 1.8.10
* Fixed a bug preventing SSO sign-on on Windows.
# 2022-04-25 -- Version 1.8.9
* Fixed a long-standing and strange bug that was causing sporadic "phantom" packet authentication failures. Not a security problem but could be behind sporadic reports of link failures under some conditions.
* Fixed a memory leak in SSO/OIDC support.
* Fixed SSO/OIDC display error on CLI.
* Fixed a bug causing nodes to sometimes fail to push certs to each other (primarily affects SSO/OIDC use cases).
* Fixed a deadlock bug on leaving SSO/OIDC managed networks.
* Added some new Linux distributions to the build subsystem.
# 2022-04-11 -- Version 1.8.8
* Fix a local privilege escalation bug in the Windows installer.
* Dependency fix for some Ubuntu versions.
* No changes for other platforms. Windows upgrade recommended, everyone else optional.
# 2022-03-30 -- Version 1.8.7
* Fix for dependency installations in Windows MSI package.
* Fix for desktop UI setup when run by a non-super-user.
* Bug fix in local OIDC / SSO support for auth0 and other providers.
* Other minor fixes for e.g. old Linux distributions.
# 2022-03-04 -- Version 1.8.6
* Fixed an issue that could cause the UI to be non-responsive if not joined to any networks.
* Fix dependency issues in Debian and RedHat packages for some distributions (Fedora, Mint).
* Bumped the peer cache serialization version to prevent "coma" issues on upgrade due to changes in path logic behaving badly with old values.
# 2022-02-22 -- Version 1.8.5
* Plumbing under the hood for endpoint device SSO support.
* Fix in LinuxEthernetTap to tap device support on very old (2.6) Linux kernels.
* Fix an issue that could cause self-hosted roots ("moons") to fail to assist peers in making direct links. (GitHub issue #1512)
* Merge a series of changes by Joseph Henry (of ZeroTier) that should fix some edge cases where ZeroTier would "forget" valid paths.
* Minor multipath improvements for automatic path negotiation.
# 2021-11-30 -- Version 1.8.4
* Fixed an ugly font problem on some older macOS versions.
* Fixed a bug that could cause the desktop tray app control panel to stop opening after a while on Windows.
* Fixed a possible double "release" in macOS tray app code that crashed on older macOS versions.
* Fixed installation on 32-bit Windows 10.
* Fixed a build flags issue that could cause ZeroTier to crash on older ARM32 CPUs.
# 2021-11-15 -- Version 1.8.3
# 2021-11-15 -- -- Version 1.8.3
* Remove problematic spinlock, which was only used on x86_64 anyway. Just use pthread always.
* Fix fd leak on MacOS that caused non-responsiveness after some time.
@ -372,7 +211,7 @@ We're trying to fix all these issues before the 1.6.0 release. Stay tuned.
# 2017-04-20 -- Version 1.2.4
* Managed routes are now only bifurcated for the default route. This is a change in behavior, though few people will probably notice. Bifurcating all managed routes was causing more trouble than it was worth for most users.
* Up to 2X crypto speedup on x86-64 (except Windows, which will take some porting) and 32-bit ARM platforms due to integration of fast assembly language implementations of Salsa20/12 from the [supercop](http://bench.cr.yp.to/supercop.html) code base. These were written by Daniel J. Bernstein and are in the public domain. My MacBook Pro (Core i5 2.8ghz) now does almost 1.5GiB/sec Salsa20/12 per core and a Raspberry Pi got a 2X boost. 64-bit ARM support and Windows support will take some work but should not be too hard.
* Up to 2X crypto speedup on x86-64 (except Windows, which will take some porting) and 32-bit ARM platforms due to integration of fast assembly language implementations of Salsa20/12 from the [supercop](http://bench.cr.yp.to/supercop.html) code base. These were written by Daniel J. Bernstein and are in the public domain. My Macbook Pro (Core i5 2.8ghz) now does almost 1.5GiB/sec Salsa20/12 per core and a Raspberry Pi got a 2X boost. 64-bit ARM support and Windows support will take some work but should not be too hard.
* Refactored code that manages credentials to greatly reduce memory use in most cases. This may also result in a small performance improvement.
* Reworked and simplified path selection and priority logic to fix path instability and dead path persistence edge cases. There have been some sporadic reports of persistent path instabilities and dead paths hanging around that take minutes to resolve. These have proven difficult to reproduce in house, but hopefully this will fix them. In any case it seems to speed up path establishment in our tests and it makes the code simpler and more readable.
* Eliminated some unused cruft from the code around path management and in the peer class.

View file

@ -1,93 +0,0 @@
# Security
ZeroTier takes the security of our software products and services seriously, which
includes all source code repositories managed through our GitHub organization.
## Supported Versions
The following versions of ZeroTier One receive security updates
| Version | Supported |
| -------- | ------------------ |
| 1.14.x | :white_check_mark: |
| 1.12.x | :white_check_mark: |
| < 1.12.0 | :x: |
## Reporting a Vulnerability
**Please do not report security issues through public GitHub issues**
Instead, please report vulnerabilities via email to security@zerotier.com. If possible,
please encrypt with our PGP key (see below).
Please include the following information, or as much as you can provide to help us
understand the nature and scope of the issue:
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue
## Preferred Languages
We prefer all communications to be in English.
## security@zerotier.com PGP key
```
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBGQGOVIBEACalXTnNqaiSOVLFEiqHpDMg8N/OI5D5850Xy1ZEvx3B3rz7cbn
k30ozHtJKbh+vqpyItE7DjyQAuF19gP5Q64Yh0Y+MmLHq60q/GwOwAYz7cI+UzA3
5x8YqcmTp32LAM1xJn+iMlMLBuAmJl4kULKmOXPlpqPiyTFs5saizvm7fgRmfgJJ
HpsnIrTkaDFJhAR+jvMJohVYwmhuydeI0DsHu7KGpG1ddcHDrUjOPNqXnnAPSPwx
llw4yfKlQb8GYErsv/G5QVyzd5+SxEuiI4MARRnrk8LlMQ33CR6pzIQ/Bk5AAmye
mHqfEAknkiOf++urYhRs9BL3Kz3MdV0cg92zr9EFOg0u56jxf5OnAiTOhGUUA0hn
dS7peVGl46R9Oy2JYIazNDGi+4NIsYDFXsnsss9xOQVygPyeQd71zFHfix0jct9w
j3o/kj7Egsnm9nc13354bYT6bbalqXiRWwGH1eAFpjueNWiVFwZS6NZUP3WeNDiY
BlPo1LodvolbXiJcTILTCyEkERJPCK2zoE2nTdVfvTLWsuehw1M6Yd2/q74TVYy/
RY+KjHkrChEBQ9PqXsXRHj6opKbT8JLfZkvU5k+3IiqqxOpB+QXFI/whj493CxWW
so7QAmzOCyJq8GDVPxzkwUac22YIkXdiOmb8i/HWq+kLY/HjQE259Gx6KwARAQAB
tClaZXJvVGllciBTZWN1cml0eSA8c2VjdXJpdHlAemVyb3RpZXIuY29tPokCTAQT
AQoANhYhBH1HQGb+4jzl6mnFqf09m6uqADkABQJkBjlSAhsDBAsJCAcEFQoJCAUW
AgMBAAIeAQIXgAAKCRD9PZurqgA5ACqPD/sFt6SG6Tu0HwTY2ofJtYsa2GBLL0pf
dYlX4cWSs1PVB5+m5Oj18y+GB2umA9GnsVtmvaSfp3XEngt2zNWX27uUsVfL35b2
/5TVVe8RjzOedqMN+lQWMvO+f/C1zmWYXjjpC+iGjgMMaRRrofkkn+7uL4N9y6gY
rcXtpACT1rYFC+i1AKnZfUO8Vr5ji7odq0f7bDkN/N38rB0kRRwEmO8wqdpQK6gK
nxf9vgJl5ggimDk5Xtz1sfd3y28bf5N4hdOCkXUbd10nUFY3wDNTM4VxozxTGJeG
imdcc19Wuw/1fGUZ5SIjgPanCdPLGYwSTr+M6Fuern9uTtlC1GOby3BUtmVGP6EU
1pSAJSRpmoBPHKKOYtSMwV8PCboXru9P1ab8y8STKM3SKyghUJrl17gdc0LaksZa
E54pJudGPIQMFRqZjMdV6jgMuaLTozjZ4mW8EThf4mkX4xDkO8l7cOn0225ZYJZC
lZKpdnwzk9owkJA80u4KBNJxTtB4ZAPzjBsD5hFzCZQTLNQp/psU3EjZsau28eXT
E/C1QjEQHgy4ohkgQlCm1H1+clKssCWcdmsVGXuS1u8gh4K6X9b0Z6LeCGRaQvH2
+DB8oTAdqp9nUZv9rP4pbo+sR4fF67CFLriVuxjedAiFkbM4uHMFcL4tc/X9+DRo
YN5X7oEkZvO507kCDQRkBjlSARAAz58UMF7K1qKyQjzKTcutaYZ5SaIGky9lCLZn
/2vjpFCoBogkxS/6IKQcwZk8b4S9QstaaQZDFEkxqNeKC0GiFTAMAb6SmYcK495h
EZnHl0NA5Nc2dBlZk5E/ENzTCz2bXaxCcVESc2z+xCzu07brbhGrqvliKiwOUzt9
JzqEsar6I95OutBcZvkFCs44/Uf9bS1qf1w4klE8w3vdMtGH23umrET4tFZ+sh6o
ZFtQx0u2eKjsRdn/RMtsxLNaJlcE1DdIAqBpQrcmuwMC8v5wUGfCGZjhClzmyQlq
akUkayir7UtbHbFT/mgO+YI77YGXWk5QrwPscqqT2l8KB/YMujNDmaWa/0KV1lIY
zr5s4dzVeiwqFLR9ANFIhzFwzf3JLi6XSx123Qix0TxZoYPZCHl7yoi9qi6qybz5
0Od2LSz3jbApeKYymZ+zjE+YV5y9DI6Wzy1j2M1FogNvTO9fMk+6dLt4HhTdSNvH
cKya462YCcy+tnZTkhmh+FTebbJlV6D4wG7skE5KCdBhjm53xLwp6XW9L6n2CrkL
W1IDBcCz0oPd1sMkXbO3wnxdXprV2XurCfsg/R2nszSNzvdJ8/xj3cr9hpoJ714R
qqyoEDRZ1Ss9kGL166o5MpN5qb/EewdkqGgWP7YFXbhsdHQiW7Z7dAqzjoaybD4O
nakkwyUAEQEAAYkCNgQYAQoAIBYhBH1HQGb+4jzl6mnFqf09m6uqADkABQJkBjlS
AhsMAAoJEP09m6uqADkAax0P/Rh8EZYRqW6dPYTl1YQusAK10rAcRNq3ekjofXGk
oXK1S7HWGoFgl5++5nfSfNgFJ5VLcgIM56wtIf49zFjWe5oC6fw8k+ghh4d2chMP
hdDILx6e0c30Iq1+EvovGR9hWa0wJ4cKTdzlwhY9ZC09q0ia+bl2mwpie1JQDR0c
zXCjt+PldLeeK9z1/XT0Q7KowYC+U18oR+KFm+EaRV4QT85JVequnIeGkmaHJrHB
lH4T5A5ib7y8edon1c0Zx3GsaxJUojkEJ0SX7ffVDu6ztUZfkHfCVpMW4VzUeGA/
m+CtFO9ciLRGZEkRa+zhIGoBvwEXU0GiwiF4nZ0F2C8UioeW0YIEV9zl3nXJctYE
ZKc2whSENQRTGgaYHVoVZhznt71LKWgFLshwBo81UCXVkzwAjMW1ActDnmPw5M7q
xR5Qp5G49Z1GmfSozazha0HVFPKNV5i3RlTzs4yLUnZyH0yC9IvtOefMHcLjG96L
N5miEV97gvJJjrn8rhRvpUwAWgmT/9IuYjBNQTtNN40arto5HxezR76WCjdKYxdL
p3dM1iiBDShHNm7LdyZlLFhTOMU0tNBxJJ7B09ar5gakeZjD+2aB1ODX9VuFtozL
onBjI2gIkry0UIkuznHfFw05lZAZAiqHEVgVi/WTk4C/bklDZNgE0lx+IWzEz2iS
L455
=lheL
-----END PGP PUBLIC KEY BLOCK-----
```

View file

@ -1,3 +1,3 @@
#!/bin/bash
c++ -std=c++11 -I../.. -I../../ext -I.. -g -o mkworld ../../node/C25519.cpp ../../node/Salsa20.cpp ../../node/SHA512.cpp ../../node/Identity.cpp ../../node/Utils.cpp ../../node/InetAddress.cpp ../../osdep/OSUtils.cpp mkworld.cpp -lm
c++ -std=c++11 -I../.. -I.. -g -o mkworld ../../node/C25519.cpp ../../node/Salsa20.cpp ../../node/SHA512.cpp ../../node/Identity.cpp ../../node/Utils.cpp ../../node/InetAddress.cpp ../../osdep/OSUtils.cpp mkworld.cpp -lm

View file

@ -1,13 +0,0 @@
ARG ZT_NAME
FROM 084037375216.dkr.ecr.us-east-2.amazonaws.com/${ZT_NAME}-builder as builder
WORKDIR /work/build
COPY . .
RUN pwd
RUN ls -la .
RUN make clean
RUN make debian
RUN ls -ls /work
FROM scratch AS export
ARG ZT_NAME
COPY --from=builder /work/*.deb ./${ZT_NAME}/

View file

@ -1,36 +0,0 @@
ARG DOCKER_ARCH
FROM --platform=linux/${DOCKER_ARCH} alpine:edge AS builder
RUN apk update
RUN apk add curl
RUN apk add bash
RUN apk add file
RUN apk add rust
RUN apk add cargo
RUN apk add make
RUN apk add cmake
RUN apk add clang
RUN apk add openssl-dev
RUN apk add linux-headers
RUN apk add build-base
RUN apk add openssl-libs-static
COPY . .
RUN ZT_STATIC=1 make
RUN ls -la
ARG DOCKER_ARCH
FROM --platform=linux/${DOCKER_ARCH} centos:6 AS stage
WORKDIR /root/rpmbuild/BUILD
COPY . .
COPY --from=builder zerotier-* ./
RUN curl https://gist.githubusercontent.com/someara/b363002ba6e57b3c474dd027d4daef85/raw/4ac5534139752fc92fbe1a53599a390214f69615/el6%2520vault --output /etc/yum.repos.d/CentOS-Base.repo
RUN uname -a
RUN yum -y install make gcc rpm-build
RUN pwd
RUN ls -la
RUN make redhat
FROM scratch AS export
ARG ZT_NAME
COPY --from=stage /root/rpmbuild/RPMS/*/*.rpm ./${ZT_NAME}/

View file

@ -1,9 +0,0 @@
ARG ZT_NAME
FROM 084037375216.dkr.ecr.us-east-2.amazonaws.com/${ZT_NAME}-builder as builder
WORKDIR /root/rpmbuild/BUILD
COPY . .
RUN make redhat
FROM scratch AS export
ARG ZT_NAME
COPY --from=builder /root/rpmbuild/RPMS/*/*.rpm ./${ZT_NAME}/

View file

@ -1,13 +0,0 @@
ARG ZT_NAME
FROM 084037375216.dkr.ecr.us-east-2.amazonaws.com/${ZT_NAME}-tester
ARG BASEURL
ARG VERSION
ARG DEB_ARCH
ARG ZT_NAME
ARG DISTRO
RUN curl -s http://${BASEURL}/key.gpg -o /etc/apt/trusted.gpg.d/zerotier.gpg
RUN echo "deb [arch=${DEB_ARCH} signed-by=/etc/apt/trusted.gpg.d/zerotier.gpg] http://${BASEURL}/${DISTRO} ${ZT_NAME} main" > /etc/apt/sources.list.d/zerotier.list
RUN apt-get -qq update
RUN apt-get -qq install zerotier-one=${VERSION}
RUN ldd $(which zerotier-cli)

View file

@ -1,4 +0,0 @@
ARG DOCKER_ARCH
FROM --platform=linux/${DOCKER_ARCH} centos:6
RUN printf "[C6.10-base]\nname=CentOS-6.10 - Base\nbaseurl=http://vault.epel.cloud/6.10/os/\$basearch/\ngpgcheck=1\ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6\nenabled=1\nmetadata_expire=never\n" > /etc/yum.repos.d/CentOS-Base.repo
RUN yum -y install curl

View file

@ -1,17 +0,0 @@
ARG ZT_NAME
FROM 084037375216.dkr.ecr.us-east-2.amazonaws.com/${ZT_NAME}-tester
ARG BASEURL
ARG VERSION
ARG DEB_ARCH
ARG ZT_NAME
ARG DISTRO
ARG DNF_ARCH
RUN curl -s http://${BASEURL}/key.asc -o /etc/pki/rpm-gpg/RPM-GPG-KEY-zerotier
RUN rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-zerotier
RUN rpm -q gpg-pubkey --qf '%{NAME}-%{VERSION}-%{RELEASE}\t%{SUMMARY}\n'
RUN printf "[zerotier]\nname=zerotier\nbaseurl=http://${BASEURL}/${DISTRO}/${ZT_NAME}/$basearch/\nenabled=1\ngpgcheck=1\ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-zerotier\n" > /etc/yum.repos.d/zerotier.repo
# RUN yum -v repolist
RUN setarch ${DNF_ARCH} yum -y install zerotier-one-${VERSION}
RUN file $(which zerotier-cli)
RUN ldd $(which zerotier-cli)

View file

@ -1,49 +0,0 @@
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
ZT_NAME="$1" ; shift
DISTRO="$1" ; shift
ZT_ISA="$1" ; shift
VERSION="$1" ; shift
BUILD_EVENT="$1" ; shift
source "$(dirname $0)/lib.sh"
if [ -f "ci/Dockerfile-build.${ZT_NAME}" ]; then
DOCKERFILE="ci/Dockerfile-build.${ZT_NAME}"
else
DOCKERFILE="ci/Dockerfile-build.${PKGFMT}"
fi
echo "#~~~~~~~~~~~~~~~~~~~~"
echo "$0 variables:"
echo "nproc: $(nproc)"
echo "ZT_NAME: ${ZT_NAME}"
echo "DISTRO: ${DISTRO}"
echo "ZT_ISA: ${ZT_ISA}"
echo "VERSION: ${VERSION}"
echo "BUILD_EVENT: ${BUILD_EVENT}"
echo "DOCKER_ARCH: ${DOCKER_ARCH}"
echo "DNF_ARCH: ${DNF_ARCH}"
echo "RUST_TRIPLET: ${RUST_TRIPLET}"
echo "PKGFMT: ${PKGFMT}"
echo "PWD: ${PWD}"
echo "DOCKERFILE: ${DOCKERFILE}"
echo "#~~~~~~~~~~~~~~~~~~~~"
make munge_rpm zerotier-one.spec VERSION=${VERSION}
make munge_deb debian/changelog VERSION=${VERSION}
docker buildx build \
--no-cache=true \
--build-arg ZT_NAME="${ZT_NAME}" \
--build-arg RUST_TRIPLET="${RUST_TRIPLET}" \
--build-arg DOCKER_ARCH="${DOCKER_ARCH}" \
--build-arg DNF_ARCH="${DNF_ARCH}" \
--platform linux/${DOCKER_ARCH} \
-f ${DOCKERFILE} \
-t build \
. \
--output type=local,dest=. \
--target export

View file

@ -1,63 +0,0 @@
case $ZT_NAME in
el*|fc*|amzn*)
export PKGFMT=rpm
;;
*)
export PKGFMT=deb
esac
case $ZT_ISA in
386)
export DOCKER_ARCH=386
export DEB_ARCH=i386
export DNF_ARCH=i686
export RUST_TRIPLET=i686-unknown-linux-gnu
;;
amd64)
export DOCKER_ARCH=amd64
export DEB_ARCH=amd64
export DNF_ARCH=x86_64
export RUST_TRIPLET=x86_64-unknown-linux-gnu
;;
armv7)
export DOCKER_ARCH=arm/v7
export DNF_ARCH=armv7
export DEB_ARCH=armhf
export RUST_TRIPLET=armv7-unknown-linux-gnueabihf
;;
arm64)
export DOCKER_ARCH=arm64/v8
export DEB_ARCH=arm64
export DNF_ARCH=linux64
export RUST_TRIPLET=aarch64-unknown-linux-gnu
;;
riscv64)
export DOCKER_ARCH=riscv64
export DEB_ARCH=riscv64
export DNF_ARCH=riscv64
export RUST_TRIPLET=riscv64gc-unknown-linux-gnu
;;
ppc64le)
export DOCKER_ARCH=ppc64le
export DEB_ARCH=ppc64el
export DNF_ARCH=ppc64le
export RUST_TRIPLET=powerpc64le-unknown-linux-gnu
;;
mips64le)
export DOCKER_ARCH=mips64le
export DEB_ARCH=mips64le
export DNF_ARCH=mips64le
export RUST_TRIPLET=mips64el-unknown-linux-gnuabi64
;;
s390x)
export DOCKER_ARCH=s390x
export DEB_ARCH=s390x
export DNF_ARCH=s390x
export RUST_TRIPLET=s390x-unknown-linux-gnu
;;
*)
echo "ERROR: could not determine architecture settings. PLEASE FIX ME"
exit 1
;;
esac

View file

@ -1,37 +0,0 @@
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
export FILE=$1
export VERSION=$2
export NAME=$3
export MESSAGE=$4
export DATE=$(date "+%a, %d %b %Y %T %z")
# export DATE=$(date "+%a %b %d %Y")
set +e
grep --version | grep BSD &> /dev/null
if [ $? == 0 ]; then BSDGREP=true ; else BSDGREP=false ; fi
set -e
# echo "#~~~~~~~~~~~~~~~~~~~~"
# echo "$0 variables:"
# echo "VERSION: ${VERSION}"
# echo "NAME: ${NAME}"
# echo "MESSAGE: ${MESSAGE}"
# echo "DATE: ${DATE}"
# echo "BSDGREP: ${BSDGREP}"
# echo "#~~~~~~~~~~~~~~~~~~~~"
# echo
if $BSDGREP ; then
sed -i '' s/^Version:.*/"Version: ${VERSION}"/ ${FILE}
else
sed -i s/^Version:.*/"Version: ${VERSION}"/ ${FILE}
fi
awk -v version=${VERSION} -v date=${DATE} -v name=${NAME} -v message=${MESSAGE} \
'BEGIN{print "zerotier-one (" version ") stable; urgency=medium\n\n * " message "\n\n -- " name " " date "\n" }{ print }' \
${FILE} > ${FILE}.new
mv ${FILE}.new ${FILE}

View file

@ -1,36 +0,0 @@
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
export FILE=$1
export VERSION=$2
export NAME=$3
export MESSAGE=$4
export DATE=$(date "+%a %b %d %Y")
set +e
grep --version | grep BSD &> /dev/null
if [ $? == 0 ]; then BSDGREP=true ; else BSDGREP=false ; fi
set -e
# echo "#~~~~~~~~~~~~~~~~~~~~"
# echo "$0 variables:"
# echo "VERSION: ${VERSION}"
# echo "NAME: ${NAME}"
# echo "MESSAGE: ${MESSAGE}"
# echo "DATE: ${DATE}"
# echo "BSDGREP: ${BSDGREP}"
# echo "#~~~~~~~~~~~~~~~~~~~~"
# echo
if $BSDGREP ; then
sed -i '' s/^Version:.*/"Version: ${VERSION}"/ ${FILE}
else
sed -i s/^Version:.*/"Version: ${VERSION}"/ ${FILE}
fi
awk -v version=${VERSION} -v date=${DATE} -v name=${NAME} -v message=${MESSAGE} \
'FNR==NR{ if (/%changelog/) p=NR; next} 1; FNR==p{ print "* " date " " name " - " version "\n- " message "\n" }' \
${FILE} ${FILE} > ${FILE}.new
mv ${FILE}.new ${FILE}

View file

@ -1,38 +0,0 @@
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
ZT_NAME="$1" ; shift
DISTRO="$1" ; shift
ZT_ISA="$1" ; shift
VERSION="$1" ; shift
BUILD_EVENT="$1" ; shift
source "$(dirname $0)/lib.sh"
if [ ${BUILD_EVENT} == "tag" ]; then
CHANNEL="zerotier-releases"
else
CHANNEL="zerotier-builds"
fi
function publish_rpm {
mkdir -p /${CHANNEL}/${DISTRO}
ls -la /${CHANNEL}
ls -la .
cp -a ${ZT_NAME} /${CHANNEL}/${DISTRO}
}
function publish_deb {
mkdir -p /${CHANNEL}/${DISTRO}/pool/dists/${ZT_NAME}/main
cp -a ${ZT_NAME}/* /${CHANNEL}/${DISTRO}/pool/dists/${ZT_NAME}/main
}
case ${PKGFMT} in
"rpm")
publish_rpm
;;
"deb")
publish_deb
esac

View file

@ -1,55 +0,0 @@
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
ZT_NAME="$1" ; shift
DISTRO="$1" ; shift
ZT_ISA="$1" ; shift
VERSION="$1" ; shift
BUILD_EVENT="$1" ; shift
source "$(dirname $0)/lib.sh"
if [ -f "ci/Dockerfile-test.${ZT_NAME}" ]; then
DOCKERFILE="ci/Dockerfile-test.${ZT_NAME}"
else
DOCKERFILE="ci/Dockerfile-test.${PKGFMT}"
fi
if [ ${BUILD_EVENT} == "tag" ]; then
BASEURL="zerotier-releases.home.arpa"
else
BASEURL="zerotier-builds.home.arpa"
fi
echo "#~~~~~~~~~~~~~~~~~~~~"
echo "$0 variables:"
echo "nproc: $(nproc)"
echo "ZT_NAME: ${ZT_NAME}"
echo "DISTRO: ${DISTRO}"
echo "ZT_ISA: ${ZT_ISA}"
echo "VERSION: ${VERSION}"
echo "BUILD_EVENT: ${BUILD_EVENT}"
echo "DOCKER_ARCH: ${DOCKER_ARCH}"
echo "DNF_ARCH: ${DNF_ARCH}"
echo "RUST_TRIPLET: ${RUST_TRIPLET}"
echo "PKGFMT: ${PKGFMT}"
echo "PWD: ${PWD}"
echo "DOCKERFILE: ${DOCKERFILE}"
echo "#~~~~~~~~~~~~~~~~~~~~"
# docker pull -q --platform="linux/${DOCKER_ARCH}" 084037375216.dkr.ecr.us-east-2.amazonaws.com/${ZT_NAME}-tester
docker buildx build \
--build-arg BASEURL="${BASEURL}" \
--build-arg ZT_NAME="${ZT_NAME}" \
--build-arg DISTRO="${DISTRO}" \
--build-arg DEB_ARCH="${DEB_ARCH}" \
--build-arg DNF_ARCH="${DNF_ARCH}" \
--build-arg VERSION="${VERSION}" \
--build-arg DOCKER_ARCH="${DOCKER_ARCH}" \
--platform "linux/${DOCKER_ARCH}" \
--no-cache \
-f ${DOCKERFILE} \
-t test \
.

View file

@ -4,7 +4,7 @@
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
* Change Date: 2025-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
@ -19,8 +19,6 @@
#define _DEBUG(x)
#endif
#include "../node/Metrics.hpp"
#include <deque>
#include <set>
#include <memory>
@ -61,11 +59,8 @@ public:
, m_minPoolSize(min_pool_size)
, m_factory(factory)
{
Metrics::max_pool_size += max_pool_size;
Metrics::min_pool_size += min_pool_size;
while(m_pool.size() < m_minPoolSize){
m_pool.push_back(m_factory->create());
Metrics::pool_avail++;
}
};
@ -96,19 +91,16 @@ public:
while((m_pool.size() + m_borrowed.size()) < m_minPoolSize) {
std::shared_ptr<Connection> conn = m_factory->create();
m_pool.push_back(conn);
Metrics::pool_avail++;
}
if(m_pool.size()==0){
if ((m_pool.size() + m_borrowed.size()) < m_maxPoolSize) {
if ((m_pool.size() + m_borrowed.size()) <= m_maxPoolSize) {
try {
std::shared_ptr<Connection> conn = m_factory->create();
m_borrowed.insert(conn);
Metrics::pool_in_use++;
return std::static_pointer_cast<T>(conn);
} catch (std::exception &e) {
Metrics::pool_errors++;
throw ConnectionUnavailable();
}
} else {
@ -124,13 +116,11 @@ public:
return std::static_pointer_cast<T>(conn);
} catch(std::exception& e) {
// Error creating a replacement connection
Metrics::pool_errors++;
throw ConnectionUnavailable();
}
}
}
// Nothing available
Metrics::pool_errors++;
throw ConnectionUnavailable();
}
}
@ -138,10 +128,8 @@ public:
// Take one off the front
std::shared_ptr<Connection> conn = m_pool.front();
m_pool.pop_front();
Metrics::pool_avail--;
// Add it to the borrowed list
m_borrowed.insert(conn);
Metrics::pool_in_use++;
return std::static_pointer_cast<T>(conn);
};
@ -155,9 +143,7 @@ public:
// Lock
std::unique_lock<std::mutex> lock(m_poolMutex);
m_borrowed.erase(conn);
Metrics::pool_in_use--;
if ((m_pool.size() + m_borrowed.size()) < m_maxPoolSize) {
Metrics::pool_avail++;
m_pool.push_back(conn);
}
};

View file

@ -4,7 +4,7 @@
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
* Change Date: 2025-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
@ -13,7 +13,6 @@
#include "DB.hpp"
#include "EmbeddedNetworkController.hpp"
#include "../node/Metrics.hpp"
#include <chrono>
#include <algorithm>
@ -108,17 +107,16 @@ DB::~DB() {}
bool DB::get(const uint64_t networkId,nlohmann::json &network)
{
waitForReady();
Metrics::db_get_network++;
std::shared_ptr<_Network> nw;
{
std::shared_lock<std::shared_mutex> l(_networks_l);
std::lock_guard<std::mutex> l(_networks_l);
auto nwi = _networks.find(networkId);
if (nwi == _networks.end())
return false;
nw = nwi->second;
}
{
std::shared_lock<std::shared_mutex> l2(nw->lock);
std::lock_guard<std::mutex> l2(nw->lock);
network = nw->config;
}
return true;
@ -127,17 +125,16 @@ bool DB::get(const uint64_t networkId,nlohmann::json &network)
bool DB::get(const uint64_t networkId,nlohmann::json &network,const uint64_t memberId,nlohmann::json &member)
{
waitForReady();
Metrics::db_get_network_and_member++;
std::shared_ptr<_Network> nw;
{
std::shared_lock<std::shared_mutex> l(_networks_l);
std::lock_guard<std::mutex> l(_networks_l);
auto nwi = _networks.find(networkId);
if (nwi == _networks.end())
return false;
nw = nwi->second;
}
{
std::shared_lock<std::shared_mutex> l2(nw->lock);
std::lock_guard<std::mutex> l2(nw->lock);
network = nw->config;
auto m = nw->members.find(memberId);
if (m == nw->members.end())
@ -150,17 +147,16 @@ bool DB::get(const uint64_t networkId,nlohmann::json &network,const uint64_t mem
bool DB::get(const uint64_t networkId,nlohmann::json &network,const uint64_t memberId,nlohmann::json &member,NetworkSummaryInfo &info)
{
waitForReady();
Metrics::db_get_network_and_member_and_summary++;
std::shared_ptr<_Network> nw;
{
std::shared_lock<std::shared_mutex> l(_networks_l);
std::lock_guard<std::mutex> l(_networks_l);
auto nwi = _networks.find(networkId);
if (nwi == _networks.end())
return false;
nw = nwi->second;
}
{
std::shared_lock<std::shared_mutex> l2(nw->lock);
std::lock_guard<std::mutex> l2(nw->lock);
network = nw->config;
_fillSummaryInfo(nw,info);
auto m = nw->members.find(memberId);
@ -174,17 +170,16 @@ bool DB::get(const uint64_t networkId,nlohmann::json &network,const uint64_t mem
bool DB::get(const uint64_t networkId,nlohmann::json &network,std::vector<nlohmann::json> &members)
{
waitForReady();
Metrics::db_get_member_list++;
std::shared_ptr<_Network> nw;
{
std::shared_lock<std::shared_mutex> l(_networks_l);
std::lock_guard<std::mutex> l(_networks_l);
auto nwi = _networks.find(networkId);
if (nwi == _networks.end())
return false;
nw = nwi->second;
}
{
std::shared_lock<std::shared_mutex> l2(nw->lock);
std::lock_guard<std::mutex> l2(nw->lock);
network = nw->config;
for(auto m=nw->members.begin();m!=nw->members.end();++m) {
members.push_back(m->second);
@ -196,15 +191,21 @@ bool DB::get(const uint64_t networkId,nlohmann::json &network,std::vector<nlohma
void DB::networks(std::set<uint64_t> &networks)
{
waitForReady();
Metrics::db_get_network_list++;
std::shared_lock<std::shared_mutex> l(_networks_l);
std::lock_guard<std::mutex> l(_networks_l);
for(auto n=_networks.begin();n!=_networks.end();++n)
networks.insert(n->first);
}
void DB::networkMemberSSOHasExpired(uint64_t nwid, int64_t now) {
std::lock_guard<std::mutex> l(_networks_l);
auto nw = _networks.find(nwid);
if (nw != _networks.end()) {
nw->second->mostRecentDeauthTime = now;
}
}
void DB::_memberChanged(nlohmann::json &old,nlohmann::json &memberConfig,bool notifyListeners)
{
Metrics::db_member_change++;
uint64_t memberId = 0;
uint64_t networkId = 0;
bool isAuth = false;
@ -216,21 +217,18 @@ void DB::_memberChanged(nlohmann::json &old,nlohmann::json &memberConfig,bool no
networkId = OSUtils::jsonIntHex(old["nwid"],0ULL);
if ((memberId)&&(networkId)) {
{
std::unique_lock<std::shared_mutex> l(_networks_l);
std::lock_guard<std::mutex> l(_networks_l);
auto nw2 = _networks.find(networkId);
if (nw2 != _networks.end()) {
if (nw2 != _networks.end())
nw = nw2->second;
}
}
if (nw) {
std::unique_lock<std::shared_mutex> l(nw->lock);
if (OSUtils::jsonBool(old["activeBridge"],false)) {
std::lock_guard<std::mutex> l(nw->lock);
if (OSUtils::jsonBool(old["activeBridge"],false))
nw->activeBridgeMembers.erase(memberId);
}
wasAuth = OSUtils::jsonBool(old["authorized"],false);
if (wasAuth) {
if (wasAuth)
nw->authorizedMembers.erase(memberId);
}
json &ips = old["ipAssignments"];
if (ips.is_array()) {
for(unsigned long i=0;i<ips.size();++i) {
@ -253,7 +251,7 @@ void DB::_memberChanged(nlohmann::json &old,nlohmann::json &memberConfig,bool no
networkId = OSUtils::jsonIntHex(memberConfig["nwid"],0ULL);
if ((!memberId)||(!networkId))
return;
std::unique_lock<std::shared_mutex> l(_networks_l);
std::lock_guard<std::mutex> l(_networks_l);
std::shared_ptr<_Network> &nw2 = _networks[networkId];
if (!nw2)
nw2.reset(new _Network);
@ -261,18 +259,15 @@ void DB::_memberChanged(nlohmann::json &old,nlohmann::json &memberConfig,bool no
}
{
std::unique_lock<std::shared_mutex> l(nw->lock);
std::lock_guard<std::mutex> l(nw->lock);
nw->members[memberId] = memberConfig;
if (OSUtils::jsonBool(memberConfig["activeBridge"],false)) {
if (OSUtils::jsonBool(memberConfig["activeBridge"],false))
nw->activeBridgeMembers.insert(memberId);
}
isAuth = OSUtils::jsonBool(memberConfig["authorized"],false);
if (isAuth) {
Metrics::member_auths++;
if (isAuth)
nw->authorizedMembers.insert(memberId);
}
json &ips = memberConfig["ipAssignments"];
if (ips.is_array()) {
for(unsigned long i=0;i<ips.size();++i) {
@ -294,18 +289,18 @@ void DB::_memberChanged(nlohmann::json &old,nlohmann::json &memberConfig,bool no
}
if (notifyListeners) {
std::unique_lock<std::shared_mutex> ll(_changeListeners_l);
std::lock_guard<std::mutex> ll(_changeListeners_l);
for(auto i=_changeListeners.begin();i!=_changeListeners.end();++i) {
(*i)->onNetworkMemberUpdate(this,networkId,memberId,memberConfig);
}
}
} else if (memberId) {
if (nw) {
std::unique_lock<std::shared_mutex> l(nw->lock);
std::lock_guard<std::mutex> l(nw->lock);
nw->members.erase(memberId);
}
if (networkId) {
std::unique_lock<std::shared_mutex> l(_networks_l);
std::lock_guard<std::mutex> l(_networks_l);
auto er = _networkByMember.equal_range(memberId);
for(auto i=er.first;i!=er.second;++i) {
if (i->second == networkId) {
@ -316,26 +311,8 @@ void DB::_memberChanged(nlohmann::json &old,nlohmann::json &memberConfig,bool no
}
}
if (notifyListeners) {
if(networkId != 0 && memberId != 0 && old.is_object() && !memberConfig.is_object()) {
// member delete
Metrics::member_count--;
} else if (networkId != 0 && memberId != 0 && !old.is_object() && memberConfig.is_object()) {
// new member
Metrics::member_count++;
}
if (!wasAuth && isAuth) {
Metrics::member_auths++;
} else if (wasAuth && !isAuth) {
Metrics::member_deauths++;
} else {
Metrics::member_changes++;
}
}
if ((notifyListeners)&&((wasAuth)&&(!isAuth)&&(networkId)&&(memberId))) {
std::unique_lock<std::shared_mutex> ll(_changeListeners_l);
std::lock_guard<std::mutex> ll(_changeListeners_l);
for(auto i=_changeListeners.begin();i!=_changeListeners.end();++i) {
(*i)->onNetworkMemberDeauthorize(this,networkId,memberId);
}
@ -344,35 +321,24 @@ void DB::_memberChanged(nlohmann::json &old,nlohmann::json &memberConfig,bool no
void DB::_networkChanged(nlohmann::json &old,nlohmann::json &networkConfig,bool notifyListeners)
{
Metrics::db_network_change++;
if (notifyListeners) {
if (old.is_object() && old.contains("id") && networkConfig.is_object() && networkConfig.contains("id")) {
Metrics::network_changes++;
} else if (!old.is_object() && networkConfig.is_object() && networkConfig.contains("id")) {
Metrics::network_count++;
} else if (old.is_object() && old.contains("id") && !networkConfig.is_object()) {
Metrics::network_count--;
}
}
if (networkConfig.is_object()) {
const std::string ids = networkConfig["id"];
const uint64_t networkId = Utils::hexStrToU64(ids.c_str());
if (networkId) {
std::shared_ptr<_Network> nw;
{
std::unique_lock<std::shared_mutex> l(_networks_l);
std::lock_guard<std::mutex> l(_networks_l);
std::shared_ptr<_Network> &nw2 = _networks[networkId];
if (!nw2)
nw2.reset(new _Network);
nw = nw2;
}
{
std::unique_lock<std::shared_mutex> l2(nw->lock);
std::lock_guard<std::mutex> l2(nw->lock);
nw->config = networkConfig;
}
if (notifyListeners) {
std::unique_lock<std::shared_mutex> ll(_changeListeners_l);
std::lock_guard<std::mutex> ll(_changeListeners_l);
for(auto i=_changeListeners.begin();i!=_changeListeners.end();++i) {
(*i)->onNetworkUpdate(this,networkId,networkConfig);
}
@ -382,25 +348,7 @@ void DB::_networkChanged(nlohmann::json &old,nlohmann::json &networkConfig,bool
const std::string ids = old["id"];
const uint64_t networkId = Utils::hexStrToU64(ids.c_str());
if (networkId) {
try {
// deauth all members on the network
nlohmann::json network;
std::vector<nlohmann::json> members;
this->get(networkId, network, members);
for(auto i=members.begin();i!=members.end();++i) {
const std::string nodeID = (*i)["id"];
const uint64_t memberId = Utils::hexStrToU64(nodeID.c_str());
std::unique_lock<std::shared_mutex> ll(_changeListeners_l);
for(auto j=_changeListeners.begin();j!=_changeListeners.end();++j) {
(*j)->onNetworkMemberDeauthorize(this,networkId,memberId);
}
}
} catch (std::exception &e) {
std::cerr << "Error deauthorizing members on network delete: " << e.what() << std::endl;
}
// delete the network
std::unique_lock<std::shared_mutex> l(_networks_l);
std::lock_guard<std::mutex> l(_networks_l);
_networks.erase(networkId);
}
}

View file

@ -4,7 +4,7 @@
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
* Change Date: 2025-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
@ -29,47 +29,17 @@
#include <unordered_set>
#include <vector>
#include <atomic>
#include <shared_mutex>
#include <mutex>
#include <set>
#include <map>
#include <nlohmann/json.hpp>
#include <prometheus/simpleapi.h>
#include "../ext/json/json.hpp"
#define ZT_MEMBER_AUTH_TIMEOUT_NOTIFY_BEFORE 25000
namespace ZeroTier
{
struct AuthInfo
{
public:
AuthInfo()
: enabled(false)
, version(0)
, authenticationURL()
, authenticationExpiryTime(0)
, issuerURL()
, centralAuthURL()
, ssoNonce()
, ssoState()
, ssoClientID()
, ssoProvider("default")
{}
bool enabled;
uint64_t version;
std::string authenticationURL;
uint64_t authenticationExpiryTime;
std::string issuerURL;
std::string centralAuthURL;
std::string ssoNonce;
std::string ssoState;
std::string ssoClientID;
std::string ssoProvider;
};
/**
* Base class with common infrastructure for all controller DB implementations
*/
@ -109,7 +79,7 @@ public:
inline bool hasNetwork(const uint64_t networkId) const
{
std::shared_lock<std::shared_mutex> l(_networks_l);
std::lock_guard<std::mutex> l(_networks_l);
return (_networks.find(networkId) != _networks.end());
}
@ -124,7 +94,7 @@ public:
inline void each(F f)
{
nlohmann::json nullJson;
std::unique_lock<std::shared_mutex> lck(_networks_l);
std::lock_guard<std::mutex> lck(_networks_l);
for(auto nw=_networks.begin();nw!=_networks.end();++nw) {
f(nw->first,nw->second->config,0,nullJson); // first provide network with 0 for member ID
for(auto m=nw->second->members.begin();m!=nw->second->members.end();++m) {
@ -138,11 +108,12 @@ public:
virtual void eraseMember(const uint64_t networkId,const uint64_t memberId) = 0;
virtual void nodeIsOnline(const uint64_t networkId,const uint64_t memberId,const InetAddress &physicalAddress) = 0;
virtual AuthInfo getSSOAuthInfo(const nlohmann::json &member, const std::string &redirectURL) { return AuthInfo(); }
virtual std::string getSSOAuthURL(const nlohmann::json &member, const std::string &redirectURL) { return ""; }
virtual void networkMemberSSOHasExpired(uint64_t nwid, int64_t ts);
inline void addListener(DB::ChangeListener *const listener)
{
std::unique_lock<std::shared_mutex> l(_changeListeners_l);
std::lock_guard<std::mutex> l(_changeListeners_l);
_changeListeners.push_back(listener);
}
@ -178,7 +149,7 @@ protected:
std::unordered_set<uint64_t> authorizedMembers;
std::unordered_set<InetAddress,InetAddress::Hasher> allocatedIps;
int64_t mostRecentDeauthTime;
std::shared_mutex lock;
std::mutex lock;
};
virtual void _memberChanged(nlohmann::json &old,nlohmann::json &memberConfig,bool notifyListeners);
@ -188,8 +159,8 @@ protected:
std::vector<DB::ChangeListener *> _changeListeners;
std::unordered_map< uint64_t,std::shared_ptr<_Network> > _networks;
std::unordered_multimap< uint64_t,uint64_t > _networkByMember;
mutable std::shared_mutex _changeListeners_l;
mutable std::shared_mutex _networks_l;
mutable std::mutex _changeListeners_l;
mutable std::mutex _networks_l;
};
} // namespace ZeroTier

View file

@ -4,7 +4,7 @@
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
* Change Date: 2025-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
@ -15,12 +15,9 @@
namespace ZeroTier {
DBMirrorSet::DBMirrorSet(DB::ChangeListener *listener)
: _listener(listener)
, _running(true)
, _syncCheckerThread()
, _dbs()
, _dbs_l()
DBMirrorSet::DBMirrorSet(DB::ChangeListener *listener) :
_listener(listener),
_running(true)
{
_syncCheckerThread = std::thread([this]() {
for(;;) {
@ -32,7 +29,7 @@ DBMirrorSet::DBMirrorSet(DB::ChangeListener *listener)
std::vector< std::shared_ptr<DB> > dbs;
{
std::unique_lock<std::shared_mutex> l(_dbs_l);
std::lock_guard<std::mutex> l(_dbs_l);
if (_dbs.size() <= 1)
continue; // no need to do this if there's only one DB, so skip the iteration
dbs = _dbs;
@ -79,7 +76,7 @@ DBMirrorSet::~DBMirrorSet()
bool DBMirrorSet::hasNetwork(const uint64_t networkId) const
{
std::shared_lock<std::shared_mutex> l(_dbs_l);
std::lock_guard<std::mutex> l(_dbs_l);
for(auto d=_dbs.begin();d!=_dbs.end();++d) {
if ((*d)->hasNetwork(networkId))
return true;
@ -89,7 +86,7 @@ bool DBMirrorSet::hasNetwork(const uint64_t networkId) const
bool DBMirrorSet::get(const uint64_t networkId,nlohmann::json &network)
{
std::shared_lock<std::shared_mutex> l(_dbs_l);
std::lock_guard<std::mutex> l(_dbs_l);
for(auto d=_dbs.begin();d!=_dbs.end();++d) {
if ((*d)->get(networkId,network)) {
return true;
@ -100,7 +97,7 @@ bool DBMirrorSet::get(const uint64_t networkId,nlohmann::json &network)
bool DBMirrorSet::get(const uint64_t networkId,nlohmann::json &network,const uint64_t memberId,nlohmann::json &member)
{
std::shared_lock<std::shared_mutex> l(_dbs_l);
std::lock_guard<std::mutex> l(_dbs_l);
for(auto d=_dbs.begin();d!=_dbs.end();++d) {
if ((*d)->get(networkId,network,memberId,member))
return true;
@ -110,7 +107,7 @@ bool DBMirrorSet::get(const uint64_t networkId,nlohmann::json &network,const uin
bool DBMirrorSet::get(const uint64_t networkId,nlohmann::json &network,const uint64_t memberId,nlohmann::json &member,DB::NetworkSummaryInfo &info)
{
std::shared_lock<std::shared_mutex> l(_dbs_l);
std::lock_guard<std::mutex> l(_dbs_l);
for(auto d=_dbs.begin();d!=_dbs.end();++d) {
if ((*d)->get(networkId,network,memberId,member,info))
return true;
@ -120,7 +117,7 @@ bool DBMirrorSet::get(const uint64_t networkId,nlohmann::json &network,const uin
bool DBMirrorSet::get(const uint64_t networkId,nlohmann::json &network,std::vector<nlohmann::json> &members)
{
std::shared_lock<std::shared_mutex> l(_dbs_l);
std::lock_guard<std::mutex> l(_dbs_l);
for(auto d=_dbs.begin();d!=_dbs.end();++d) {
if ((*d)->get(networkId,network,members))
return true;
@ -128,21 +125,29 @@ bool DBMirrorSet::get(const uint64_t networkId,nlohmann::json &network,std::vect
return false;
}
AuthInfo DBMirrorSet::getSSOAuthInfo(const nlohmann::json &member, const std::string &redirectURL)
std::string DBMirrorSet::getSSOAuthURL(const nlohmann::json &member, const std::string &redirectURL)
{
std::shared_lock<std::shared_mutex> l(_dbs_l);
std::lock_guard<std::mutex> l(_dbs_l);
for(auto d=_dbs.begin();d!=_dbs.end();++d) {
AuthInfo info = (*d)->getSSOAuthInfo(member, redirectURL);
if (info.enabled) {
return info;
std::string url = (*d)->getSSOAuthURL(member, redirectURL);
if (!url.empty()) {
return url;
}
}
return AuthInfo();
return "";
}
void DBMirrorSet::networkMemberSSOHasExpired(uint64_t nwid, int64_t ts)
{
std::lock_guard<std::mutex> l(_dbs_l);
for(auto d=_dbs.begin();d!=_dbs.end();++d) {
(*d)->networkMemberSSOHasExpired(nwid, ts);
}
}
void DBMirrorSet::networks(std::set<uint64_t> &networks)
{
std::shared_lock<std::shared_mutex> l(_dbs_l);
std::lock_guard<std::mutex> l(_dbs_l);
for(auto d=_dbs.begin();d!=_dbs.end();++d) {
(*d)->networks(networks);
}
@ -151,7 +156,7 @@ void DBMirrorSet::networks(std::set<uint64_t> &networks)
bool DBMirrorSet::waitForReady()
{
bool r = false;
std::shared_lock<std::shared_mutex> l(_dbs_l);
std::lock_guard<std::mutex> l(_dbs_l);
for(auto d=_dbs.begin();d!=_dbs.end();++d) {
r |= (*d)->waitForReady();
}
@ -160,7 +165,7 @@ bool DBMirrorSet::waitForReady()
bool DBMirrorSet::isReady()
{
std::shared_lock<std::shared_mutex> l(_dbs_l);
std::lock_guard<std::mutex> l(_dbs_l);
for(auto d=_dbs.begin();d!=_dbs.end();++d) {
if (!(*d)->isReady())
return false;
@ -172,7 +177,7 @@ bool DBMirrorSet::save(nlohmann::json &record,bool notifyListeners)
{
std::vector< std::shared_ptr<DB> > dbs;
{
std::unique_lock<std::shared_mutex> l(_dbs_l);
std::lock_guard<std::mutex> l(_dbs_l);
dbs = _dbs;
}
if (notifyListeners) {
@ -192,7 +197,7 @@ bool DBMirrorSet::save(nlohmann::json &record,bool notifyListeners)
void DBMirrorSet::eraseNetwork(const uint64_t networkId)
{
std::unique_lock<std::shared_mutex> l(_dbs_l);
std::lock_guard<std::mutex> l(_dbs_l);
for(auto d=_dbs.begin();d!=_dbs.end();++d) {
(*d)->eraseNetwork(networkId);
}
@ -200,7 +205,7 @@ void DBMirrorSet::eraseNetwork(const uint64_t networkId)
void DBMirrorSet::eraseMember(const uint64_t networkId,const uint64_t memberId)
{
std::unique_lock<std::shared_mutex> l(_dbs_l);
std::lock_guard<std::mutex> l(_dbs_l);
for(auto d=_dbs.begin();d!=_dbs.end();++d) {
(*d)->eraseMember(networkId,memberId);
}
@ -208,7 +213,7 @@ void DBMirrorSet::eraseMember(const uint64_t networkId,const uint64_t memberId)
void DBMirrorSet::nodeIsOnline(const uint64_t networkId,const uint64_t memberId,const InetAddress &physicalAddress)
{
std::shared_lock<std::shared_mutex> l(_dbs_l);
std::lock_guard<std::mutex> l(_dbs_l);
for(auto d=_dbs.begin();d!=_dbs.end();++d) {
(*d)->nodeIsOnline(networkId,memberId,physicalAddress);
}
@ -217,7 +222,7 @@ void DBMirrorSet::nodeIsOnline(const uint64_t networkId,const uint64_t memberId,
void DBMirrorSet::onNetworkUpdate(const void *db,uint64_t networkId,const nlohmann::json &network)
{
nlohmann::json record(network);
std::unique_lock<std::shared_mutex> l(_dbs_l);
std::lock_guard<std::mutex> l(_dbs_l);
for(auto d=_dbs.begin();d!=_dbs.end();++d) {
if (d->get() != db) {
(*d)->save(record,false);
@ -229,7 +234,7 @@ void DBMirrorSet::onNetworkUpdate(const void *db,uint64_t networkId,const nlohma
void DBMirrorSet::onNetworkMemberUpdate(const void *db,uint64_t networkId,uint64_t memberId,const nlohmann::json &member)
{
nlohmann::json record(member);
std::unique_lock<std::shared_mutex> l(_dbs_l);
std::lock_guard<std::mutex> l(_dbs_l);
for(auto d=_dbs.begin();d!=_dbs.end();++d) {
if (d->get() != db) {
(*d)->save(record,false);
@ -243,4 +248,47 @@ void DBMirrorSet::onNetworkMemberDeauthorize(const void *db,uint64_t networkId,u
_listener->onNetworkMemberDeauthorize(this,networkId,memberId);
}
void DBMirrorSet::membersExpiring(std::set< std::pair<uint64_t, uint64_t> > &soon, std::set< std::pair<uint64_t, uint64_t> > &expired)
{
std::unique_lock<std::mutex> l(_membersExpiringSoon_l);
int64_t now = OSUtils::now();
for(auto next=_membersExpiringSoon.begin();next!=_membersExpiringSoon.end();) {
if (next->first > now) {
const uint64_t nwid = next->second.first;
const uint64_t memberId = next->second.second;
nlohmann::json network, member;
if (this->get(nwid, network, memberId, member)) {
try {
const bool authorized = member["authorized"];
const bool ssoExempt = member["ssoExempt"];
const int64_t authenticationExpiryTime = member["authenticationExpiryTime"];
if ((authenticationExpiryTime == next->first)&&(authorized)&&(!ssoExempt)) {
if ((authenticationExpiryTime - now) > ZT_MEMBER_AUTH_TIMEOUT_NOTIFY_BEFORE) {
// Stop when we get to entries too far in the future.
break;
} else {
const bool ssoEnabled = network["ssoEnabled"];
if (ssoEnabled)
soon.insert(std::pair<uint64_t, uint64_t>(nwid, memberId));
}
} else {
// Obsolete entry, no longer authorized, or SSO exempt.
}
} catch ( ... ) {
// Invalid member object, erase.
}
} else {
// Not found.
}
}
_membersExpiringSoon.erase(next++);
}
}
void DBMirrorSet::memberWillExpire(int64_t expTime, uint64_t nwid, uint64_t memberId)
{
std::unique_lock<std::mutex> l(_membersExpiringSoon_l);
_membersExpiringSoon.insert(std::pair< int64_t, std::pair< uint64_t, uint64_t > >(expTime, std::pair< uint64_t, uint64_t >(nwid, memberId)));
}
} // namespace ZeroTier

View file

@ -4,7 +4,7 @@
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
* Change Date: 2025-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
@ -18,7 +18,7 @@
#include <vector>
#include <memory>
#include <shared_mutex>
#include <mutex>
#include <set>
#include <thread>
@ -51,21 +51,27 @@ public:
virtual void onNetworkMemberUpdate(const void *db,uint64_t networkId,uint64_t memberId,const nlohmann::json &member);
virtual void onNetworkMemberDeauthorize(const void *db,uint64_t networkId,uint64_t memberId);
AuthInfo getSSOAuthInfo(const nlohmann::json &member, const std::string &redirectURL);
std::string getSSOAuthURL(const nlohmann::json &member, const std::string &redirectURL);
void networkMemberSSOHasExpired(uint64_t nwid, int64_t ts);
inline void addDB(const std::shared_ptr<DB> &db)
{
db->addListener(this);
std::unique_lock<std::shared_mutex> l(_dbs_l);
std::lock_guard<std::mutex> l(_dbs_l);
_dbs.push_back(db);
}
void membersExpiring(std::set< std::pair<uint64_t, uint64_t> > &soon, std::set< std::pair<uint64_t, uint64_t> > &expired);
void memberWillExpire(int64_t expTime, uint64_t nwid, uint64_t memberId);
private:
DB::ChangeListener *const _listener;
std::atomic_bool _running;
std::thread _syncCheckerThread;
std::vector< std::shared_ptr< DB > > _dbs;
mutable std::shared_mutex _dbs_l;
mutable std::mutex _dbs_l;
std::set< std::pair< int64_t, std::pair<uint64_t, uint64_t> > > _membersExpiringSoon;
mutable std::mutex _membersExpiringSoon_l;
};
} // namespace ZeroTier

File diff suppressed because it is too large Load diff

View file

@ -4,7 +4,7 @@
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
* Change Date: 2025-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
@ -35,9 +35,7 @@
#include "../osdep/Thread.hpp"
#include "../osdep/BlockingQueue.hpp"
#include <nlohmann/json.hpp>
#include <cpp-httplib/httplib.h>
#include "../ext/json/json.hpp"
#include "DB.hpp"
#include "DBMirrorSet.hpp"
@ -68,10 +66,27 @@ public:
const Identity &identity,
const Dictionary<ZT_NETWORKCONFIG_METADATA_DICT_CAPACITY> &metaData);
void configureHTTPControlPlane(
httplib::Server &s,
httplib::Server &sV6,
const std::function<void(const httplib::Request&, httplib::Response&, std::string)>);
unsigned int handleControlPlaneHttpGET(
const std::vector<std::string> &path,
const std::map<std::string,std::string> &urlArgs,
const std::map<std::string,std::string> &headers,
const std::string &body,
std::string &responseBody,
std::string &responseContentType);
unsigned int handleControlPlaneHttpPOST(
const std::vector<std::string> &path,
const std::map<std::string,std::string> &urlArgs,
const std::map<std::string,std::string> &headers,
const std::string &body,
std::string &responseBody,
std::string &responseContentType);
unsigned int handleControlPlaneHttpDELETE(
const std::vector<std::string> &path,
const std::map<std::string,std::string> &urlArgs,
const std::map<std::string,std::string> &headers,
const std::string &body,
std::string &responseBody,
std::string &responseContentType);
void handleRemoteTrace(const ZT_RemoteTrace &rt);
@ -82,9 +97,6 @@ public:
private:
void _request(uint64_t nwid,const InetAddress &fromAddr,uint64_t requestPacketId,const Identity &identity,const Dictionary<ZT_NETWORKCONFIG_METADATA_DICT_CAPACITY> &metaData);
void _startThreads();
void _ssoExpiryThread();
std::string networkUpdateFromPostData(uint64_t networkID, const std::string &body);
struct _RQEntry
{
@ -97,7 +109,6 @@ private:
RQENTRY_TYPE_REQUEST = 0
} type;
};
struct _MemberStatusKey
{
_MemberStatusKey() : networkId(0),nodeId(0) {}
@ -105,13 +116,11 @@ private:
uint64_t networkId;
uint64_t nodeId;
inline bool operator==(const _MemberStatusKey &k) const { return ((k.networkId == networkId)&&(k.nodeId == nodeId)); }
inline bool operator<(const _MemberStatusKey &k) const { return (k.networkId < networkId) || ((k.networkId == networkId)&&(k.nodeId < nodeId)); }
};
struct _MemberStatus
{
_MemberStatus() : lastRequestTime(0),authenticationExpiryTime(-1),vMajor(-1),vMinor(-1),vRev(-1),vProto(-1) {}
int64_t lastRequestTime;
int64_t authenticationExpiryTime;
_MemberStatus() : lastRequestTime(0),vMajor(-1),vMinor(-1),vRev(-1),vProto(-1) {}
uint64_t lastRequestTime;
int vMajor,vMinor,vRev,vProto;
Dictionary<ZT_NETWORKCONFIG_METADATA_DICT_CAPACITY> lastRequestMetaData;
Identity identity;
@ -143,39 +152,8 @@ private:
std::unordered_map< _MemberStatusKey,_MemberStatus,_MemberStatusHash > _memberStatus;
std::mutex _memberStatus_l;
std::set< std::pair<int64_t, _MemberStatusKey> > _expiringSoon;
std::mutex _expiringSoon_l;
RedisConfig *_rc;
std::string _ssoRedirectURL;
bool _ssoExpiryRunning;
std::thread _ssoExpiry;
#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK
prometheus::simpleapi::benchmark_family_t _member_status_lookup;
prometheus::simpleapi::counter_family_t _member_status_lookup_count;
prometheus::simpleapi::benchmark_family_t _node_is_online;
prometheus::simpleapi::counter_family_t _node_is_online_count;
prometheus::simpleapi::benchmark_family_t _get_and_init_member;
prometheus::simpleapi::counter_family_t _get_and_init_member_count;
prometheus::simpleapi::benchmark_family_t _have_identity;
prometheus::simpleapi::counter_family_t _have_identity_count;
prometheus::simpleapi::benchmark_family_t _determine_auth;
prometheus::simpleapi::counter_family_t _determine_auth_count;
prometheus::simpleapi::benchmark_family_t _sso_check;
prometheus::simpleapi::counter_family_t _sso_check_count;
prometheus::simpleapi::benchmark_family_t _auth_check;
prometheus::simpleapi::counter_family_t _auth_check_count;
prometheus::simpleapi::benchmark_family_t _json_schlep;
prometheus::simpleapi::counter_family_t _json_schlep_count;
prometheus::simpleapi::benchmark_family_t _issue_certificate;
prometheus::simpleapi::counter_family_t _issue_certificate_count;
prometheus::simpleapi::benchmark_family_t _save_member;
prometheus::simpleapi::counter_family_t _save_member_count;
prometheus::simpleapi::benchmark_family_t _send_netconf;
prometheus::simpleapi::counter_family_t _send_netconf_count;
#endif
};
} // namespace ZeroTier

View file

@ -4,7 +4,7 @@
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
* Change Date: 2025-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
@ -13,8 +13,6 @@
#include "FileDB.hpp"
#include "../node/Metrics.hpp"
namespace ZeroTier
{
@ -41,7 +39,6 @@ FileDB::FileDB(const char *path) :
if (nwids.length() == 16) {
nlohmann::json nullJson;
_networkChanged(nullJson,network,false);
Metrics::network_count++;
std::string membersPath(_networksPath + ZT_PATH_SEPARATOR_S + nwids + ZT_PATH_SEPARATOR_S "member");
std::vector<std::string> members(OSUtils::listDirectory(membersPath.c_str(),false));
for(auto m=members.begin();m!=members.end();++m) {
@ -53,7 +50,6 @@ FileDB::FileDB(const char *path) :
if (addrs.length() == 10) {
nlohmann::json nullJson2;
_memberChanged(nullJson2,member,false);
Metrics::member_count++;
}
} catch ( ... ) {}
}
@ -92,9 +88,8 @@ bool FileDB::save(nlohmann::json &record,bool notifyListeners)
if ((!old.is_object())||(!_compareRecords(old,record))) {
record["revision"] = OSUtils::jsonInt(record["revision"],0ULL) + 1ULL;
OSUtils::ztsnprintf(p1,sizeof(p1),"%s" ZT_PATH_SEPARATOR_S "%.16llx.json",_networksPath.c_str(),nwid);
if (!OSUtils::writeFile(p1,OSUtils::jsonDump(record,-1))) {
if (!OSUtils::writeFile(p1,OSUtils::jsonDump(record,-1)))
fprintf(stderr,"WARNING: controller unable to write to path: %s" ZT_EOL_S,p1);
}
_networkChanged(old,record,notifyListeners);
modified = true;
}
@ -115,10 +110,9 @@ bool FileDB::save(nlohmann::json &record,bool notifyListeners)
OSUtils::ztsnprintf(p2,sizeof(p2),"%s" ZT_PATH_SEPARATOR_S "%.16llx",_networksPath.c_str(),(unsigned long long)nwid);
OSUtils::mkdir(p2);
OSUtils::mkdir(pb);
if (!OSUtils::writeFile(p1,OSUtils::jsonDump(record,-1))) {
if (!OSUtils::writeFile(p1,OSUtils::jsonDump(record,-1)))
fprintf(stderr,"WARNING: controller unable to write to path: %s" ZT_EOL_S,p1);
}
}
_memberChanged(old,record,notifyListeners);
modified = true;
}
@ -136,7 +130,7 @@ void FileDB::eraseNetwork(const uint64_t networkId)
char p[16384];
OSUtils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "%.16llx.json",_networksPath.c_str(),networkId);
OSUtils::rm(p);
OSUtils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "%.16llx",_networksPath.c_str(),(unsigned long long)networkId);
OSUtils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "%.16llx" ZT_PATH_SEPARATOR_S "member",_networksPath.c_str(),(unsigned long long)networkId);
OSUtils::rmDashRf(p);
_networkChanged(network,nullJson,true);
std::lock_guard<std::mutex> l(this->_online_l);

View file

@ -4,7 +4,7 @@
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
* Change Date: 2025-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.

View file

@ -4,7 +4,7 @@
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
* Change Date: 2025-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.

View file

@ -4,7 +4,7 @@
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
* Change Date: 2025-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.

File diff suppressed because it is too large Load diff

View file

@ -4,7 +4,7 @@
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
* Change Date: 2025-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
@ -26,16 +26,10 @@
#include <memory>
#include <redis++/redis++.h>
#include "../node/Metrics.hpp"
extern "C" {
typedef struct pg_conn PGconn;
}
namespace smeeclient {
struct SmeeClient;
}
namespace ZeroTier {
struct RedisConfig;
@ -59,7 +53,6 @@ public:
}
virtual std::shared_ptr<Connection> create() {
Metrics::conn_counter++;
auto c = std::shared_ptr<PostgresConnection>(new PostgresConnection());
c->c = std::make_shared<pqxx::connection>(m_connString);
return std::static_pointer_cast<Connection>(c);
@ -114,7 +107,7 @@ public:
virtual void eraseNetwork(const uint64_t networkId);
virtual void eraseMember(const uint64_t networkId, const uint64_t memberId);
virtual void nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress &physicalAddress);
virtual AuthInfo getSSOAuthInfo(const nlohmann::json &member, const std::string &redirectURL);
virtual std::string getSSOAuthURL(const nlohmann::json &member, const std::string &redirectURL);
protected:
struct _PairHasher
@ -145,12 +138,9 @@ private:
void onlineNotificationThread();
void onlineNotification_Postgres();
void onlineNotification_Redis();
uint64_t _doRedisUpdate(sw::redis::Transaction &tx, std::string &controllerId,
void _doRedisUpdate(sw::redis::Transaction &tx, std::string &controllerId,
std::unordered_map< std::pair<uint64_t,uint64_t>,std::pair<int64_t,InetAddress>,_PairHasher > &lastOnline);
void configureSmee();
void notifyNewMember(const std::string &networkID, const std::string &memberID);
enum OverrideMode {
ALLOW_PGBOUNCER_OVERRIDE = 0,
NO_OVERRIDE = 1
@ -184,9 +174,6 @@ private:
RedisConfig *_rc;
std::shared_ptr<sw::redis::Redis> _redis;
std::shared_ptr<sw::redis::RedisCluster> _cluster;
bool _redisMemberStatus;
smeeclient::SmeeClient *_smee;
};
} // namespace ZeroTier

View file

@ -3,7 +3,7 @@ Network Controller Microservice
Every ZeroTier virtual network has a *network controller* responsible for admitting members to the network, issuing certificates, and issuing default configuration information.
This is our reference controller implementation and is almost the same as the one we use to power our own hosted services at [my.zerotier.com](https://my.zerotier.com/). The only difference is the database backend used.
This is our reference controller implementation and is the same one we use to power our own hosted services at [my.zerotier.com](https://my.zerotier.com/). As of ZeroTier One version 1.2.0 this code is included in normal builds for desktop, laptop, and server (Linux, etc.) targets.
Controller data is stored in JSON format under `controller.d` in the ZeroTier working directory. It can be copied, rsync'd, placed in `git`, etc. Do not modify the files under `controller.d` in place while the controller is running, or data loss may result. If you edit them directly, take care not to save corrupt JSON, since that can also lead to data loss when the controller is restarted. Going through the API is strongly preferred to directly modifying these files.
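For example, a minimal sketch of backing up the controller state with rsync (the working directory path shown is the common Linux default and may differ on your system; the backup destination is hypothetical):
    rsync -a /var/lib/zerotier-one/controller.d/ /backup/zerotier-controller.d/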
@ -19,6 +19,10 @@ Since ZeroTier nodes are mobile and do not need static IPs, implementing high av
ZeroTier network controllers can easily be run in Docker or other container systems. Since containers do not need to actually join networks, extra privilege options like "--device=/dev/net/tun --privileged" are not needed. You'll just need to map the local JSON API port of the running controller and allow it to access the Internet (over UDP/9993 at a minimum) so things can reach and query it.
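As a rough sketch (the image name is hypothetical and the volume path is the common Linux default; note that the JSON API listens on localhost by default, so how you expose it depends on your configuration):
    docker run -d --name zt-controller \
      -p 9993:9993/udp \
      -v /var/lib/zerotier-one:/var/lib/zerotier-one \
      my-zerotier-controller-image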
### PostgreSQL Database Implementation
The default controller stores its data in the filesystem in `controller.d` under ZeroTier's home folder. An alternative implementation that stores data in PostgreSQL can be built with `make central-controller`. Right now this is only guaranteed to build and run on CentOS 7 Linux with PostgreSQL 10 installed via the [PostgreSQL Yum Repository](https://www.postgresql.org/download/linux/redhat/) and is designed for use with [ZeroTier Central](https://my.zerotier.com/). You're welcome to use it but we don't "officially" support it for end-user use and it could change at any time.
### Upgrading from Older (1.1.14 or earlier) Versions
Older versions of this code used a SQLite database instead of in-filesystem JSON. A migration utility called `migrate-sqlite` is included here and *must* be used to migrate this data to the new format. If the controller is started with an old `controller.db` in its working directory it will terminate after printing an error to *stderr*. This is done to prevent "surprises" for those running DIY controllers using the old code.
@ -39,17 +43,210 @@ While networks with any valid ID can be added to the controller's database, it w
The controller JSON API is *very* sensitive about types. Integers must be integers and strings must be strings, etc. Incorrect types may be ignored, set to default values, or set to undefined values.
Full documentation of the Controller API can be found on our [documentation site](https://docs.zerotier.com/service/v1#tag/controller)
#### `/controller`
### Prometheus Metrics
* Purpose: Check for controller function and return controller status
* Methods: GET
* Returns: { object }
Controller specific metrics are available from the `/metrics` endpoint.
| Field | Type | Description | Writable |
| ------------------ | ----------- | ------------------------------------------------- | -------- |
| controller | boolean | Always 'true' | no |
| apiVersion | integer | Controller API version, currently 3 | no |
| clock | integer | Current clock on controller, ms since epoch | no |
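For example, checking controller status with curl (assuming the default local API port 9993; the auth token path shown is the Linux default):
    curl -H "X-ZT1-Auth: $(cat /var/lib/zerotier-one/authtoken.secret)" http://localhost:9993/controller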
#### `/controller/network`
* Purpose: List all networks hosted by this controller
* Methods: GET
* Returns: [ string, ... ]
This returns an array of 16-digit hexadecimal network IDs.
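For example, listing hosted networks with curl (the token value `secret` is a placeholder, as in the example further below):
    curl -H "X-ZT1-Auth: secret" http://localhost:9993/controller/network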
#### `/controller/network/<network ID>`
* Purpose: Create, configure, and delete hosted networks
* Methods: GET, POST, DELETE
* Returns: { object }
By making queries to this path you can create, configure, and delete networks. DELETE is final, so don't do it unless you really mean it.
When POSTing new networks take care that their IDs are not in use, otherwise you may overwrite an existing one. To create a new network with a random unused ID, POST to `/controller/network/##########______`. The #'s are the controller's 10-digit ZeroTier address and they're followed by six underscores. Check the `nwid` field of the returned JSON object for your network's newly allocated ID. Subsequent POSTs to this network must refer to its actual path.
Example:
`curl -X POST --header "X-ZT1-Auth: secret" -d '{"name":"my network"}' http://localhost:9993/controller/network/305f406058______`
**Network object format:**
| Field | Type | Description | Writable |
| --------------------- | ------------- | ------------------------------------------------- | -------- |
| id | string | 16-digit network ID | no |
| nwid | string | 16-digit network ID (legacy) | no |
| objtype | string | Always "network" | no |
| name | string | A short name for this network | YES |
| creationTime | integer | Time network record was created (ms since epoch) | no |
| private | boolean | Is access control enabled? | YES |
| enableBroadcast | boolean | Ethernet ff:ff:ff:ff:ff:ff allowed? | YES |
| v4AssignMode | object | IPv4 management and assign options (see below) | YES |
| v6AssignMode | object | IPv6 management and assign options (see below) | YES |
| mtu | integer | Network MTU (default: 2800) | YES |
| multicastLimit | integer | Maximum recipients for a multicast packet | YES |
| revision | integer | Network config revision counter | no |
| routes | array[object] | Managed IPv4 and IPv6 routes; see below | YES |
| ipAssignmentPools | array[object] | IP auto-assign ranges; see below | YES |
| rules | array[object] | Traffic rules; see below | YES |
| capabilities | array[object] | Array of capability objects (see below) | YES |
| tags | array[object] | Array of tag objects (see below) | YES |
| remoteTraceTarget | string | 10-digit ZeroTier ID of remote trace target | YES |
| remoteTraceLevel | integer | Remote trace verbosity level | YES |
* Networks without rules won't carry any traffic. If you don't specify any on network creation an "accept anything" rule set will automatically be added.
* Managed IP address assignments and IP assignment pools that do not fall within a route configured in `routes` are ignored and won't be used or sent to members.
* The default for `private` is `true` and this is probably what you want. Turning `private` off means *anyone* can join your network with only its 16-digit network ID. It's also impossible to de-authorize a member as these networks don't issue or enforce certificates. Such "party line" networks are used for decentralized app backplanes, gaming, and testing but are otherwise not common.
* Changing the MTU can be disruptive and on some operating systems may require a leave/rejoin of the network or a restart of the ZeroTier service.
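As an illustration, a sketch of updating a few of the writable fields above on an existing network (the network ID and field values are hypothetical):
    curl -X POST -H "X-ZT1-Auth: secret" \
      -d '{"name":"lab-net","private":true,"multicastLimit":16}' \
      http://localhost:9993/controller/network/305f406058abcdef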
**Auto-Assign Modes:**
Auto assign modes (`v4AssignMode` and `v6AssignMode`) contain objects that map assignment modes to booleans.
For IPv4 the only valid setting is `zt` which, if true, causes IPv4 addresses to be auto-assigned from `ipAssignmentPools` to members that do not have an IPv4 assignment. Note that active bridges are exempt and will not get auto-assigned IPs since this can interfere with bridging. (You can still manually assign one if you want.)
IPv6 includes this option and two others: `6plane` and `rfc4193`. These assign private IPv6 addresses to each member based on a deterministic assignment scheme that allows members to emulate IPv6 NDP to skip multicast for better performance and scalability. The `rfc4193` mode gives every member a /128 on a /88 network, while `6plane` gives every member a /80 within a /40 network but uses NDP emulation to route *all* IPs under that /80 to its owner. The `6plane` mode is great for use cases like Docker since it allows every member to assign IPv6 addresses within its /80 that just work instantly and globally across the network.
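For example, a sketch enabling ZT-managed IPv4 assignment plus 6PLANE IPv6 addressing, using the field names described above (the network ID is hypothetical):
    curl -X POST -H "X-ZT1-Auth: secret" \
      -d '{"v4AssignMode":{"zt":true},"v6AssignMode":{"zt":false,"6plane":true,"rfc4193":false}}' \
      http://localhost:9993/controller/network/305f406058abcdef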
**IP assignment pool object format:**
| Field | Type | Description |
| --------------------- | ------------- | ------------------------------------------------- |
| ipRangeStart | string | Starting IP address in range |
| ipRangeEnd | string | Ending IP address in range (inclusive) |
Pools are only used if auto-assignment is on for the given address type (IPv4 or IPv6) and if the entire range falls within a managed route.
IPv6 ranges work just like IPv4 ranges and look like this:
{
"ipRangeStart": "fd00:feed:feed:beef:0000:0000:0000:0000",
"ipRangeEnd": "fd00:feed:feed:beef:ffff:ffff:ffff:ffff"
}
(You can POST a shortened-form IPv6 address but the API will always report back un-shortened canonical form addresses.)
That defines a range within network `fd00:feed:feed:beef::/64` that contains up to 2^64 addresses. If an IPv6 range is large enough, the controller will assign addresses by placing each member's device ID into the address in a manner similar to the RFC4193 and 6PLANE modes. Otherwise it will assign addresses at random.
**Managed Route object format:**
| Field | Type | Description |
| --------------------- | ------------- | ------------------------------------------------- |
| target | string | Subnet in CIDR notation |
| via | string/null | Next hop router IP address |
Managed Route objects look like this:
{
"target": "10.147.20.0/24"
}
or
{
"target": "192.168.168.0/24",
"via": "10.147.20.1"
}
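Putting routes, pools, and auto-assignment together, a sketch of a typical IPv4 setup (all addresses and the network ID are examples only; remember the pool must fall within a managed route):
    curl -X POST -H "X-ZT1-Auth: secret" \
      -d '{
            "v4AssignMode": {"zt": true},
            "routes": [{"target": "10.147.20.0/24"}],
            "ipAssignmentPools": [{"ipRangeStart": "10.147.20.10", "ipRangeEnd": "10.147.20.250"}]
          }' \
      http://localhost:9993/controller/network/305f406058abcdef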
**Rule object format:**
Each rule is actually a sequence of zero or more `MATCH_` entries in the rule array followed by an `ACTION_` entry that describes what to do if all the preceding entries match. An `ACTION_` without any preceding `MATCH_` entries is always taken, so setting a single `ACTION_ACCEPT` rule yields a network that allows all traffic. If no rules are present the default action is `ACTION_DROP`.
Rules are evaluated in the order in which they appear in the array. There is currently a limit of 256 entries per network. Capabilities should be used if a larger and more complex rule set is needed since they allow rules to be grouped by purpose and only shipped to members that need them.
Each rule table entry has two common fields.
| Field | Type | Description |
| --------------------- | ------------- | ------------------------------------------------- |
| type | string | Entry type (all caps, case sensitive) |
| not                    | boolean       | If true, the sense of this MATCH is inverted (it matches when it otherwise would not) |
The following fields may or may not be present depending on rule type:
| Field | Type | Description |
| --------------------- | ------------- | ------------------------------------------------- |
| zt | string | 10-digit hex ZeroTier address |
| etherType | integer | Ethernet frame type |
| mac | string | Hex MAC address (with or without :'s) |
| ip | string | IPv4 or IPv6 address |
| ipTos | integer | IP type of service |
| ipProtocol | integer | IP protocol (e.g. TCP) |
| start | integer | Start of an integer range (e.g. port range) |
| end | integer | End of an integer range (inclusive) |
| id | integer | Tag ID |
| value | integer | Tag value or comparison value |
| mask | integer | Bit mask (for characteristics flags) |
The entry types and their additional fields are:
| Entry type | Description | Fields |
| ------------------------------- | ----------------------------------------------------------------- | -------------- |
| `ACTION_DROP` | Drop any packets matching this rule | (none) |
| `ACTION_ACCEPT` | Accept any packets matching this rule | (none) |
| `ACTION_TEE` | Send a copy of this packet to a node (rule parsing continues) | `zt` |
| `ACTION_REDIRECT` | Redirect this packet to another node | `zt` |
| `ACTION_DEBUG_LOG` | Output debug info on match (if built with rules engine debug) | (none) |
| `MATCH_SOURCE_ZEROTIER_ADDRESS` | Match VL1 ZeroTier address of packet sender. | `zt` |
| `MATCH_DEST_ZEROTIER_ADDRESS` | Match VL1 ZeroTier address of recipient | `zt` |
| `MATCH_ETHERTYPE` | Match Ethernet frame type | `etherType` |
| `MATCH_MAC_SOURCE` | Match source Ethernet MAC address | `mac` |
| `MATCH_MAC_DEST` | Match destination Ethernet MAC address | `mac` |
| `MATCH_IPV4_SOURCE` | Match source IPv4 address | `ip` |
| `MATCH_IPV4_DEST` | Match destination IPv4 address | `ip` |
| `MATCH_IPV6_SOURCE` | Match source IPv6 address | `ip` |
| `MATCH_IPV6_DEST` | Match destination IPv6 address | `ip` |
| `MATCH_IP_TOS` | Match IP TOS field | `ipTos` |
| `MATCH_IP_PROTOCOL` | Match IP protocol field | `ipProtocol` |
| `MATCH_IP_SOURCE_PORT_RANGE` | Match a source IP port range | `start`,`end` |
| `MATCH_IP_DEST_PORT_RANGE` | Match a destination IP port range | `start`,`end` |
| `MATCH_CHARACTERISTICS` | Match on characteristics flags | `mask`,`value` |
| `MATCH_FRAME_SIZE_RANGE` | Match a range of Ethernet frame sizes | `start`,`end` |
| `MATCH_TAGS_SAMENESS` | Match if both sides' tags differ by no more than value | `id`,`value` |
| `MATCH_TAGS_BITWISE_AND` | Match if both sides' tags AND to value | `id`,`value` |
| `MATCH_TAGS_BITWISE_OR` | Match if both sides' tags OR to value | `id`,`value` |
| `MATCH_TAGS_BITWISE_XOR` | Match if both sides' tags XOR to value | `id`,`value` |
Important notes about rules engine behavior:
* IPv4 and IPv6 IP address rules do not match for frames that are not IPv4 or IPv6 respectively.
* `ACTION_DEBUG_LOG` is a no-op on nodes not built with `ZT_RULES_ENGINE_DEBUGGING` enabled (see Network.cpp). If that is enabled nodes will dump a trace of rule evaluation results to *stdout* when this action is encountered but will otherwise keep evaluating rules. This is used for basic "smoke testing" of the rules engine.
* Multicast packets and packets destined for bridged devices are treated a little differently. They are matched more than once. They are matched at the point of send with a NULL ZeroTier destination address, meaning that `MATCH_DEST_ZEROTIER_ADDRESS` is useless. That's because the true VL1 destination is not yet known. Then they are matched again for each true VL1 destination. On these later subsequent matches TEE actions are ignored and REDIRECT rules are interpreted as DROPs. This prevents multiple TEE or REDIRECT packets from being sent to third party devices.
* Rules in capabilities are always matched as if the current device is the sender (inbound == false). A capability specifies sender side rules that can be enforced on both sides.
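As a concrete sketch of the MATCH/ACTION semantics described above, the following `rules` value drops any frame whose Ethernet type is not IPv4 (2048), ARP (2054), or IPv6 (34525) and accepts everything else (the network ID is hypothetical):
    curl -X POST -H "X-ZT1-Auth: secret" \
      -d '{"rules": [
            { "type": "MATCH_ETHERTYPE", "not": true, "etherType": 2048 },
            { "type": "MATCH_ETHERTYPE", "not": true, "etherType": 2054 },
            { "type": "MATCH_ETHERTYPE", "not": true, "etherType": 34525 },
            { "type": "ACTION_DROP" },
            { "type": "ACTION_ACCEPT" }
          ]}' \
      http://localhost:9993/controller/network/305f406058abcdef
The first three MATCH entries AND together, so `ACTION_DROP` fires only when the frame is none of those three types; the trailing `ACTION_ACCEPT` has no preceding MATCHes and therefore accepts whatever remains.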
#### `/controller/network/<network ID>/member`
* Purpose: Get a set of all members on this network
* Methods: GET
* Returns: { object }
This returns a JSON object containing all member IDs as keys and their `memberRevisionCounter` values as values.
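For example (network ID hypothetical):
    curl -H "X-ZT1-Auth: secret" http://localhost:9993/controller/network/305f406058abcdef/member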
#### `/controller/network/<network ID>/member/<address>`
* Purpose: Create, authorize, or remove a network member
* Methods: GET, POST, DELETE
* Returns: { object }
| Field | Type | Description | Writable |
| --------------------- | ------------- | ------------------------------------------------- | -------- |
| id | string | Member's 10-digit ZeroTier address | no |
| address | string | Member's 10-digit ZeroTier address | no |
| nwid | string | 16-digit network ID | no |
| authorized | boolean | Is member authorized? (for private networks) | YES |
| activeBridge | boolean | Member is able to bridge to other Ethernet nets | YES |
| identity | string | Member's public ZeroTier identity (if known) | no |
| ipAssignments | array[string] | Managed IP address assignments | YES |
| revision | integer | Member revision counter | no |
| vMajor | integer | Most recently known major version | no |
| vMinor | integer | Most recently known minor version | no |
| vRev | integer | Most recently known revision | no |
| vProto                | integer       | Most recently known protocol version               | no       |
Note that managed IP assignments are only used if they fall within a managed route. Otherwise they are ignored.
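For example, a sketch of authorizing a member and handing it a managed IP (both IDs are hypothetical, and the IP must fall within a managed route as noted above):
    curl -X POST -H "X-ZT1-Auth: secret" \
      -d '{"authorized": true, "ipAssignments": ["10.147.20.42"]}' \
      http://localhost:9993/controller/network/305f406058abcdef/member/1122334455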
| Metric Name | Type | Description |
| --- | --- | --- |
| controller_network_count | Gauge | number of networks the controller is serving |
| controller_member_count | Gauge | number of network members the controller is serving |
| controller_network_change_count | Counter | number of times a network configuration is changed |
| controller_member_change_count | Counter | number of times a network member configuration is changed |
| controller_member_auth_count | Counter | number of network member auths |
| controller_member_deauth_count | Counter | number of network member deauths |

120
debian/changelog vendored
View file

@ -1,123 +1,3 @@
zerotier-one (1.14.2) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Wed, 23 Oct 2024 01:00:00 -0700
zerotier-one (1.14.1) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Wed, 11 Sep 2024 01:00:00 -0700
zerotier-one (1.14.0) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 19 Mar 2024 01:00:00 -0700
zerotier-one (1.12.2) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 12 Sep 2023 01:00:00 -0700
zerotier-one (1.12.1) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Fri, 25 Aug 2023 01:00:00 -0700
zerotier-one (1.12.0) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Thu, 17 Aug 2023 01:00:00 -0700
zerotier-one (1.10.6) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 21 Mar 2023 01:00:00 -0700
zerotier-one (1.10.5) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Fri, 10 Mar 2023 01:00:00 -0700
zerotier-one (1.10.4) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 06 Mar 2023 01:00:00 -0700
zerotier-one (1.10.3) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Sat, 21 Jan 2023 01:00:00 -0700
zerotier-one (1.10.2) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Thu, 13 Oct 2022 01:00:00 -0700
zerotier-one (1.10.1) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 27 Jun 2022 01:00:00 -0700
zerotier-one (1.10.0) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Fri, 03 Jun 2022 01:00:00 -0700
zerotier-one (1.8.10) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 10 May 2022 01:00:00 -0700
zerotier-one (1.8.9) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 25 Apr 2022 01:00:00 -0700
zerotier-one (1.8.8) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 11 Apr 2022 01:00:00 -0700
zerotier-one (1.8.7) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 21 Mar 2022 01:00:00 -0700
zerotier-one (1.8.6) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 07 Mar 2022 01:00:00 -0700
zerotier-one (1.8.5) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Fri, 17 Dec 2021 01:00:00 -0700
zerotier-one (1.8.4) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 23 Nov 2021 01:00:00 -0700
zerotier-one (1.8.3) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.

2
debian/compat vendored
View file

@ -1 +1 @@
10
8

4
debian/control vendored
View file

@ -3,14 +3,14 @@ Maintainer: Adam Ierymenko <adam.ierymenko@zerotier.com>
Section: net
Priority: optional
Standards-Version: 3.9.6
Build-Depends: debhelper
Build-Depends: debhelper (>= 9)
Vcs-Git: git://github.com/zerotier/ZeroTierOne
Vcs-Browser: https://github.com/zerotier/ZeroTierOne
Homepage: https://www.zerotier.com/
Package: zerotier-one
Architecture: any
Depends: adduser, libstdc++6 (>= 5), openssl
Depends: ${shlibs:Depends}, ${misc:Depends}, iproute2, adduser, libstdc++6
Homepage: https://www.zerotier.com/
Description: ZeroTier network virtualization service
ZeroTier One lets you join ZeroTier virtual networks and

View file

@ -10,7 +10,7 @@ Homepage: https://www.zerotier.com/
Package: zerotier-one
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}, libstdc++6
Depends: ${shlibs:Depends}, ${misc:Depends}, iproute, libstdc++6
Homepage: https://www.zerotier.com/
Description: ZeroTier network virtualization service
ZeroTier One lets you join ZeroTier virtual networks and

2
debian/copyright vendored
View file

@ -12,7 +12,7 @@ License: ZeroTier BSL 1.1
Use of this software is governed by the Business Source License included
in the LICENSE.TXT file in the project's root directory.
Change Date: 2026-01-01
Change Date: 2025-01-01
On the date above, in accordance with the Business Source License, use
of this software will be governed by version 2.0 of the Apache License.

2
debian/rules vendored Normal file → Executable file
View file

@ -7,7 +7,7 @@ CXXFLAGS=-O3 -fstack-protector-strong
dh $@ --with systemd
override_dh_auto_build:
make
make -j4
override_dh_systemd_start:
dh_systemd_start --restart-after-upgrade

0
debian/rules.wheezy vendored Normal file → Executable file
View file

View file

@ -1,6 +1,6 @@
[Unit]
Description=ZeroTier One
After=network-online.target network.target
After=network-online.target
Wants=network-online.target
[Service]

View file

@ -3,4 +3,4 @@ Manual Pages and Other Documentation
Use "./build.sh" to build the manual pages.
You'll need either Node.js/npm installed (script will then automatically install the npm *marked-man* package) or */usr/bin/ronn*. The latter is a Ruby program packaged on some distributions as *rubygem-ronn* or *ruby-ronn* or installable as *gem install ronn*. The Node *marked-man* package and *ronn* from RubyGems are two roughly equivalent alternatives for compiling Markdown into roff/man format.
You'll need either NodeJS/npm installed (script will then automatically install the npm *marked-man* package) or */usr/bin/ronn*. The latter is a Ruby program packaged on some distributions as *rubygem-ronn* or *ruby-ronn* or installable as *gem install ronn*. The Node *marked-man* package and *ronn* from rubygems are two roughly equivalent alternatives for compiling MarkDown into roff/man format.

View file

@ -81,7 +81,7 @@ These are found in the service's working directory.
If the ZeroTier One service is built with the network controller enabled, it periodically backs up its controller.db database in this file (currently every 5 minutes if there have been changes). Since this file is not a currently-in-use SQLite3 database, it's safer to back up without corruption. On new backups the file is rotated out rather than being rewritten in place.
* `iddb.d/` (directory):
Caches the public identity of every peer ZeroTier has spoken with in the last 60 days. This directory and its contents can be deleted, but this may result in slower connection initiations since it will require that we go out and re-fetch full identities for peers we're speaking to.
Caches the public identity of every peer ZeroTier has spoken with in the last 60 days. This directory and its contents can be deleted, but this may result in slower connection initations since it will require that we go out and re-fetch full identities for peers we're speaking to.
* `networks.d` (directory):
This caches network configurations and certificate information for networks you belong to. ZeroTier scans this directory for <network ID>.conf files on startup to recall its networks, so "touch"ing an empty <network ID>.conf file in this directory is a way of pre-configuring ZeroTier to join a specific network on startup without using the API. If the config file is empty ZeroTier will just fetch it from the network's controller.
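For example (the working directory path is the common Linux default and the network ID is hypothetical):
    touch /var/lib/zerotier-one/networks.d/305f406058abcdef.conf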

View file

@ -0,0 +1,23 @@
FROM alpine:3.11.3
ARG go_pkg_url
RUN apk add --update alpine-sdk linux-headers cmake openssh curl
RUN adduser -D -s /bin/ash jenkins && \
passwd -u jenkins && \
ssh-keygen -A && \
mkdir /home/jenkins/.ssh && \
chown -R jenkins:jenkins /home/jenkins
RUN curl -s $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz
COPY authorized_keys /home/jenkins/.ssh/authorized_keys
RUN chown -R jenkins:jenkins /home/jenkins/.ssh && \
chmod 600 /home/jenkins/.ssh/authorized_keys
EXPOSE 22
CMD ["/usr/sbin/sshd", "-D"]

View file

@ -0,0 +1,20 @@
FROM centos:6
ARG go_pkg_url
RUN yum update -y
RUN yum install -y curl git wget openssh-server sudo make rpmdevtools && yum clean all
RUN curl -s $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz && \
rm go.tar.gz
RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build
RUN echo $'\n\
export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin\n'\
>> ~/.bash_profile
RUN mkdir /rpmbuild && chmod 777 /rpmbuild
CMD ["/usr/sbin/sshd", "-D"]

View file

@ -0,0 +1,21 @@
FROM i386/centos:6
ARG go_pkg_url
RUN echo i386 > /etc/yum/vars/basearch && echo i686 > /etc/yum/vars/arch
RUN yum install -y curl git wget openssh-server sudo make rpmdevtools && yum clean all
RUN curl -s $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz && \
rm go.tar.gz
RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build
RUN echo $'\n\
export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin\n'\
>> ~/.bash_profile
RUN mkdir /rpmbuild && chmod 777 /rpmbuild
CMD ["/usr/sbin/sshd", "-D"]

View file

@ -0,0 +1,25 @@
FROM centos:7
ARG go_pkg_url
RUN yum install -y epel-release
RUN yum install -y curl git wget openssh-server sudo make development-tools rpmdevtools clang gcc-c++ ruby ruby-devel centos-release-scl devtoolset-8 llvm-toolset-7 && yum clean all
RUN curl -s $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz && \
rm go.tar.gz
RUN wget -qO- "https://cmake.org/files/v3.15/cmake-3.15.1-Linux-x86_64.tar.gz" | tar --strip-components=1 -xz -C /usr/local
RUN /usr/bin/ssh-keygen -A
RUN useradd jenkins-build
RUN echo $'\n\
export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin\n\
source scl_source enable devtoolset-8 llvm-toolset-7\n'\
>> ~/.bash_profile
RUN mkdir /rpmbuild && chmod 777 /rpmbuild
CMD ["/usr/sbin/sshd", "-D"]

View file

@ -0,0 +1,22 @@
FROM centos:7
ARG go_pkg_url
RUN yum install -y curl git wget openssh-server sudo make development-tools rpmdevtools clang gcc-c++ ruby ruby-devel && yum clean all
RUN curl -s $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz && \
rm go.tar.gz
RUN /usr/bin/ssh-keygen -A
RUN useradd jenkins-build
RUN echo $'\n\
export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin\n'\
>> ~/.bash_profile
RUN mkdir /rpmbuild && chmod 777 /rpmbuild
CMD ["/usr/sbin/sshd", "-D"]

View file

@ -0,0 +1,25 @@
FROM centos:8
ARG go_pkg_url
RUN yum install -y epel-release
RUN yum install -y curl git wget openssh-server sudo make rpmdevtools clang gcc-c++ ruby ruby-devel && yum clean all
RUN curl -s $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz && \
rm go.tar.gz
RUN wget -qO- "https://cmake.org/files/v3.15/cmake-3.15.1-Linux-x86_64.tar.gz" | tar --strip-components=1 -xz -C /usr/local
RUN /usr/bin/ssh-keygen -A
RUN useradd jenkins-build
RUN echo $'\n\
export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin\n\
source scl_source enable devtoolset-8 llvm-toolset-7\n'\
>> ~/.bash_profile
RUN mkdir /rpmbuild && chmod 777 /rpmbuild
CMD ["/usr/sbin/sshd", "-D"]

View file

@ -0,0 +1,20 @@
FROM s390x/clefos:7
ARG go_pkg_url
RUN yum install -y curl git wget openssh-server sudo make development-tools rpmdevtools clang gcc-c++ ruby ruby-devel && yum clean all
RUN curl -s $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz && \
rm go.tar.gz
RUN /usr/bin/ssh-keygen -A
RUN echo $'\n\
export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin\n'\
>> ~/.bash_profile
RUN mkdir /rpmbuild && chmod 777 /rpmbuild
CMD ["/usr/sbin/sshd", "-D"]

View file

@ -0,0 +1,15 @@
FROM debian:bullseye-20191224
ARG go_pkg_url
RUN apt-get update && apt-get upgrade -y && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd
RUN curl -s -k $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz && \
rm go.tar.gz
RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build
RUN chmod 777 /home
CMD ["/usr/bin/sshd", "-D"]

View file

@ -0,0 +1,15 @@
FROM debian:buster-20191224
ARG go_pkg_url
RUN apt-get update && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd
RUN curl -s -k $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz && \
rm go.tar.gz
RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build
RUN chmod 777 /home
CMD ["/usr/bin/sshd", "-D"]

View file

@ -0,0 +1,15 @@
FROM debian:jessie-20191224
ARG go_pkg_url
RUN apt-get update && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd
RUN curl -s -k $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz && \
rm go.tar.gz
RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build
RUN chmod 777 /home
CMD ["/usr/bin/sshd", "-D"]

View file

@ -0,0 +1,15 @@
FROM debian:sid-20191224
ARG go_pkg_url
RUN apt-get update && apt-get upgrade -y && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd
RUN curl -s -k $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz && \
rm go.tar.gz
RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build
RUN chmod 777 /home
CMD ["/usr/bin/sshd", "-D"]

View file

@ -0,0 +1,15 @@
FROM debian:stretch-20191224
ARG go_pkg_url
RUN apt-get update && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd
RUN curl -s -k $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz && \
rm go.tar.gz
RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build
RUN chmod 777 /home
CMD ["/usr/bin/sshd", "-D"]

View file

@ -0,0 +1,23 @@
FROM debian:wheezy-20190228
ARG go_pkg_url
RUN echo "deb http://archive.debian.org/debian/ wheezy contrib main non-free" > /etc/apt/sources.list && \
echo "deb-src http://archive.debian.org/debian/ wheezy contrib main non-free" >> /etc/apt/sources.list && \
apt-get update && apt-get install -y apt-utils && \
apt-get install -y --force-yes \
curl gcc make sudo expect gnupg fakeroot perl-base=5.14.2-21+deb7u3 perl \
libc-bin=2.13-38+deb7u10 libc6=2.13-38+deb7u10 libc6-dev build-essential \
cdbs devscripts equivs automake autoconf libtool libaudit-dev selinux-basics \
libdb5.1=5.1.29-5 libdb5.1-dev libssl1.0.0=1.0.1e-2+deb7u20 procps gawk libsigsegv2 \
curl ca-certificates devscripts
RUN curl -s -k $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz && \
rm go.tar.gz
RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build
RUN chmod 777 /home
CMD ["/usr/bin/sshd", "-D"]

View file

@ -0,0 +1,15 @@
FROM kalilinux/kali-rolling:latest
ARG go_pkg_url
RUN apt-get update && apt-get upgrade -y && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd cmake
RUN curl -s -k $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz && \
rm go.tar.gz
RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build
RUN chmod 777 /home
CMD ["/usr/bin/sshd", "-D"]

View file

@ -0,0 +1,15 @@
FROM ubuntu:bionic-20200112
ARG go_pkg_url
RUN apt-get update && apt-get upgrade -y && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd
RUN curl -s -k $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz && \
rm go.tar.gz
RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build
RUN chmod 777 /home
CMD ["/usr/bin/sshd", "-D"]

View file

@ -0,0 +1,15 @@
FROM ubuntu:eoan-20200114
ARG go_pkg_url
RUN apt-get update && apt-get upgrade -y && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd
RUN curl -s -k $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz && \
rm go.tar.gz
RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build
RUN chmod 777 /home
CMD ["/usr/bin/sshd", "-D"]

View file

@ -0,0 +1,15 @@
FROM ubuntu:trusty-20191217
ARG go_pkg_url
RUN apt-get update && apt-get upgrade -y && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd
RUN curl -s -k $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz && \
rm go.tar.gz
RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build
RUN chmod 777 /home
CMD ["/usr/bin/sshd", "-D"]

View file

@ -0,0 +1,15 @@
FROM ubuntu:xenial-20200114
ARG go_pkg_url
RUN apt-get update && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd
RUN curl -s -k $go_pkg_url -o go.tar.gz && \
tar -C /usr/local -xzf go.tar.gz && \
rm go.tar.gz
RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build
RUN chmod 777 /home
CMD ["/usr/bin/sshd", "-D"]

108
dockerbuild/Makefile Normal file
View file

@ -0,0 +1,108 @@
.PHONY: all
all: alpine centos debian ubuntu kali-rolling
alpine:
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.alpine . -t ztbuild/alpine-arm64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.alpine . -t ztbuild/alpine-i386 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.alpine . -t ztbuild/alpine-amd64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v6 -f Dockerfile.alpine . -t ztbuild/alpine-armel --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.alpine . -t ztbuild/alpine-armhf --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.alpine . -t ztbuild/alpine-ppc64le --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-s390x.tar.gz" --platform linux/s390x -f Dockerfile.alpine . -t ztbuild/alpine-s390x --load
centos:
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.centos7 . -t ztbuild/centos7-amd64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.centos7-i386 . -t ztbuild/centos7-i386 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.centos6 . -t ztbuild/centos6-amd64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.centos6-i386 . -t ztbuild/centos6-i386 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.centos8 . -t ztbuild/centos8-amd64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.centos8 . -t ztbuild/centos8-arm64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.centos8 . -t ztbuild/centos8-ppc64le --load
debian: debian-wheezy debian-jessie debian-buster debian-stretch debian-bullseye debian-sid
debian-wheezy:
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.debian-wheezy . -t ztbuild/debian-wheezy-amd64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.debian-wheezy . -t ztbuild/debian-wheezy-armhf --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v6 -f Dockerfile.debian-wheezy . -t ztbuild/debian-wheezy-armel --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.debian-wheezy . -t ztbuild/debian-wheezy-i386 --load
debian-jessie:
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.debian-jessie . -t ztbuild/debian-jessie-amd64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.debian-jessie . -t ztbuild/debian-jessie-armhf --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v6 -f Dockerfile.debian-jessie . -t ztbuild/debian-jessie-armel --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.debian-jessie . -t ztbuild/debian-jessie-i386 --load
debian-buster:
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.debian-buster . -t ztbuild/debian-buster-amd64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.debian-buster . -t ztbuild/debian-buster-arm64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v6 -f Dockerfile.debian-buster . -t ztbuild/debian-buster-armel --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.debian-buster . -t ztbuild/debian-buster-armhf --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.debian-buster . -t ztbuild/debian-buster-i386 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.debian-buster . -t ztbuild/debian-buster-ppc64le --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-s390x.tar.gz" --platform linux/s390x -f Dockerfile.debian-buster . -t ztbuild/debian-buster-s390x --load
debian-stretch:
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.debian-stretch . -t ztbuild/debian-stretch-amd64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.debian-stretch . -t ztbuild/debian-stretch-arm64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v6 -f Dockerfile.debian-stretch . -t ztbuild/debian-stretch-armel --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.debian-stretch . -t ztbuild/debian-stretch-armhf --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.debian-stretch . -t ztbuild/debian-stretch-i386 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.debian-stretch . -t ztbuild/debian-stretch-ppc64le --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-s390x.tar.gz" --platform linux/s390x -f Dockerfile.debian-stretch . -t ztbuild/debian-stretch-s390x --load
debian-bullseye:
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.debian-bullseye . -t ztbuild/debian-bullseye-amd64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.debian-bullseye . -t ztbuild/debian-bullseye-arm64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v6 -f Dockerfile.debian-bullseye . -t ztbuild/debian-bullseye-armel --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.debian-bullseye . -t ztbuild/debian-bullseye-armhf --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.debian-bullseye . -t ztbuild/debian-bullseye-i386 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.debian-bullseye . -t ztbuild/debian-bullseye-ppc64le --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-s390x.tar.gz" --platform linux/s390x -f Dockerfile.debian-bullseye . -t ztbuild/debian-bullseye-s390x --load
debian-sid:
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.debian-sid . -t ztbuild/debian-sid-amd64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.debian-sid . -t ztbuild/debian-sid-arm64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v6 -f Dockerfile.debian-sid . -t ztbuild/debian-sid-armel --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.debian-sid . -t ztbuild/debian-sid-armhf --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.debian-sid . -t ztbuild/debian-sid-i386 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.debian-sid . -t ztbuild/debian-sid-ppc64le --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-s390x.tar.gz" --platform linux/s390x -f Dockerfile.debian-sid . -t ztbuild/debian-sid-s390x --load
ubuntu: ubuntu-trusty ubuntu-xenial ubuntu-bionic ubuntu-eoan
ubuntu-trusty:
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.ubuntu-trusty . -t ztbuild/ubuntu-trusty-amd64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.ubuntu-trusty . -t ztbuild/ubuntu-trusty-arm64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.ubuntu-trusty . -t ztbuild/ubuntu-trusty-armhf --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.ubuntu-trusty . -t ztbuild/ubuntu-trusty-i386 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.ubuntu-trusty . -t ztbuild/ubuntu-trusty-ppc64le --load
ubuntu-xenial:
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.ubuntu-xenial . -t ztbuild/ubuntu-xenial-amd64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.ubuntu-xenial . -t ztbuild/ubuntu-xenial-arm64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.ubuntu-xenial . -t ztbuild/ubuntu-xenial-armhf --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.ubuntu-xenial . -t ztbuild/ubuntu-xenial-i386 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.ubuntu-xenial . -t ztbuild/ubuntu-xenial-ppc64le --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-s390x.tar.gz" --platform linux/s390x -f Dockerfile.ubuntu-xenial . -t ztbuild/ubuntu-xenial-s390x --load
ubuntu-bionic:
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.ubuntu-bionic . -t ztbuild/ubuntu-bionic-amd64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.ubuntu-bionic . -t ztbuild/ubuntu-bionic-arm64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.ubuntu-bionic . -t ztbuild/ubuntu-bionic-armhf --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.ubuntu-bionic . -t ztbuild/ubuntu-bionic-i386 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.ubuntu-bionic . -t ztbuild/ubuntu-bionic-ppc64le --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-s390x.tar.gz" --platform linux/s390x -f Dockerfile.ubuntu-bionic . -t ztbuild/ubuntu-bionic-s390x --load
ubuntu-eoan:
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.ubuntu-eoan . -t ztbuild/ubuntu-eoan-amd64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.ubuntu-eoan . -t ztbuild/ubuntu-eoan-arm64 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.ubuntu-eoan . -t ztbuild/ubuntu-eoan-armhf --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.ubuntu-eoan . -t ztbuild/ubuntu-eoan-i386 --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.ubuntu-eoan . -t ztbuild/ubuntu-eoan-ppc64le --load
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-s390x.tar.gz" --platform linux/s390x -f Dockerfile.ubuntu-eoan . -t ztbuild/ubuntu-eoan-s390x --load
kali-rolling:
@docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.kali-rolling . -t ztbuild/kali-rolling-amd64 --load
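These buildx targets cross-compile for several non-native architectures, so the Docker host needs QEMU binfmt handlers registered and a container-driver builder selected before any of them will run. A minimal one-time setup sketch, using the same binfmt image the controller Makefile later in this diff relies on (the builder name is arbitrary):
docker run --privileged --rm tonistiigi/binfmt --install all
docker buildx create --name multiarch --driver docker-container --use
docker buildx inspect --bootstrap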

View file

@ -0,0 +1,2 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8hgysbj2Luu3aN/Ya2wr4Y9LpUGqWWfn3k+UhIwOIE/Kd7/YpLjxHpseUA1hLnj9kHFShH8eiqoY0S6EDIYrTUwbXMMu8454lX/LcJOCJ9RlSeMMf7vpkxcI7cVRgOA430a3FR7M0Q8vKlyJzxxAEjMIxMyuVyinknfanNt+sQFiDUvOXoacqgZAHBWMlO7wOPyHWHNOzy7g8N0dHiJveKZqX/UUwuqJuS6UBq7MBMSU6TcMvJwHr+AbNvfyIUWCqlTByqFL9cmviRbIvQanxoRxi/5fVUGhtVBXUYvbCdFxDw5W2Svo9fDMm4Z5xWAD7rY1J3AM15RVyRTTtYvgD

13 dockerbuild/pipelint.sh Normal file
View file

@ -0,0 +1,13 @@
# Lint a Jenkinsfile with curl via the Jenkins REST API
# Jenkins user
JENKINS_USER=grant
# API key from "/me/configure" on my Jenkins instance
JENKINS_USER_KEY=11edf2d49321321119712c46c6349eaad7
# URL for my local Jenkins instance.
JENKINS_URL=http://$JENKINS_USER:$JENKINS_USER_KEY@jenkins.int.zerotier.com
# JENKINS_CRUMB is needed if your Jenkins master has CSRF protection enabled (which it should)
JENKINS_CRUMB=`curl "$JENKINS_URL/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,\":\",//crumb)"`
curl -X POST -H $JENKINS_CRUMB -F "jenkinsfile=<Jenkinsfile" $JENKINS_URL/pipeline-model-converter/validate
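pipelint.sh uploads the Jenkinsfile found in the current working directory (the jenkinsfile=<Jenkinsfile form field) to the pipeline-model-converter/validate endpoint, which replies with either a success message or the declarative pipeline syntax errors. A hedged usage sketch; the checkout path is a placeholder, and it assumes the credentials above are valid:
cd path/to/checkout        # directory containing the Jenkinsfile to lint
sh dockerbuild/pipelint.sh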

View file

@ -1,7 +1,7 @@
#!/bin/sh
grepzt() {
[ -f /var/lib/zerotier-one/zerotier-one.pid -a -n "$(cat /var/lib/zerotier-one/zerotier-one.pid 2>/dev/null)" -a -d "/proc/$(cat /var/lib/zerotier-one/zerotier-one.pid 2>/dev/null)" ]
[ ! -n "$(cat /var/lib/zerotier-one/zerotier-one.pid)" -a -d "/proc/$(cat /var/lib/zerotier-one/zerotier-one.pid)" ]
return $?
}
@ -9,16 +9,15 @@ mkztfile() {
file=$1
mode=$2
content=$3
echo "creating $file"
mkdir -p /var/lib/zerotier-one
echo -n "$content" > "/var/lib/zerotier-one/$file"
echo "$content" > "/var/lib/zerotier-one/$file"
chmod "$mode" "/var/lib/zerotier-one/$file"
}
if [ "x$ZEROTIER_API_SECRET" != "x" ]
then
mkztfile authtoken.secret 0600 "$ZEROTIER_API_SECRET"
mkztfile metricstoken.secret 0600 "$ZEROTIER_API_SECRET"
fi
if [ "x$ZEROTIER_IDENTITY_PUBLIC" != "x" ]
@ -31,100 +30,36 @@ then
mkztfile identity.secret 0600 "$ZEROTIER_IDENTITY_SECRET"
fi
if [ "x$ZEROTIER_LOCAL_CONF" != "x" ]
then
mkztfile local.conf 0644 "$ZEROTIER_LOCAL_CONF"
fi
mkztfile zerotier-one.port 0600 "9993"
killzerotier() {
log "Killing zerotier"
kill $(cat /var/lib/zerotier-one/zerotier-one.pid 2>/dev/null)
echo "Killing zerotier"
kill $(cat /var/lib/zerotier-one/zerotier-one.pid)
exit 0
}
log_header() {
echo -n "\r=>"
}
log_detail_header() {
echo -n "\r===>"
}
log() {
echo "$(log_header)" "$@"
}
log_params() {
title=$1
shift
log "$title" "[$@]"
}
log_detail() {
echo "$(log_detail_header)" "$@"
}
log_detail_params() {
title=$1
shift
log_detail "$title" "[$@]"
}
trap killzerotier INT TERM
log "Configuring networks to join"
mkdir -p /var/lib/zerotier-one/networks.d
log_params "Joining networks from command line:" $@
for i in "$@"
do
log_detail_params "Configuring join:" "$i"
touch "/var/lib/zerotier-one/networks.d/${i}.conf"
done
if [ "x$ZEROTIER_JOIN_NETWORKS" != "x" ]
then
log_params "Joining networks from environment:" $ZEROTIER_JOIN_NETWORKS
for i in $ZEROTIER_JOIN_NETWORKS
do
log_detail_params "Configuring join:" "$i"
touch "/var/lib/zerotier-one/networks.d/${i}.conf"
done
fi
log "Starting ZeroTier"
echo "starting zerotier"
nohup /usr/sbin/zerotier-one &
while ! grepzt
do
log_detail "ZeroTier hasn't started, waiting a second"
if [ -f nohup.out ]
then
tail -n 10 nohup.out
fi
echo "zerotier hasn't started, waiting a second"
sleep 1
done
log_params "Writing healthcheck for networks:" $@
echo "joining networks: $@"
cat >/healthcheck.sh <<EOF
#!/bin/bash
for i in $@ $ZEROTIER_JOIN_NETWORKS
for i in "$@"
do
[ "\$(zerotier-cli get \$i status)" = "OK" ] || exit 1
done
EOF
echo "joining $i"
chmod +x /healthcheck.sh
log_params "zerotier-cli info:" "$(zerotier-cli info)"
log "Sleeping infinitely"
while true
while ! zerotier-cli join "$i"
do
echo "joining $i failed; trying again in 1s"
sleep 1
done
done
sleep infinity
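Everything in this entrypoint is driven by environment variables (ZEROTIER_API_SECRET, ZEROTIER_IDENTITY_PUBLIC/SECRET, ZEROTIER_LOCAL_CONF, ZEROTIER_JOIN_NETWORKS) plus positional network IDs. A hedged run sketch; the image tag, volume name, and network ID are placeholders, and ZeroTier containers generally also need NET_ADMIN and access to /dev/net/tun:
docker run -d --name zerotier \
  --cap-add NET_ADMIN --device /dev/net/tun \
  -e ZEROTIER_API_SECRET=changeme \
  -e ZEROTIER_JOIN_NETWORKS=8056c2e21c000001 \
  -v zerotier-one:/var/lib/zerotier-one \
  zerotier/zerotier:latest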

View file

@ -0,0 +1,36 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleExecutable</key>
<string>tap</string>
<key>CFBundleIdentifier</key>
<string>com.zerotier.tap</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>tap</string>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
<string>20150118</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
<string>1.0</string>
<key>OSBundleLibraries</key>
<dict>
<key>com.apple.kpi.mach</key>
<string>8.0</string>
<key>com.apple.kpi.bsd</key>
<string>8.0</string>
<key>com.apple.kpi.libkern</key>
<string>8.0</string>
<key>com.apple.kpi.unsupported</key>
<string>8.0</string>
</dict>
</dict>
</plist>

Binary file not shown.

View file

@ -0,0 +1,105 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>files</key>
<dict/>
<key>files2</key>
<dict/>
<key>rules</key>
<dict>
<key>^Resources/</key>
<true/>
<key>^Resources/.*\.lproj/</key>
<dict>
<key>optional</key>
<true/>
<key>weight</key>
<real>1000</real>
</dict>
<key>^Resources/.*\.lproj/locversion.plist$</key>
<dict>
<key>omit</key>
<true/>
<key>weight</key>
<real>1100</real>
</dict>
<key>^version.plist$</key>
<true/>
</dict>
<key>rules2</key>
<dict>
<key>.*\.dSYM($|/)</key>
<dict>
<key>weight</key>
<real>11</real>
</dict>
<key>^(.*/)?\.DS_Store$</key>
<dict>
<key>omit</key>
<true/>
<key>weight</key>
<real>2000</real>
</dict>
<key>^(Frameworks|SharedFrameworks|PlugIns|Plug-ins|XPCServices|Helpers|MacOS|Library/(Automator|Spotlight|LoginItems))/</key>
<dict>
<key>nested</key>
<true/>
<key>weight</key>
<real>10</real>
</dict>
<key>^.*</key>
<true/>
<key>^Info\.plist$</key>
<dict>
<key>omit</key>
<true/>
<key>weight</key>
<real>20</real>
</dict>
<key>^PkgInfo$</key>
<dict>
<key>omit</key>
<true/>
<key>weight</key>
<real>20</real>
</dict>
<key>^Resources/</key>
<dict>
<key>weight</key>
<real>20</real>
</dict>
<key>^Resources/.*\.lproj/</key>
<dict>
<key>optional</key>
<true/>
<key>weight</key>
<real>1000</real>
</dict>
<key>^Resources/.*\.lproj/locversion.plist$</key>
<dict>
<key>omit</key>
<true/>
<key>weight</key>
<real>1100</real>
</dict>
<key>^[^/]+$</key>
<dict>
<key>nested</key>
<true/>
<key>weight</key>
<real>10</real>
</dict>
<key>^embedded\.provisionprofile$</key>
<dict>
<key>weight</key>
<real>20</real>
</dict>
<key>^version\.plist$</key>
<dict>
<key>weight</key>
<real>20</real>
</dict>
</dict>
</dict>
</plist>

Binary file not shown.

Binary file not shown.

View file

@ -1,15 +1,10 @@
;
; ****************************************************************************
; * Copyright (C) 2002-2014 OpenVPN Technologies, Inc. *
; * This program is free software; you can redistribute it and/or modify *
; * it under the terms of the GNU General Public License version 2 *
; * as published by the Free Software Foundation. *
; ****************************************************************************
;
;
; ZeroTier One Virtual Network Port NDIS6 Driver
;
; Based on the OpenVPN tap-windows6 driver version 9.21.1 git
; commit 48f027cfca52b16b5fd23d82e6016ed8a91fc4d3.
; See: https://github.com/OpenVPN/tap-windows6
;
; Modified by ZeroTier, Inc. - https://www.zerotier.com/
;
; (1) Comment out 'tun' functionality and related features such as DHCP
@ -20,6 +15,14 @@
; (5) Rename/rebrand driver as ZeroTier network port driver.
;
; Original copyright below. Modifications released under GPLv2 as well.
;
; ****************************************************************************
; * Copyright (C) 2002-2014 OpenVPN Technologies, Inc. *
; * This program is free software; you can redistribute it and/or modify *
; * it under the terms of the GNU General Public License version 2 *
; * as published by the Free Software Foundation. *
; ****************************************************************************
;
[Version]
Signature = "$Windows NT$"
@ -27,18 +30,17 @@ CatalogFile = zttap300.cat
ClassGUID = {4d36e972-e325-11ce-bfc1-08002be10318}
Provider = %Provider%
Class = Net
DriverVer=11/24/2020,3.00.00.1
DriverVer=08/13/2015,6.2.9200.20557
[Strings]
DeviceDescription = "ZeroTier Virtual Port"
Provider = "ZeroTier"
DeviceDescription = "ZeroTier One Virtual Port"
Provider = "ZeroTier Networks LLC" ; We're ZeroTier, Inc. now but kernel mode certs are $300+ so fuqdat.
; To build for x86, take NTamd64 off this and off the named section manually, build, then put it back!
[Manufacturer]
%Provider%=zttap300,NTx86
%Provider%=zttap300,NTamd64
%Provider%=zttap300,NTarm64
[zttap300.NTx86]
[zttap300]
%DeviceDescription% = zttap300.ndi, root\zttap300 ; Root enumerated
%DeviceDescription% = zttap300.ndi, zttap300 ; Legacy
@ -46,10 +48,6 @@ Provider = "ZeroTier"
%DeviceDescription% = zttap300.ndi, root\zttap300 ; Root enumerated
%DeviceDescription% = zttap300.ndi, zttap300 ; Legacy
[zttap300.NTarm64]
%DeviceDescription% = zttap300.ndi, root\zttap300 ; Root enumerated
%DeviceDescription% = zttap300.ndi, zttap300 ; Legacy
;----------------- Characteristics ------------
; NCF_PHYSICAL = 0x04
; NCF_VIRTUAL = 0x01

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1,143 @@
;
; ZeroTier One Virtual Network Port NDIS6 Driver
;
; Based on the OpenVPN tap-windows6 driver version 9.21.1 git
; commit 48f027cfca52b16b5fd23d82e6016ed8a91fc4d3.
; See: https://github.com/OpenVPN/tap-windows6
;
; Modified by ZeroTier, Inc. - https://www.zerotier.com/
;
; (1) Comment out 'tun' functionality and related features such as DHCP
; emulation, since we don't use any of that. Just want straight 'tap'.
; (2) Added custom IOCTL to enumerate L2 multicast memberships.
; (3) Increase maximum number of multicast memberships to 128.
; (4) Set default and max device MTU to 2800.
; (5) Rename/rebrand driver as ZeroTier network port driver.
;
; Original copyright below. Modifications released under GPLv2 as well.
;
; ****************************************************************************
; * Copyright (C) 2002-2014 OpenVPN Technologies, Inc. *
; * This program is free software; you can redistribute it and/or modify *
; * it under the terms of the GNU General Public License version 2 *
; * as published by the Free Software Foundation. *
; ****************************************************************************
;
[Version]
Signature = "$Windows NT$"
CatalogFile = zttap300.cat
ClassGUID = {4d36e972-e325-11ce-bfc1-08002be10318}
Provider = %Provider%
Class = Net
DriverVer=08/13/2015,6.2.9200.20557
[Strings]
DeviceDescription = "ZeroTier One Virtual Port"
Provider = "ZeroTier Networks LLC" ; We're ZeroTier, Inc. now but kernel mode certs are $300+ so fuqdat.
; To build for x86, take NTamd64 off this and off the named section manually, build, then put it back!
[Manufacturer]
%Provider%=zttap300,NTamd64
[zttap300]
%DeviceDescription% = zttap300.ndi, root\zttap300 ; Root enumerated
%DeviceDescription% = zttap300.ndi, zttap300 ; Legacy
[zttap300.NTamd64]
%DeviceDescription% = zttap300.ndi, root\zttap300 ; Root enumerated
%DeviceDescription% = zttap300.ndi, zttap300 ; Legacy
;----------------- Characteristics ------------
; NCF_PHYSICAL = 0x04
; NCF_VIRTUAL = 0x01
; NCF_SOFTWARE_ENUMERATED = 0x02
; NCF_HIDDEN = 0x08
; NCF_NO_SERVICE = 0x10
; NCF_HAS_UI = 0x80
;----------------- Characteristics ------------
[zttap300.ndi]
CopyFiles = zttap300.driver, zttap300.files
AddReg = zttap300.reg
AddReg = zttap300.params.reg
Characteristics = 0x81
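; 0x81 = NCF_VIRTUAL (0x01) | NCF_HAS_UI (0x80), per the table above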
*IfType = 0x6 ; IF_TYPE_ETHERNET_CSMACD
*MediaType = 0x0 ; NdisMedium802_3
*PhysicalMediaType = 14 ; NdisPhysicalMedium802_3
[zttap300.ndi.Services]
AddService = zttap300, 2, zttap300.service
[zttap300.reg]
HKR, Ndi, Service, 0, "zttap300"
HKR, Ndi\Interfaces, UpperRange, 0, "ndis5" ; yes, 'ndis5' is correct... yup, Windows.
HKR, Ndi\Interfaces, LowerRange, 0, "ethernet"
HKR, , Manufacturer, 0, "%Provider%"
HKR, , ProductName, 0, "%DeviceDescription%"
[zttap300.params.reg]
HKR, Ndi\params\MTU, ParamDesc, 0, "MTU"
HKR, Ndi\params\MTU, Type, 0, "int"
HKR, Ndi\params\MTU, Default, 0, "2800"
HKR, Ndi\params\MTU, Optional, 0, "0"
HKR, Ndi\params\MTU, Min, 0, "100"
HKR, Ndi\params\MTU, Max, 0, "2800"
HKR, Ndi\params\MTU, Step, 0, "1"
HKR, Ndi\params\MediaStatus, ParamDesc, 0, "Media Status"
HKR, Ndi\params\MediaStatus, Type, 0, "enum"
HKR, Ndi\params\MediaStatus, Default, 0, "0"
HKR, Ndi\params\MediaStatus, Optional, 0, "0"
HKR, Ndi\params\MediaStatus\enum, "0", 0, "Application Controlled"
HKR, Ndi\params\MediaStatus\enum, "1", 0, "Always Connected"
HKR, Ndi\params\MAC, ParamDesc, 0, "MAC Address"
HKR, Ndi\params\MAC, Type, 0, "edit"
HKR, Ndi\params\MAC, Optional, 0, "1"
HKR, Ndi\params\AllowNonAdmin, ParamDesc, 0, "Non-Admin Access"
HKR, Ndi\params\AllowNonAdmin, Type, 0, "enum"
HKR, Ndi\params\AllowNonAdmin, Default, 0, "0"
HKR, Ndi\params\AllowNonAdmin, Optional, 0, "0"
HKR, Ndi\params\AllowNonAdmin\enum, "0", 0, "Not Allowed"
HKR, Ndi\params\AllowNonAdmin\enum, "1", 0, "Allowed"
;---------- Service Type -------------
; SERVICE_KERNEL_DRIVER = 0x01
; SERVICE_WIN32_OWN_PROCESS = 0x10
;---------- Service Type -------------
;---------- Start Mode ---------------
; SERVICE_BOOT_START = 0x0
; SERVICE_SYSTEM_START = 0x1
; SERVICE_AUTO_START = 0x2
; SERVICE_DEMAND_START = 0x3
; SERVICE_DISABLED = 0x4
;---------- Start Mode ---------------
[zttap300.service]
DisplayName = %DeviceDescription%
ServiceType = 1
StartType = 3
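; ServiceType 1 = SERVICE_KERNEL_DRIVER, StartType 3 = SERVICE_DEMAND_START (see the tables above)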
ErrorControl = 1
LoadOrderGroup = NDIS
ServiceBinary = %12%\zttap300.sys
;----------------- Copy Flags ------------
; COPYFLG_NOSKIP = 0x02
; COPYFLG_NOVERSIONCHECK = 0x04
;----------------- Copy Flags ------------
[SourceDisksNames]
1 = %DeviceDescription%, zttap300.sys
[SourceDisksFiles]
zttap300.sys = 1
[DestinationDirs]
zttap300.files = 11
zttap300.driver = 12
[zttap300.files]
;
[zttap300.driver]
zttap300.sys,,,6 ; COPYFLG_NOSKIP | COPYFLG_NOVERSIONCHECK

Binary file not shown.

Binary file not shown.

View file

@ -1,10 +1,10 @@
# Dockerfile for ZeroTier Central Controllers
FROM registry.zerotier.com/zerotier/ctlbuild:latest as builder
FROM registry.zerotier.com/zerotier/controller-builder:latest as builder
MAINTAINER Adam Ierymenko <adam.ierymenko@zerotier.com>, Grant Limberg <grant.limberg@zerotier.com>
ADD . /ZeroTierOne
RUN export PATH=$PATH:~/.cargo/bin && cd ZeroTierOne && make clean && make central-controller -j8
RUN cd ZeroTierOne && make clean && make central-controller -j8
FROM registry.zerotier.com/zerotier/ctlrun:latest
FROM registry.zerotier.com/zerotier/controller-run:latest
COPY --from=builder /ZeroTierOne/zerotier-one /usr/local/bin/zerotier-one
RUN chmod a+x /usr/local/bin/zerotier-one
RUN echo "/usr/local/lib64" > /etc/ld.so.conf.d/usr-local-lib64.conf && ldconfig
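This Dockerfile builds zerotier-one with make central-controller inside the builder image and copies only the resulting binary into the slimmer run-time base image. A hedged build sketch; the tag and -f path are placeholders for wherever this Dockerfile lives in the tree, and the build context must be the repository root because of the ADD . /ZeroTierOne step:
docker build -t zerotier/central-controller -f <path-to-this-Dockerfile> .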

View file

@ -1,26 +1,12 @@
# Dockerfile for building ZeroTier Central Controllers
FROM ubuntu:jammy as builder
FROM centos:8 as builder
MAINTAINER Adam Ierymenko <adam.ierymenko@zerotier.com>, Grant Limberg <grant.limberg@zerotier.com>
ARG git_branch=master
RUN apt update && apt upgrade -y
RUN apt -y install \
build-essential \
pkg-config \
bash \
clang \
libjemalloc2 \
libjemalloc-dev \
libpq5 \
libpq-dev \
openssl \
libssl-dev \
postgresql-client \
postgresql-client-common \
curl \
google-perftools \
libgoogle-perftools-dev \
protobuf-compiler
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
RUN yum update -y
RUN yum install -y https://download.postgresql.org/pub/repos/yum/reporpms/EL-8-x86_64/pgdg-redhat-repo-latest.noarch.rpm
RUN dnf -qy module disable postgresql
RUN yum -y install epel-release && yum -y update && yum clean all
RUN yum groupinstall -y "Development Tools" && yum clean all
RUN yum install -y bash cmake postgresql10 postgresql10-devel clang jemalloc jemalloc-devel libpqxx libpqxx-devel && yum clean all

View file

@ -1,15 +1,5 @@
FROM ubuntu:jammy
RUN apt update && apt upgrade -y
RUN apt -y install \
netcat \
postgresql-client \
postgresql-client-common \
libjemalloc2 \
libpq5 \
curl \
binutils \
linux-tools-gke \
perf-tools-unstable \
google-perftools
FROM centos:8
RUN yum install -y https://download.postgresql.org/pub/repos/yum/reporpms/EL-8-x86_64/pgdg-redhat-repo-latest.noarch.rpm
RUN dnf -qy module disable postgresql
RUN yum -y install epel-release && yum -y update && yum clean all
RUN yum install -y jemalloc jemalloc-devel postgresql10 libpqxx && yum clean all

View file

@ -1,16 +0,0 @@
registry = registry.zerotier.com/zerotier
all: controller-builder controller-runbase
buildx:
@echo "docker buildx create"
# docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
docker run --privileged --rm tonistiigi/binfmt --install all
@echo docker buildx create --name multiarch --driver docker-container --use
@echo docker buildx inspect --bootstrap
controller-builder: buildx
docker buildx build --no-cache --platform linux/amd64,linux/arm64 -t $(registry)/ctlbuild:latest -f Dockerfile.builder . --push
controller-runbase: buildx
docker buildx build --no-cache --platform linux/amd64,linux/arm64 -t $(registry)/ctlrun:latest -f Dockerfile.run_base . --push
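Both targets build for linux/amd64 and linux/arm64 and push straight to registry.zerotier.com, so the host needs the binfmt/buildx setup performed by the buildx target plus credentials for that registry. A hedged usage sketch:
docker login registry.zerotier.com
make all          # equivalent to: make controller-builder controller-runbase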

Some files were not shown because too many files have changed in this diff.