Merge branch 'develop' into GRA-1198

commit e759637750 by 0xdcarns, 2023-02-24 11:23:41 -05:00
38 changed files with 369 additions and 967 deletions

View file

@ -31,8 +31,10 @@ body:
label: Version
description: What version are you running?
options:
- v0.18.0
- v0.17.1
- v0.18.2
- v0.18.1
- v0.18.0
- v0.17.1
- v0.17.0
- v0.16.3
- v0.16.2

15
.github/pull_request_template.md vendored Normal file
View file

@ -0,0 +1,15 @@
## Describe your changes
## Provide the issue ticket number, if applicable and not already in the title
## Provide testing steps
## Checklist before requesting a review
- [ ] My changes affect 10 files or fewer.
- [ ] I have performed a self-review of my code and tested it.
- [ ] If it is a new feature, I have added thorough tests, and my code is <= 1450 lines.
- [ ] If it is a bugfix, my code is <= 200 lines.
- [ ] My functions are <= 80 lines.
- [ ] I have had my code reviewed by a peer.
- [ ] My unit tests pass locally.
- [ ] Netmaker is awesome.

View file

@ -13,61 +13,38 @@ jobs:
version:
runs-on: ubuntu-latest
outputs:
tag: ${{ steps.echo.outputs.tag }}
version: ${{ steps.echo.outputs.version }}
tag: ${{ steps.version.outputs.package_version }}
version: ${{ steps.version.outputs.version }}
steps:
- name: Get Version Number
id: version
run: |
if [[ -n "${{ github.event.inputs.version }}" ]]; then
NETMAKER_VERSION=${{ github.event.inputs.version }}
else
NETMAKER_VERSION=$(curl -fsSL https://api.github.com/repos/gravitl/netmaker/tags | grep 'name' | head -1 | cut -d'"' -f4)
fi
echo "NETMAKER_VERSION=${NETMAKER_VERSION}" >> $GITHUB_ENV
echo "VERSION=${NETMAKER_VERSION}" >> $GITHUB_OUTPUT
# remove everything but digits and . for package (deb, rpm, etc) versions
PACKAGE_VERSION=$(echo ${NETMAKER_VERSION} | tr -cd '[:digit:].')
echo "PACKAGE_VERSION=${PACKAGE_VERSION}" >> $GITHUB_ENV
- name: Echo
id: echo
run: |
echo ${{ env.NETMAKER_VERSION }}
echo ${{ env.PACKAGE_VERSION }}
if [[ -z ${{ env.NETMAKER_VERSION }} || -z ${{ env.PACKAGE_VERSION }} ]]
then
exit 1
fi
echo "::set-output name=tag::${{ env.NETMAKER_VERSION }}"
echo "::set-output name=version::${{ env.PACKAGE_VERSION }}"
netmaker:
echo "PACKAGE_VERSION=${PACKAGE_VERSION}" >> $GITHUB_OUTPUT
netmaker-nmctl:
runs-on: ubuntu-latest
needs: version
steps:
- name: set variables
run: |
echo ${{ needs.version.outputs.tag }} ${{ needs.version.outputs.version }}
TAG=${{needs.version.outputs.tag}}
VERSION=${{needs.version.outputs.version}}
if [[ -z ${VERSION} || -z ${TAG} ]]; then
exit 1
fi
echo "NETMAKER_VERSION=${TAG}" >> $GITHUB_ENV
echo "PACKAGE_VERSION=${VERSION}" >> $GITHUB_ENV
- name: Checkout
uses: actions/checkout@v3
with:
ref: release_${{ needs.version.outputs.version }}
fetch-depth: 0
- run: git fetch --force --tags
- name: Setup go
uses: actions/setup-go@v3
with:
go-version: 1.19
- name: Build
run: |
env CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -ldflags="-X 'main.version=${NETMAKER_VERSION}'" -o build/netmaker main.go
- name: Upload netmaker x86 to Release
uses: svenstaro/upload-release-action@v2
- name: GoReleaser
uses: goreleaser/goreleaser-action@v4
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: build/netmaker
tag: ${{ env.NETMAKER_VERSION }}
overwrite: true
prerelease: true
asset_name: netmaker
args: release --clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View file

@ -2,8 +2,9 @@ name: Build go-builder images
on:
workflow_dispatch:
schedule:
- cron: '00 21 * * SUN'
push:
branches:
- 'develop'
jobs:
go-builder:

View file

@ -48,7 +48,6 @@ jobs:
push: true
tags: ${{ github.repository }}:${{ env.TAG }}, ${{ github.repository }}:latest
build-args: |
version=${{ env.TAG }}
tags=ce
docker-ee:
@ -89,5 +88,4 @@ jobs:
push: true
tags: ${{ github.repository }}:${{ env.TAG }}-ee
build-args: |
version=${{ env.TAG }}
tags=ee

1
.gitignore vendored
View file

@ -24,3 +24,4 @@ data/
.idea/
netmaker.exe
netmaker.code-workspace
dist/

33
.goreleaser.yaml Normal file
View file

@ -0,0 +1,33 @@
before:
hooks:
# You may remove this if you don't use go modules.
- go mod tidy
builds:
- main: ./
id: "netmaker"
env:
- CGO_ENABLED=1
ldflags:
- -s -w
targets:
- linux_amd64
binary: netmaker
- main: ./cli
id: "nmctl"
env:
- CGO_ENABLED=0
ldflags:
- -s -w
targets:
- linux_amd64
- linux_arm64
- darwin_amd64
- darwin_arm64
- freebsd_amd64
- windows_amd64
binary: nmctl
archives:
- format: binary
name_template: '{{ .Binary }}_{{ .Os }}_{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ with .Mips }}_{{ . }}{{ end }}'
changelog:
skip: true

View file

@ -1,18 +1,13 @@
#first stage - builder
FROM gravitl/go-builder as builder
ARG version
ARG tags
WORKDIR /app
COPY . .
ENV GO111MODULE=auto
RUN apk add git
RUN GOOS=linux CGO_ENABLED=1 go build -ldflags="-s -X 'main.version=${version}'" -tags ${tags} .
RUN GOOS=linux CGO_ENABLED=1 go build -ldflags="-s -w" -tags ${tags} .
# RUN go build -tags=ee . -o netmaker main.go
FROM alpine:3.16.2
# add a c lib
RUN apk add gcompat iptables wireguard-tools
# set the working directory
WORKDIR /root/
RUN mkdir -p /etc/netclient/config

View file

@ -17,7 +17,7 @@
<p align="center">
<a href="https://github.com/gravitl/netmaker/releases">
<img src="https://img.shields.io/badge/Version-0.18.0-informational?style=flat-square" />
<img src="https://img.shields.io/badge/Version-0.18.2-informational?style=flat-square" />
</a>
<a href="https://hub.docker.com/r/gravitl/netmaker/tags">
<img src="https://img.shields.io/docker/pulls/gravitl/netmaker?label=downloads" />
@ -57,7 +57,7 @@
3. (optional) Prepare DNS - Set a wildcard subdomain in your DNS for Netmaker, e.g. *.netmaker.example.com
4. Run the script:
`sudo wget -qO /root/nm-quick-interactive.sh https://raw.githubusercontent.com/gravitl/netmaker/master/scripts/nm-quick-interactive.sh && sudo chmod +x /root/nm-quick-interactive.sh && sudo /root/nm-quick-interactive.sh`
`sudo wget -qO /root/nm-quick.sh https://raw.githubusercontent.com/gravitl/netmaker/master/scripts/nm-quick.sh && sudo chmod +x /root/nm-quick.sh && sudo /root/nm-quick.sh`
This script gives you the option to deploy the Community or Enterprise version of Netmaker. If deploying Enterprise, you get a free account with a 50-node limit by default. It also lets you use your own domain (recommended) or an auto-generated domain.

View file

@ -3,7 +3,7 @@ version: "3.4"
services:
netmaker:
container_name: netmaker
image: gravitl/netmaker:v0.18.0-ee
image: gravitl/netmaker:REPLACE_SERVER_IMAGE_TAG
restart: always
volumes:
- dnsconfig:/root/config/dnsconfig
@ -26,8 +26,8 @@ services:
MQ_HOST: "mq"
MQ_PORT: "443"
MQ_SERVER_PORT: "1883"
MQ_PASSWORD: "REPLACE_MQ_PASSWORD"
MQ_USERNAME: "REPLACE_MQ_USERNAME"
MQ_PASSWORD: "REPLACE_MQ_PASSWORD"
STUN_PORT: "3478"
VERBOSITY: "1"
METRICS_EXPORTER: "on"
@ -37,7 +37,7 @@ services:
- "3478:3478/udp"
netmaker-ui:
container_name: netmaker-ui
image: gravitl/netmaker-ui:v0.18.0
image: gravitl/netmaker-ui:REPLACE_UI_IMAGE_TAG
depends_on:
- netmaker
links:

View file

@ -3,7 +3,7 @@ version: "3.4"
services:
netmaker: # The Primary Server for running Netmaker
container_name: netmaker
image: gravitl/netmaker:v0.18.0
image: gravitl/netmaker:REPLACE_SERVER_IMAGE_TAG
restart: always
volumes: # Volume mounts necessary for sql, coredns, and mqtt
- dnsconfig:/root/config/dnsconfig
@ -44,7 +44,7 @@ services:
- "3478:3478/udp" # the stun port
netmaker-ui: # The Netmaker UI Component
container_name: netmaker-ui
image: gravitl/netmaker-ui:v0.18.0
image: gravitl/netmaker-ui:REPLACE_UI_IMAGE_TAG
depends_on:
- netmaker
links:

View file

@ -3,7 +3,7 @@ version: "3.4"
services:
netmaker:
container_name: netmaker
image: gravitl/netmaker:v0.18.0
image: gravitl/netmaker:REPLACE_SERVER_IMAGE_TAG
restart: always
volumes:
- dnsconfig:/root/config/dnsconfig
@ -34,7 +34,7 @@ services:
- "3478:3478/udp"
netmaker-ui:
container_name: netmaker-ui
image: gravitl/netmaker-ui:v0.18.0
image: gravitl/netmaker-ui:REPLACE_UI_IMAGE_TAG
depends_on:
- netmaker
links:

View file

@ -42,7 +42,6 @@ type ServerConfig struct {
AllowedOrigin string `yaml:"allowedorigin"`
NodeID string `yaml:"nodeid"`
RestBackend string `yaml:"restbackend"`
AgentBackend string `yaml:"agentbackend"`
MessageQueueBackend string `yaml:"messagequeuebackend"`
DNSMode string `yaml:"dnsmode"`
DisableRemoteIPCheck string `yaml:"disableremoteipcheck"`
@ -50,9 +49,7 @@ type ServerConfig struct {
SQLConn string `yaml:"sqlconn"`
Platform string `yaml:"platform"`
Database string `yaml:"database"`
DefaultNodeLimit int32 `yaml:"defaultnodelimit"`
Verbosity int32 `yaml:"verbosity"`
ServerCheckinInterval int64 `yaml:"servercheckininterval"`
AuthProvider string `yaml:"authprovider"`
OIDCIssuer string `yaml:"oidcissuer"`
ClientID string `yaml:"clientid"`

View file

@ -4,7 +4,6 @@ server:
masterkey: "" # defaults to 'secretkey' or MASTER_KEY (if set)
allowedorigin: "" # defaults to '*' or CORS_ALLOWED_ORIGIN (if set)
restbackend: "" # defaults to "on" or REST_BACKEND (if set)
agentbackend: "" # defaults to "on" or AGENT_BACKEND (if set)
dnsmode: "" # defaults to "on" or DNS_MODE (if set)
sqlconn: "" # defaults to "http://" or SQL_CONN (if set)
disableremoteipcheck: "" # defaults to "false" or DISABLE_REMOTE_IP_CHECK (if set)

View file

@ -4,7 +4,6 @@ server:
masterkey: ""
allowedorigin: "*"
restbackend: true
agentbackend: true
defaultnetname: "default"
defaultnetrange: "10.10.10.0/24"
createdefault: true
createdefault: true

View file

@ -10,7 +10,7 @@
//
// Schemes: https
// BasePath: /
// Version: 0.18.0
// Version: 0.18.2
// Host: netmaker.io
//
// Consumes:

View file

@ -389,7 +389,7 @@ func createExtClient(w http.ResponseWriter, r *http.Request) {
logger.Log(0, r.Header.Get("user"), "created new ext client on network", networkName)
w.WriteHeader(http.StatusOK)
go func() {
err = mq.PublishExtPeerUpdate(&node)
err = mq.PublishPeerUpdate()
if err != nil {
logger.Log(1, "error setting ext peers on "+nodeid+": "+err.Error())
}
@ -488,7 +488,7 @@ func updateExtClient(w http.ResponseWriter, r *http.Request) {
logger.Log(0, r.Header.Get("user"), "updated ext client", newExtClient.ClientID)
if changedEnabled { // need to send a peer update to the ingress node as enablement of one of its clients has changed
if ingressNode, err := logic.GetNodeByID(newclient.IngressGatewayID); err == nil {
if err = mq.PublishExtPeerUpdate(&ingressNode); err != nil {
if err = mq.PublishPeerUpdate(); err != nil {
logger.Log(1, "error setting ext peers on", ingressNode.ID.String(), ":", err.Error())
}
}
@ -567,11 +567,10 @@ func deleteExtClient(w http.ResponseWriter, r *http.Request) {
}
go func() {
err = mq.PublishExtPeerUpdate(&ingressnode)
if err != nil {
if err := mq.PublishPeerUpdate(); err != nil {
logger.Log(1, "error setting ext peers on "+ingressnode.ID.String()+": "+err.Error())
}
if err := mq.PublishDeleteExtClientDNS(&extclient); err != nil {
if err = mq.PublishDeleteExtClientDNS(&extclient); err != nil {
logger.Log(1, "error publishing dns update for extclient deletion", err.Error())
}
}()

View file

@ -415,7 +415,6 @@ func getUsersNodes(user models.User) ([]models.Node, error) {
func getNode(w http.ResponseWriter, r *http.Request) {
// set header.
w.Header().Set("Content-Type", "application/json")
nodeRequest := r.Header.Get("requestfrom") == "node"
var params = mux.Vars(r)
@ -434,14 +433,7 @@ func getNode(w http.ResponseWriter, r *http.Request) {
logic.ReturnErrorResponse(w, r, logic.FormatError(err, "internal"))
return
}
peerUpdate, err := logic.GetPeerUpdate(&node, host)
if err != nil && !database.IsEmptyRecord(err) {
logger.Log(0, r.Header.Get("user"),
fmt.Sprintf("error fetching wg peers config for node [ %s ]: %v", nodeid, err))
logic.ReturnErrorResponse(w, r, logic.FormatError(err, "internal"))
return
}
hostPeerUpdate, err := logic.GetPeerUpdateForHost(host)
hostPeerUpdate, err := logic.GetPeerUpdateForHost(node.Network, host, nil)
if err != nil && !database.IsEmptyRecord(err) {
logger.Log(0, r.Header.Get("user"),
fmt.Sprintf("error fetching wg peers config for host [ %s ]: %v", host.ID.String(), err))
@ -449,21 +441,13 @@ func getNode(w http.ResponseWriter, r *http.Request) {
return
}
server := servercfg.GetServerInfo()
network, err := logic.GetNetwork(node.Network)
if err != nil {
logger.Log(0, r.Header.Get("user"),
fmt.Sprintf("error fetching network for node [ %s ] info: %v", nodeid, err))
logic.ReturnErrorResponse(w, r, logic.FormatError(err, "internal"))
return
}
legacy := node.Legacy(host, &server, &network)
response := models.NodeGet{
Node: *legacy,
Node: node,
Host: *host,
Peers: peerUpdate.Peers,
HostPeers: hostPeerUpdate.Peers,
Peers: hostPeerUpdate.NodePeers,
ServerConfig: server,
PeerIDs: peerUpdate.PeerIDs,
PeerIDs: hostPeerUpdate.PeerIDs,
}
if servercfg.Is_EE && nodeRequest {
@ -632,7 +616,7 @@ func createNode(w http.ResponseWriter, r *http.Request) {
return
}
}
hostPeerUpdate, err := logic.GetPeerUpdateForHost(&data.Host)
hostPeerUpdate, err := logic.GetPeerUpdateForHost(networkName, &data.Host, nil)
if err != nil && !database.IsEmptyRecord(err) {
logger.Log(0, r.Header.Get("user"),
fmt.Sprintf("error fetching wg peers config for host [ %s ]: %v", data.Host.ID.String(), err))
@ -1001,10 +985,17 @@ func deleteNode(w http.ResponseWriter, r *http.Request) {
if !fromNode { // notify node change
runUpdates(&node, false)
}
go func() { // notify of peer change
if err := mq.PublishPeerUpdate(); err != nil {
go func(deletedNode *models.Node, fromNode bool) { // notify of peer change
var err error
if fromNode {
err = mq.PublishDeletedNodePeerUpdate(deletedNode)
} else {
err = mq.PublishPeerUpdate()
}
if err != nil {
logger.Log(1, "error publishing peer update ", err.Error())
}
host, err := logic.GetHost(node.HostID.String())
if err != nil {
logger.Log(1, "failed to retrieve host for node", node.ID.String(), err.Error())
@ -1012,7 +1003,7 @@ func deleteNode(w http.ResponseWriter, r *http.Request) {
if err := mq.PublishDNSDelete(&node, host); err != nil {
logger.Log(1, "error publishing dns update", err.Error())
}
}()
}(&node, fromNode)
}
func runUpdates(node *models.Node, ifaceDelta bool) {

View file

@ -3,17 +3,12 @@ server:
apiconn: "api.ping.clustercat.com:443"
apihost: ""
apiport: "8081"
grpcconn: "grpc.ping.clustercat.com:443"
grpchost: ""
grpcport: "50051"
grpcsecure: "on"
mqhost: "localhost"
masterkey: "secretkey"
dnskey: ""
allowedorigin: "*"
nodeid: "netmaker"
restbackend: "on"
agentbackend: "on"
messagequeuebackend: "on"
dnsmode: "on"
disableremoteipcheck: ""
@ -22,9 +17,7 @@ server:
sqlconn: ""
platform: ""
database: "sqlite"
defaultnodelimit: ""
verbosity: 3
servercheckininterval: ""
authprovider: ""
clientid: ""
clientsecret: ""

View file

@ -1,5 +1,6 @@
{
# LetsEncrypt account
# ZeroSSL account
acme_ca https://acme.zerossl.com/v2/DV90
email YOUR_EMAIL
}

View file

@ -16,7 +16,7 @@ spec:
hostNetwork: true
containers:
- name: netclient
image: gravitl/netclient:v0.18.0
image: gravitl/netclient:v0.18.2
env:
- name: TOKEN
value: "TOKEN_VALUE"

View file

@ -28,7 +28,7 @@ spec:
# - "<node label value>"
containers:
- name: netclient
image: gravitl/netclient:v0.18.0
image: gravitl/netclient:v0.18.2
env:
- name: TOKEN
value: "TOKEN_VALUE"

View file

@ -81,7 +81,7 @@ spec:
value: "Kubernetes"
- name: VERBOSITY
value: "3"
image: gravitl/netmaker:v0.18.0
image: gravitl/netmaker:v0.18.2
imagePullPolicy: Always
name: netmaker
ports:

View file

@ -15,7 +15,7 @@ spec:
spec:
containers:
- name: netmaker-ui
image: gravitl/netmaker-ui:v0.18.0
image: gravitl/netmaker-ui:v0.18.2
ports:
- containerPort: 443
env:

View file

@ -4,7 +4,6 @@ import (
"time"
"github.com/gravitl/netmaker/logger"
"github.com/gravitl/netmaker/logic"
proxy_metrics "github.com/gravitl/netmaker/metrics"
"github.com/gravitl/netmaker/models"
"golang.zx2c4.com/wireguard/wgctrl"
@ -71,42 +70,6 @@ func Collect(iface, server, network string, peerMap models.PeerMap) (*models.Met
return &metrics, nil
}
// GetExchangedBytesForNode - get exchanged bytes for current node peers
func GetExchangedBytesForNode(node *models.Node, metrics *models.Metrics) error {
host, err := logic.GetHost(node.HostID.String())
if err != nil {
return err
}
peers, err := logic.GetPeerUpdate(node, host)
if err != nil {
logger.Log(0, "Failed to get peers: ", err.Error())
return err
}
wgclient, err := wgctrl.New()
if err != nil {
return err
}
defer wgclient.Close()
device, err := wgclient.Device(models.WIREGUARD_INTERFACE)
if err != nil {
return err
}
for _, currPeer := range device.Peers {
id := peers.PeerIDs[currPeer.PublicKey.String()].ID
address := peers.PeerIDs[currPeer.PublicKey.String()].Address
if id == "" || address == "" {
logger.Log(0, "attempted to parse metrics for invalid peer from server", id, address)
continue
}
logger.Log(2, "collecting exchanged bytes info for peer: ", address)
peerMetric := metrics.Connectivity[id]
peerMetric.TotalReceived = currPeer.ReceiveBytes
peerMetric.TotalSent = currPeer.TransmitBytes
metrics.Connectivity[id] = peerMetric
}
return nil
}
// == used to fill zero value data for non connected peers ==
func fillUnconnectedData(metrics *models.Metrics, peerMap models.PeerMap) {
for r := range peerMap {

View file

@ -382,22 +382,6 @@ func FindRelay(node *models.Node) *models.Node {
return nil
}
func findNode(ip string) (*models.Node, error) {
nodes, err := GetAllNodes()
if err != nil {
return nil, err
}
for _, node := range nodes {
if node.Address.IP.String() == ip {
return &node, nil
}
if node.Address6.IP.String() == ip {
return &node, nil
}
}
return nil, errors.New("node not found")
}
// GetNetworkIngresses - gets the gateways of a network
func GetNetworkIngresses(network string) ([]models.Node, error) {
var ingresses []models.Node

View file

@ -6,192 +6,16 @@ import (
"log"
"net"
"net/netip"
"sort"
"strconv"
"strings"
"time"
"github.com/gravitl/netmaker/database"
"github.com/gravitl/netmaker/logger"
"github.com/gravitl/netmaker/logic/acls/nodeacls"
"github.com/gravitl/netmaker/models"
"github.com/gravitl/netmaker/netclient/ncutils"
"github.com/gravitl/netmaker/servercfg"
"golang.org/x/exp/slices"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)
// GetPeersforProxy calculates the peers for a proxy
// TODO ==========================
// TODO ==========================
// TODO ==========================
// TODO ==========================
// TODO ==========================
// revisit this logic with new host/node models.
func GetPeersForProxy(node *models.Node, onlyPeers bool) (models.ProxyManagerPayload, error) {
proxyPayload := models.ProxyManagerPayload{}
var peers []wgtypes.PeerConfig
peerConfMap := make(map[string]models.PeerConf)
var err error
currentPeers, err := GetNetworkNodes(node.Network)
if err != nil {
return proxyPayload, err
}
if !onlyPeers {
if node.IsRelayed {
relayNode := FindRelay(node)
relayHost, err := GetHost(relayNode.HostID.String())
if err != nil {
return proxyPayload, err
}
if relayNode != nil {
host, err := GetHost(relayNode.HostID.String())
if err != nil {
logger.Log(0, "error retrieving host for relay node", relayNode.HostID.String(), err.Error())
}
relayEndpoint, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", relayHost.EndpointIP, host.ListenPort))
if err != nil {
logger.Log(1, "failed to resolve relay node endpoint: ", err.Error())
}
proxyPayload.IsRelayed = true
proxyPayload.RelayedTo = relayEndpoint
} else {
logger.Log(0, "couldn't find relay node for: ", node.ID.String())
}
}
if node.IsRelay {
host, err := GetHost(node.HostID.String())
if err != nil {
logger.Log(0, "error retrieving host for relay node", node.ID.String(), err.Error())
}
relayedNodes, err := GetRelayedNodes(node)
if err != nil {
logger.Log(1, "failed to relayed nodes: ", node.ID.String(), err.Error())
proxyPayload.IsRelay = false
} else {
relayPeersMap := make(map[string]models.RelayedConf)
for _, relayedNode := range relayedNodes {
relayedNode := relayedNode
payload, err := GetPeersForProxy(&relayedNode, true)
if err == nil {
relayedHost, err := GetHost(relayedNode.HostID.String())
if err != nil {
logger.Log(0, "error retrieving host for relayNode", relayedNode.ID.String(), err.Error())
}
relayedEndpoint, udpErr := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", relayedHost.EndpointIP, host.ListenPort))
if udpErr == nil {
relayPeersMap[host.PublicKey.String()] = models.RelayedConf{
RelayedPeerEndpoint: relayedEndpoint,
RelayedPeerPubKey: relayedHost.PublicKey.String(),
Peers: payload.Peers,
}
}
}
}
proxyPayload.IsRelay = true
proxyPayload.RelayedPeerConf = relayPeersMap
}
}
}
for _, peer := range currentPeers {
if peer.ID == node.ID {
//skip yourself
continue
}
host, err := GetHost(peer.HostID.String())
if err != nil {
continue
}
proxyStatus := host.ProxyEnabled
listenPort := host.ListenPort
if proxyStatus {
listenPort = host.ProxyListenPort
if listenPort == 0 {
listenPort = models.NmProxyPort
}
} else if listenPort == 0 {
listenPort = host.ListenPort
}
endpoint, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", host.EndpointIP, listenPort))
if err != nil {
logger.Log(1, "failed to resolve udp addr for node: ", peer.ID.String(), host.EndpointIP.String(), err.Error())
continue
}
allowedips := GetAllowedIPs(node, &peer, nil)
var keepalive time.Duration
if node.PersistentKeepalive != 0 {
// set_keepalive
keepalive = node.PersistentKeepalive
}
peers = append(peers, wgtypes.PeerConfig{
PublicKey: host.PublicKey,
Endpoint: endpoint,
AllowedIPs: allowedips,
PersistentKeepaliveInterval: &keepalive,
ReplaceAllowedIPs: true,
})
peerConfMap[host.PublicKey.String()] = models.PeerConf{
Address: net.ParseIP(peer.PrimaryAddress()),
Proxy: proxyStatus,
PublicListenPort: int32(listenPort),
}
if !onlyPeers && peer.IsRelayed {
relayNode := FindRelay(&peer)
if relayNode != nil {
relayHost, err := GetHost(relayNode.HostID.String())
if err != nil {
logger.Log(0, "error retrieving host for relayNode", relayNode.ID.String(), err.Error())
continue
}
relayTo, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", relayHost.EndpointIP, relayHost.ListenPort))
if err == nil {
peerConfMap[host.PublicKey.String()] = models.PeerConf{
IsRelayed: true,
RelayedTo: relayTo,
Address: net.ParseIP(peer.PrimaryAddress()),
Proxy: proxyStatus,
PublicListenPort: int32(listenPort),
}
}
}
}
}
if node.IsIngressGateway {
var extPeers []wgtypes.PeerConfig
extPeers, peerConfMap, err = getExtPeersForProxy(node, peerConfMap)
if err == nil {
peers = append(peers, extPeers...)
} else if !database.IsEmptyRecord(err) {
logger.Log(1, "error retrieving external clients:", err.Error())
}
}
proxyPayload.IsIngress = node.IsIngressGateway
addr := node.Address
if addr.String() == "" {
addr = node.Address6
}
proxyPayload.Peers = peers
proxyPayload.PeerMap = peerConfMap
//proxyPayload.Network = node.Network
//proxyPayload.InterfaceName = node.Interface
//hardcode or read from host ??
proxyPayload.InterfaceName = models.WIREGUARD_INTERFACE
return proxyPayload, nil
}
// GetProxyUpdateForHost - gets the proxy update for host
func GetProxyUpdateForHost(host *models.Host) (models.ProxyManagerPayload, error) {
proxyPayload := models.ProxyManagerPayload{
@ -217,7 +41,7 @@ func GetProxyUpdateForHost(host *models.Host) (models.ProxyManagerPayload, error
relayPeersMap := make(map[string]models.RelayedConf)
for _, relayedHost := range relayedHosts {
relayedHost := relayedHost
payload, err := GetPeerUpdateForHost(&relayedHost)
payload, err := GetPeerUpdateForHost("", &relayedHost, nil)
if err == nil {
relayedEndpoint, udpErr := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", relayedHost.EndpointIP, GetPeerListenPort(&relayedHost)))
if udpErr == nil {
@ -294,20 +118,29 @@ func GetProxyUpdateForHost(host *models.Host) (models.ProxyManagerPayload, error
}
// GetPeerUpdateForHost - gets the consolidated peer update for the host from all networks
func GetPeerUpdateForHost(host *models.Host) (models.HostPeerUpdate, error) {
func GetPeerUpdateForHost(network string, host *models.Host, deletedNode *models.Node) (models.HostPeerUpdate, error) {
if host == nil {
return models.HostPeerUpdate{}, errors.New("host is nil")
}
// track which nodes are deleted
// after peer calculation, if peer not in list, add delete config of peer
hostPeerUpdate := models.HostPeerUpdate{
Host: *host,
Server: servercfg.GetServer(),
PeerIDs: make(models.HostPeerMap),
HostPeerIDs: make(models.HostPeerMap, 0),
ServerVersion: servercfg.GetVersion(),
ServerAddrs: []models.ServerAddr{},
IngressInfo: models.IngressInfo{
ExtPeers: make(map[string]models.ExtClientInfo),
},
EgressInfo: make(map[string]models.EgressInfo),
PeerIDs: make(models.PeerMap, 0),
Peers: []wgtypes.PeerConfig{},
NodePeers: []wgtypes.PeerConfig{},
}
var deletedNodes = []models.Node{} // used to track deleted nodes
if deletedNode != nil {
deletedNodes = append(deletedNodes, *deletedNode)
}
logger.Log(1, "peer update for host ", host.ID.String())
peerIndexMap := make(map[string]int)
@ -316,7 +149,7 @@ func GetPeerUpdateForHost(host *models.Host) (models.HostPeerUpdate, error) {
if err != nil {
continue
}
if !node.Connected || node.Action == models.NODE_DELETE || node.PendingDelete {
if !node.Connected || node.PendingDelete || node.Action == models.NODE_DELETE {
continue
}
currentPeers, err := GetNetworkNodes(node.Network)
@ -329,10 +162,14 @@ func GetPeerUpdateForHost(host *models.Host) (models.HostPeerUpdate, error) {
nodePeerMap = make(map[string]models.PeerRouteInfo)
}
for _, peer := range currentPeers {
peer := peer
if peer.ID == node.ID {
logger.Log(2, "peer update, skipping self")
//skip yourself
continue
}
if peer.Action == models.NODE_DELETE || peer.PendingDelete {
deletedNodes = append(deletedNodes, peer) // track deleted node for peer update
continue
}
var peerConfig wgtypes.PeerConfig
@ -342,7 +179,7 @@ func GetPeerUpdateForHost(host *models.Host) (models.HostPeerUpdate, error) {
return models.HostPeerUpdate{}, err
}
if !peer.Connected || peer.Action == models.NODE_DELETE || peer.PendingDelete {
if !peer.Connected {
logger.Log(2, "peer update, skipping unconnected node", peer.ID.String())
//skip unconnected nodes
continue
@ -394,6 +231,7 @@ func GetPeerUpdateForHost(host *models.Host) (models.HostPeerUpdate, error) {
_, extPeerIDAndAddrs, err := getExtPeers(&peer)
if err == nil {
for _, extPeerIdAndAddr := range extPeerIDAndAddrs {
extPeerIdAndAddr := extPeerIdAndAddr
nodePeerMap[extPeerIdAndAddr.ID] = models.PeerRouteInfo{
PeerAddr: net.IPNet{
IP: net.ParseIP(extPeerIdAndAddr.Address),
@ -419,28 +257,40 @@ func GetPeerUpdateForHost(host *models.Host) (models.HostPeerUpdate, error) {
}
}
if _, ok := hostPeerUpdate.PeerIDs[peerHost.PublicKey.String()]; !ok {
hostPeerUpdate.PeerIDs[peerHost.PublicKey.String()] = make(map[string]models.IDandAddr)
var nodePeer wgtypes.PeerConfig
if _, ok := hostPeerUpdate.HostPeerIDs[peerHost.PublicKey.String()]; !ok {
hostPeerUpdate.HostPeerIDs[peerHost.PublicKey.String()] = make(map[string]models.IDandAddr)
hostPeerUpdate.Peers = append(hostPeerUpdate.Peers, peerConfig)
peerIndexMap[peerHost.PublicKey.String()] = len(hostPeerUpdate.Peers) - 1
hostPeerUpdate.PeerIDs[peerHost.PublicKey.String()][peer.ID.String()] = models.IDandAddr{
hostPeerUpdate.HostPeerIDs[peerHost.PublicKey.String()][peer.ID.String()] = models.IDandAddr{
ID: peer.ID.String(),
Address: peer.PrimaryAddress(),
Name: peerHost.Name,
Network: peer.Network,
}
nodePeer = peerConfig
} else {
peerAllowedIPs := hostPeerUpdate.Peers[peerIndexMap[peerHost.PublicKey.String()]].AllowedIPs
peerAllowedIPs = append(peerAllowedIPs, allowedips...)
hostPeerUpdate.Peers[peerIndexMap[peerHost.PublicKey.String()]].AllowedIPs = peerAllowedIPs
hostPeerUpdate.PeerIDs[peerHost.PublicKey.String()][peer.ID.String()] = models.IDandAddr{
hostPeerUpdate.HostPeerIDs[peerHost.PublicKey.String()][peer.ID.String()] = models.IDandAddr{
ID: peer.ID.String(),
Address: peer.PrimaryAddress(),
Name: peerHost.Name,
Network: peer.Network,
}
nodePeer = hostPeerUpdate.Peers[peerIndexMap[peerHost.PublicKey.String()]]
}
if node.Network == network { // add to peers map for metrics
hostPeerUpdate.PeerIDs[peerHost.PublicKey.String()] = models.IDandAddr{
ID: peer.ID.String(),
Address: peer.PrimaryAddress(),
Name: peerHost.Name,
Network: peer.Network,
}
hostPeerUpdate.NodePeers = append(hostPeerUpdate.NodePeers, nodePeer)
}
}
var extPeers []wgtypes.PeerConfig
var extPeerIDAndAddrs []models.IDandAddr
@ -448,6 +298,7 @@ func GetPeerUpdateForHost(host *models.Host) (models.HostPeerUpdate, error) {
extPeers, extPeerIDAndAddrs, err = getExtPeers(&node)
if err == nil {
for _, extPeerIdAndAddr := range extPeerIDAndAddrs {
extPeerIdAndAddr := extPeerIdAndAddr
nodePeerMap[extPeerIdAndAddr.ID] = models.PeerRouteInfo{
PeerAddr: net.IPNet{
IP: net.ParseIP(extPeerIdAndAddr.Address),
@ -459,8 +310,9 @@ func GetPeerUpdateForHost(host *models.Host) (models.HostPeerUpdate, error) {
}
hostPeerUpdate.Peers = append(hostPeerUpdate.Peers, extPeers...)
for _, extPeerIdAndAddr := range extPeerIDAndAddrs {
hostPeerUpdate.PeerIDs[extPeerIdAndAddr.ID] = make(map[string]models.IDandAddr)
hostPeerUpdate.PeerIDs[extPeerIdAndAddr.ID][extPeerIdAndAddr.ID] = models.IDandAddr{
extPeerIdAndAddr := extPeerIdAndAddr
hostPeerUpdate.HostPeerIDs[extPeerIdAndAddr.ID] = make(map[string]models.IDandAddr)
hostPeerUpdate.HostPeerIDs[extPeerIdAndAddr.ID][extPeerIdAndAddr.ID] = models.IDandAddr{
ID: extPeerIdAndAddr.ID,
Address: extPeerIdAndAddr.Address,
Name: extPeerIdAndAddr.Name,
@ -480,8 +332,11 @@ func GetPeerUpdateForHost(host *models.Host) (models.HostPeerUpdate, error) {
ExtPeerKey: extPeerIdAndAddr.ID,
Peers: nodePeerMap,
}
if node.Network == network {
hostPeerUpdate.PeerIDs[extPeerIdAndAddr.ID] = extPeerIdAndAddr
hostPeerUpdate.NodePeers = append(hostPeerUpdate.NodePeers, extPeers...)
}
}
} else if !database.IsEmptyRecord(err) {
logger.Log(1, "error retrieving external clients:", err.Error())
}
@ -500,9 +355,32 @@ func GetPeerUpdateForHost(host *models.Host) (models.HostPeerUpdate, error) {
}
}
// run through delete nodes
if len(deletedNodes) > 0 {
for i := range deletedNodes {
delNode := deletedNodes[i]
delHost, err := GetHost(delNode.HostID.String())
if err != nil {
continue
}
if _, ok := hostPeerUpdate.HostPeerIDs[delHost.PublicKey.String()]; !ok {
var peerConfig = wgtypes.PeerConfig{}
peerConfig.PublicKey = delHost.PublicKey
peerConfig.Endpoint = &net.UDPAddr{
IP: delHost.EndpointIP,
Port: GetPeerListenPort(delHost),
}
peerConfig.Remove = true
peerConfig.AllowedIPs = []net.IPNet{delNode.Address, delNode.Address6}
hostPeerUpdate.Peers = append(hostPeerUpdate.Peers, peerConfig)
}
}
}
return hostPeerUpdate, nil
}
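For orientation, the deleted-nodes branch just above reduces to handing each host a WireGuard peer entry flagged for removal. A minimal, self-contained sketch of that pattern follows; the helper name removalPeerConfig is illustrative only and is not part of this codebase or commit.

package main

import (
	"fmt"
	"net"

	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

// removalPeerConfig builds a peer entry that tells the client to drop the peer.
// Remove is the field that matters; the endpoint and allowed IPs only identify
// which peer is being removed, mirroring how the deleted node's addresses are used above.
func removalPeerConfig(pubKey wgtypes.Key, endpointIP net.IP, port int, allowed []net.IPNet) wgtypes.PeerConfig {
	return wgtypes.PeerConfig{
		PublicKey:  pubKey,
		Remove:     true,
		Endpoint:   &net.UDPAddr{IP: endpointIP, Port: port},
		AllowedIPs: allowed,
	}
}

func main() {
	priv, err := wgtypes.GeneratePrivateKey()
	if err != nil {
		panic(err)
	}
	cfg := removalPeerConfig(priv.PublicKey(), net.ParseIP("203.0.113.10"), 51821, nil)
	fmt.Printf("peer %s marked for removal: %v\n", cfg.PublicKey, cfg.Remove)
}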
// GetPeerListenPort - given a host, retrieve its appropriate listening port
func GetPeerListenPort(host *models.Host) int {
peerPort := host.ListenPort
if host.ProxyEnabled {
@ -515,284 +393,6 @@ func GetPeerListenPort(host *models.Host) int {
return peerPort
}
// GetPeerUpdate - gets a wireguard peer config for each peer of a node
func GetPeerUpdate(node *models.Node, host *models.Host) (models.PeerUpdate, error) {
log.Println("peer update for node ", node.ID)
peerUpdate := models.PeerUpdate{
Network: node.Network,
ServerVersion: ncutils.Version,
PeerIDs: make(models.PeerMap),
}
currentPeers, err := GetNetworkNodes(node.Network)
if err != nil {
log.Println("no network nodes")
return models.PeerUpdate{}, err
}
for _, peer := range currentPeers {
var peerConfig wgtypes.PeerConfig
peerHost, err := GetHost(peer.HostID.String())
if err != nil {
log.Println("no peer host", err)
return models.PeerUpdate{}, err
}
if peer.ID == node.ID {
log.Println("peer update, skipping self")
//skip yourself
continue
}
if !peer.Connected {
log.Println("peer update, skipping unconnected node")
//skip unconnected nodes
continue
}
if !nodeacls.AreNodesAllowed(nodeacls.NetworkID(node.Network), nodeacls.NodeID(node.ID.String()), nodeacls.NodeID(peer.ID.String())) {
log.Println("peer update, skipping node for acl")
//skip if not permitted by acl
continue
}
peerConfig.PublicKey = peerHost.PublicKey
peerConfig.PersistentKeepaliveInterval = &peer.PersistentKeepalive
peerConfig.ReplaceAllowedIPs = true
uselocal := false
if host.EndpointIP.String() == peerHost.EndpointIP.String() {
//peer is on same network
// set to localaddress
uselocal = true
if node.LocalAddress.IP == nil {
// use public endpint
uselocal = false
}
if node.LocalAddress.String() == peer.LocalAddress.String() {
uselocal = false
}
}
peerConfig.Endpoint = &net.UDPAddr{
IP: peerHost.EndpointIP,
Port: peerHost.ListenPort,
}
if peerHost.ProxyEnabled {
peerConfig.Endpoint.Port = GetPeerListenPort(peerHost)
}
if uselocal {
peerConfig.Endpoint.IP = peer.LocalAddress.IP
}
allowedips := getNodeAllowedIPs(&peer, node)
if peer.IsIngressGateway {
for _, entry := range peer.IngressGatewayRange {
_, cidr, err := net.ParseCIDR(string(entry))
if err == nil {
allowedips = append(allowedips, *cidr)
}
}
}
if peer.IsEgressGateway {
allowedips = append(allowedips, getEgressIPs(node, &peer)...)
}
peerUpdate.PeerIDs[peerHost.PublicKey.String()] = models.IDandAddr{
ID: peer.ID.String(),
Address: peer.PrimaryAddress(),
Name: peerHost.Name,
Network: peer.Network,
}
peerConfig.AllowedIPs = allowedips
peerUpdate.Peers = append(peerUpdate.Peers, peerConfig)
}
return peerUpdate, nil
}
// func getRelayAllowedIPs(node, peer *models.Node) []net.IPNet {
// var allowedips []net.IPNet
// var allowedip net.IPNet
// for _, addr := range peer.RelayAddrs {
// if node.Address.IP.String() == addr {
// continue
// }
// if node.Address6.IP.String() == addr {
// continue
// }
// allowedip.IP = net.ParseIP(addr)
// allowedips = append(allowedips, allowedip)
// }
// return allowedips
// }
// GetPeerUpdateLegacy - gets a wireguard peer config for each peer of a node
func GetPeerUpdateLegacy(node *models.Node) (models.PeerUpdate, error) {
var peerUpdate models.PeerUpdate
var peers []wgtypes.PeerConfig
var serverNodeAddresses = []models.ServerAddr{}
var peerMap = make(models.PeerMap)
var metrics *models.Metrics
if servercfg.Is_EE {
metrics, _ = GetMetrics(node.ID.String())
}
if metrics == nil {
metrics = &models.Metrics{}
}
if metrics.FailoverPeers == nil {
metrics.FailoverPeers = make(map[string]string)
}
// udppeers = the peers parsed from the local interface
// gives us correct port to reach
udppeers, errN := database.GetPeers(node.Network)
if errN != nil {
logger.Log(2, errN.Error())
}
currentPeers, err := GetNetworkNodes(node.Network)
if err != nil {
return models.PeerUpdate{}, err
}
host, err := GetHost(node.HostID.String())
if err != nil {
return peerUpdate, err
}
if node.IsRelayed && !host.ProxyEnabled {
return GetPeerUpdateForRelayedNode(node, udppeers)
}
// #1 Set Keepalive values: set_keepalive
// #2 Set local address: set_local - could be a LOT BETTER and fix some bugs with additional logic
// #3 Set allowedips: set_allowedips
for _, peer := range currentPeers {
peerHost, err := GetHost(peer.HostID.String())
if err != nil {
logger.Log(0, "error retrieving host for peer", node.ID.String(), err.Error())
return models.PeerUpdate{}, err
}
if peer.ID == node.ID {
//skip yourself
continue
}
if node.Connected {
//skip unconnected nodes
continue
}
// if the node is not a server, set the endpoint
var setEndpoint = true
if peer.IsRelayed {
if !peerHost.ProxyEnabled && !(node.IsRelay && ncutils.StringSliceContains(node.RelayAddrs, peer.PrimaryAddress())) {
//skip -- will be added to relay
continue
} else if node.IsRelay && ncutils.StringSliceContains(node.RelayAddrs, peer.PrimaryAddress()) {
// dont set peer endpoint if it's relayed by node
setEndpoint = false
}
}
if !nodeacls.AreNodesAllowed(nodeacls.NetworkID(node.Network), nodeacls.NodeID(node.ID.String()), nodeacls.NodeID(peer.ID.String())) {
//skip if not permitted by acl
continue
}
if len(metrics.FailoverPeers[peer.ID.String()]) > 0 && IsFailoverPresent(node.Network) {
logger.Log(2, "peer", peer.ID.String(), peer.PrimaryAddress(), "was found to be in failover peers list for node", node.ID.String(), node.PrimaryAddress())
continue
}
if err != nil {
return models.PeerUpdate{}, err
}
host, err := GetHost(node.HostID.String())
if err != nil {
logger.Log(0, "error retrieving host for node", node.ID.String(), err.Error())
return models.PeerUpdate{}, err
}
if host.EndpointIP.String() == peerHost.EndpointIP.String() {
//peer is on same network
// set_local
if node.LocalAddress.String() != peer.LocalAddress.String() && peer.LocalAddress.IP != nil {
peerHost.EndpointIP = peer.LocalAddress.IP
if peerHost.ListenPort != 0 {
peerHost.ListenPort = GetPeerListenPort(peerHost)
}
} else {
continue
}
}
// set address if setEndpoint is true
// otherwise, will get inserted as empty value
var address *net.UDPAddr
// Sets ListenPort to UDP Hole Punching Port assuming:
// - UDP Hole Punching is enabled
// - udppeers retrieval did not return an error
// - the endpoint is valid
if setEndpoint {
var setUDPPort = false
if CheckEndpoint(udppeers[peerHost.PublicKey.String()]) {
endpointstring := udppeers[peerHost.PublicKey.String()]
endpointarr := strings.Split(endpointstring, ":")
if len(endpointarr) == 2 {
port, err := strconv.Atoi(endpointarr[1])
if err == nil {
setUDPPort = true
peerHost.ListenPort = port
}
}
}
// if udp hole punching is on, but udp hole punching did not set it, use the LocalListenPort instead
// or, if port is for some reason zero use the LocalListenPort
// but only do this if LocalListenPort is not zero
if ((!setUDPPort) || peerHost.ListenPort == 0) && peerHost.ListenPort != 0 {
peerHost.ListenPort = GetPeerListenPort(peerHost)
}
endpoint := peerHost.EndpointIP.String() + ":" + strconv.FormatInt(int64(peerHost.ListenPort), 10)
address, err = net.ResolveUDPAddr("udp", endpoint)
if err != nil {
return models.PeerUpdate{}, err
}
}
allowedips := GetAllowedIPs(node, &peer, metrics)
var keepalive time.Duration
if node.PersistentKeepalive != 0 {
// set_keepalive
keepalive = node.PersistentKeepalive
}
var peerData = wgtypes.PeerConfig{
PublicKey: peerHost.PublicKey,
Endpoint: address,
ReplaceAllowedIPs: true,
AllowedIPs: allowedips,
PersistentKeepaliveInterval: &keepalive,
}
peers = append(peers, peerData)
peerMap[peerHost.PublicKey.String()] = models.IDandAddr{
Name: peerHost.Name,
ID: peer.ID.String(),
Address: peer.PrimaryAddress(),
IsServer: "no",
}
}
if node.IsIngressGateway {
extPeers, idsAndAddr, err := getExtPeers(node)
if err == nil {
peers = append(peers, extPeers...)
for i := range idsAndAddr {
peerMap[idsAndAddr[i].ID] = idsAndAddr[i]
}
} else if !database.IsEmptyRecord(err) {
logger.Log(1, "error retrieving external clients:", err.Error())
}
}
peerUpdate.Network = node.Network
peerUpdate.ServerVersion = servercfg.Version
sort.SliceStable(peers[:], func(i, j int) bool {
return peers[i].PublicKey.String() < peers[j].PublicKey.String()
})
peerUpdate.Peers = peers
peerUpdate.ServerAddrs = serverNodeAddresses
peerUpdate.PeerIDs = peerMap
return peerUpdate, nil
}
func getExtPeers(node *models.Node) ([]wgtypes.PeerConfig, []models.IDandAddr, error) {
var peers []wgtypes.PeerConfig
var idsAndAddr []models.IDandAddr
@ -959,147 +559,6 @@ func GetAllowedIPs(node, peer *models.Node, metrics *models.Metrics) []net.IPNet
return allowedips
}
// GetPeerUpdateForRelayedNode - calculates peer update for a relayed node by getting the relay
// copying the relay node's allowed ips and making appropriate substitutions
func GetPeerUpdateForRelayedNode(node *models.Node, udppeers map[string]string) (models.PeerUpdate, error) {
var peerUpdate models.PeerUpdate
var peers []wgtypes.PeerConfig
var serverNodeAddresses = []models.ServerAddr{}
var allowedips []net.IPNet
//find node that is relaying us
relay := FindRelay(node)
if relay == nil {
return models.PeerUpdate{}, errors.New("not found")
}
//add relay to lists of allowed ip
if relay.Address.IP != nil {
relayIP := relay.Address
allowedips = append(allowedips, relayIP)
}
if relay.Address6.IP != nil {
relayIP6 := relay.Address6
allowedips = append(allowedips, relayIP6)
}
//get PeerUpdate for relayed node
relayHost, err := GetHost(relay.HostID.String())
if err != nil {
return models.PeerUpdate{}, err
}
relayPeerUpdate, err := GetPeerUpdate(relay, relayHost)
if err != nil {
return models.PeerUpdate{}, err
}
//add the relays allowed ips from all of the relay's peers
for _, peer := range relayPeerUpdate.Peers {
allowedips = append(allowedips, peer.AllowedIPs...)
}
//delete any ips not permitted by acl
for i := len(allowedips) - 1; i >= 0; i-- {
target, err := findNode(allowedips[i].IP.String())
if err != nil {
logger.Log(0, "failed to find node for ip", allowedips[i].IP.String(), err.Error())
continue
}
if target == nil {
logger.Log(0, "failed to find node for ip", allowedips[i].IP.String())
continue
}
if !nodeacls.AreNodesAllowed(nodeacls.NetworkID(node.Network), nodeacls.NodeID(node.ID.String()), nodeacls.NodeID(target.ID.String())) {
logger.Log(0, "deleting node from relayednode per acl", node.ID.String(), target.ID.String())
allowedips = append(allowedips[:i], allowedips[i+1:]...)
}
}
//delete self from allowed ips
for i := len(allowedips) - 1; i >= 0; i-- {
if allowedips[i].IP.String() == node.Address.IP.String() || allowedips[i].IP.String() == node.Address6.IP.String() {
allowedips = append(allowedips[:i], allowedips[i+1:]...)
}
}
//delete egressrange from allowedip if we are egress gateway
if node.IsEgressGateway {
for i := len(allowedips) - 1; i >= 0; i-- {
if StringSliceContains(node.EgressGatewayRanges, allowedips[i].String()) {
allowedips = append(allowedips[:i], allowedips[i+1:]...)
}
}
}
//delete extclients from allowedip if we are ingress gateway
if node.IsIngressGateway {
for i := len(allowedips) - 1; i >= 0; i-- {
if strings.Contains(node.IngressGatewayRange, allowedips[i].IP.String()) {
allowedips = append(allowedips[:i], allowedips[i+1:]...)
}
}
}
//add egress range if relay is egress
if relay.IsEgressGateway {
var ip *net.IPNet
for _, cidr := range relay.EgressGatewayRanges {
_, ip, err = net.ParseCIDR(cidr)
if err != nil {
continue
}
}
allowedips = append(allowedips, *ip)
}
var setUDPPort = false
var listenPort int
if CheckEndpoint(udppeers[relayHost.PublicKey.String()]) {
endpointstring := udppeers[relayHost.PublicKey.String()]
endpointarr := strings.Split(endpointstring, ":")
if len(endpointarr) == 2 {
port, err := strconv.Atoi(endpointarr[1])
if err == nil {
setUDPPort = true
listenPort = port
}
}
}
// if udp hole punching is on, but udp hole punching did not set it, use the LocalListenPort instead
// or, if port is for some reason zero use the LocalListenPort
// but only do this if LocalListenPort is not zero
if ((!setUDPPort) || relayHost.ListenPort == 0) && relayHost.ListenPort != 0 {
listenPort = relayHost.ListenPort
}
endpoint := relayHost.EndpointIP.String() + ":" + strconv.FormatInt(int64(listenPort), 10)
address, err := net.ResolveUDPAddr("udp", endpoint)
if err != nil {
return models.PeerUpdate{}, err
}
var keepalive time.Duration
if node.PersistentKeepalive != 0 {
// set_keepalive
keepalive = node.PersistentKeepalive
}
var peerData = wgtypes.PeerConfig{
PublicKey: relayHost.PublicKey,
Endpoint: address,
ReplaceAllowedIPs: true,
AllowedIPs: allowedips,
PersistentKeepaliveInterval: &keepalive,
}
peers = append(peers, peerData)
//if ingress add extclients
if node.IsIngressGateway {
extPeers, _, err := getExtPeers(node)
if err == nil {
peers = append(peers, extPeers...)
} else {
logger.Log(2, "could not retrieve ext peers for ", node.ID.String(), err.Error())
}
}
peerUpdate.Network = node.Network
peerUpdate.ServerVersion = servercfg.Version
sort.SliceStable(peers[:], func(i, j int) bool {
return peers[i].PublicKey.String() < peers[j].PublicKey.String()
})
peerUpdate.Peers = peers
peerUpdate.ServerAddrs = serverNodeAddresses
return peerUpdate, nil
}
func getEgressIPs(node, peer *models.Node) []net.IPNet {
host, err := GetHost(node.HostID.String())
if err != nil {

View file

@ -10,8 +10,8 @@ import (
)
const (
// ZOMBIE_TIMEOUT - timeout in seconds for checking zombie status
ZOMBIE_TIMEOUT = 60
// ZOMBIE_TIMEOUT - timeout in hours for checking zombie status
ZOMBIE_TIMEOUT = 6
// ZOMBIE_DELETE_TIME - timeout in minutes for zombie node deletion
ZOMBIE_DELETE_TIME = 10
)
@ -86,7 +86,7 @@ func ManageZombies(ctx context.Context, peerUpdate chan *models.Node) {
zombies = append(zombies, id)
case id := <-newHostZombie:
hostZombies = append(hostZombies, id)
case <-time.After(time.Second * ZOMBIE_TIMEOUT):
case <-time.After(time.Hour * ZOMBIE_TIMEOUT): // run this check 4 times a day
logger.Log(3, "checking for zombie nodes")
if len(zombies) > 0 {
for i := len(zombies) - 1; i >= 0; i-- {

View file

@ -27,7 +27,7 @@ import (
stunserver "github.com/gravitl/netmaker/stun-server"
)
var version = "v0.18.0"
var version = "v0.18.2"
// Start DB Connection and start API Request Handler
func main() {
@ -136,8 +136,8 @@ func startControllers() {
go runMessageQueue(&waitnetwork)
}
if !servercfg.IsAgentBackend() && !servercfg.IsRestBackend() && !servercfg.IsMessageQueueBackend() {
logger.Log(0, "No Server Mode selected, so nothing is being served! Set Agent mode (AGENT_BACKEND) or Rest mode (REST_BACKEND) or MessageQueue (MESSAGEQUEUE_BACKEND) to 'true'.")
if !servercfg.IsRestBackend() && !servercfg.IsMessageQueueBackend() {
logger.Log(0, "No Server Mode selected, so nothing is being served! Set Rest mode (REST_BACKEND) or MessageQueue (MESSAGEQUEUE_BACKEND) to 'true'.")
}
// starts the stun server

View file

@ -6,27 +6,19 @@ import (
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)
// PeerUpdate - struct
type PeerUpdate struct {
Network string `json:"network" bson:"network" yaml:"network"`
ServerVersion string `json:"serverversion" bson:"serverversion" yaml:"serverversion"`
ServerAddrs []ServerAddr `json:"serveraddrs" bson:"serveraddrs" yaml:"serveraddrs"`
Peers []wgtypes.PeerConfig `json:"peers" bson:"peers" yaml:"peers"`
PeerIDs PeerMap `json:"peerids" bson:"peerids" yaml:"peerids"`
ProxyUpdate ProxyManagerPayload `json:"proxy_update" bson:"proxy_update" yaml:"proxy_update"`
}
// HostPeerUpdate - struct for host peer updates
type HostPeerUpdate struct {
Host Host `json:"host" bson:"host" yaml:"host"`
Server string `json:"server" bson:"server" yaml:"server"`
ServerVersion string `json:"serverversion" bson:"serverversion" yaml:"serverversion"`
ServerAddrs []ServerAddr `json:"serveraddrs" bson:"serveraddrs" yaml:"serveraddrs"`
Peers []wgtypes.PeerConfig `json:"peers" bson:"peers" yaml:"peers"`
PeerIDs HostPeerMap `json:"peerids" bson:"peerids" yaml:"peerids"`
Host Host `json:"host" bson:"host" yaml:"host"`
Server string `json:"server" bson:"server" yaml:"server"`
ServerVersion string `json:"serverversion" bson:"serverversion" yaml:"serverversion"`
ServerAddrs []ServerAddr `json:"serveraddrs" bson:"serveraddrs" yaml:"serveraddrs"`
NodePeers []wgtypes.PeerConfig `json:"peers" bson:"peers" yaml:"peers"`
Peers []wgtypes.PeerConfig
HostPeerIDs HostPeerMap `json:"hostpeerids" bson:"hostpeerids" yaml:"hostpeerids"`
ProxyUpdate ProxyManagerPayload `json:"proxy_update" bson:"proxy_update" yaml:"proxy_update"`
EgressInfo map[string]EgressInfo `json:"egress_info" bson:"egress_info" yaml:"egress_info"` // map key is node ID
IngressInfo IngressInfo `json:"ingress_info" bson:"ext_peers" yaml:"ext_peers"`
PeerIDs PeerMap `json:"peerids" bson:"peerids" yaml:"peerids"`
}
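The struct above is the crux of the payload rework: Peers is the host-wide list applied to the WireGuard interface, NodePeers and PeerIDs are the view scoped to the network the request came from (used for metrics), and HostPeerIDs is keyed by WireGuard public key and then node ID. A simplified, self-contained sketch of that shape, using local mirror types and made-up values rather than the actual models package:

package main

import (
	"fmt"

	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

// IDandAddr mirrors only the subset of fields used in this sketch (illustrative).
type IDandAddr struct {
	ID      string
	Address string
	Name    string
	Network string
}

// hostPeerUpdate is a simplified stand-in for the new HostPeerUpdate shape.
type hostPeerUpdate struct {
	Peers       []wgtypes.PeerConfig            // full peer list for the host's WireGuard interface
	NodePeers   []wgtypes.PeerConfig            // peers scoped to the requesting network
	HostPeerIDs map[string]map[string]IDandAddr // public key -> node ID -> identity
	PeerIDs     map[string]IDandAddr            // network-scoped map used for metrics
}

func main() {
	upd := hostPeerUpdate{
		HostPeerIDs: map[string]map[string]IDandAddr{
			"PUBKEY_A": {
				"node-1": {ID: "node-1", Address: "10.10.10.2", Name: "host-a", Network: "netmaker"},
				"node-2": {ID: "node-2", Address: "10.20.20.2", Name: "host-a", Network: "other"},
			},
		},
	}
	// The interface-wide list and the network-scoped list are consumed separately.
	fmt.Printf("interface peers: %d, network-scoped peers: %d\n", len(upd.Peers), len(upd.NodePeers))
	for pubKey, nodes := range upd.HostPeerIDs {
		for nodeID, meta := range nodes {
			fmt.Printf("key %s carries node %s (%s on %s)\n", pubKey, nodeID, meta.Address, meta.Network)
		}
	}
}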
// IngressInfo - struct for ingress info

View file

@ -205,7 +205,7 @@ type TrafficKeys struct {
// NodeGet - struct for a single node get response
type NodeGet struct {
Node LegacyNode `json:"node" bson:"node" yaml:"node"`
Node Node `json:"node" bson:"node" yaml:"node"`
Host Host `json:"host" yaml:"host"`
Peers []wgtypes.PeerConfig `json:"peers" bson:"peers" yaml:"peers"`
HostPeers []wgtypes.PeerConfig `json:"host_peers" bson:"host_peers" yaml:"host_peers"`

View file

@ -238,11 +238,10 @@ func UpdateMetrics(client mqtt.Client, msg mqtt.Message) {
logger.Log(2, "updating peers after node", currentNode.ID.String(), currentNode.Network, "detected connectivity issues")
host, err := logic.GetHost(currentNode.HostID.String())
if err == nil {
if err = PublishSingleHostUpdate(host); err != nil {
if err = PublishSingleHostPeerUpdate(host, nil); err != nil {
logger.Log(0, "failed to publish update after failover peer change for node", currentNode.ID.String(), currentNode.Network)
}
}
}
logger.Log(1, "updated node metrics", id)

View file

@ -25,7 +25,7 @@ func PublishPeerUpdate() error {
}
for _, host := range hosts {
host := host
err = PublishSingleHostUpdate(&host)
err = PublishSingleHostPeerUpdate(&host, nil)
if err != nil {
logger.Log(1, "failed to publish peer update to host", host.ID.String(), ": ", err.Error())
}
@ -33,10 +33,31 @@ func PublishPeerUpdate() error {
return err
}
// PublishSingleHostUpdate --- determines and publishes a peer update to one host
func PublishSingleHostUpdate(host *models.Host) error {
// PublishDeletedNodePeerUpdate --- determines and publishes a peer update
// to all hosts so they can account for a deleted node
func PublishDeletedNodePeerUpdate(delNode *models.Node) error {
if !servercfg.IsMessageQueueBackend() {
return nil
}
peerUpdate, err := logic.GetPeerUpdateForHost(host)
hosts, err := logic.GetAllHosts()
if err != nil {
logger.Log(1, "err getting all hosts", err.Error())
return err
}
for _, host := range hosts {
host := host
if err = PublishSingleHostPeerUpdate(&host, delNode); err != nil {
logger.Log(1, "failed to publish peer update to host", host.ID.String(), ": ", err.Error())
}
}
return err
}
// PublishSingleHostPeerUpdate --- determines and publishes a peer update to one host
func PublishSingleHostPeerUpdate(host *models.Host, deletedNode *models.Node) error {
peerUpdate, err := logic.GetPeerUpdateForHost("", host, deletedNode)
if err != nil {
return err
}
@ -59,13 +80,6 @@ func PublishSingleHostUpdate(host *models.Host) error {
return publish(host, fmt.Sprintf("peers/host/%s/%s", host.ID.String(), servercfg.GetServer()), data)
}
// PublishExtPeerUpdate --- publishes a peer update to all the peers of a node
func PublishExtPeerUpdate(node *models.Node) error {
go PublishPeerUpdate()
return nil
}
// NodeUpdate -- publishes a node update
func NodeUpdate(node *models.Node) error {
host, err := logic.GetHost(node.HostID.String())
@ -413,7 +427,7 @@ func sendPeers() {
if force {
host := host
logger.Log(2, "sending scheduled peer update (5 min)")
err = PublishSingleHostUpdate(&host)
err = PublishSingleHostPeerUpdate(&host, nil)
if err != nil {
logger.Log(1, "error publishing peer updates for host: ", host.ID.String(), " Err: ", err.Error())
}

237
scripts/nm-quick-interactive.sh → scripts/nm-quick.sh Normal file → Executable file
View file

@ -1,51 +1,8 @@
#!/bin/bash
# setup_netclient - installs netclient locally
setup_netclient() {
# DEV_TEMP - Temporary instructions for testing
# wget https://fileserver.netmaker.org/testing/netclient
# chmod +x netclient
# ./netclient install
# RELEASE_REPLACE - Use this once release is ready
# if [ -f /etc/debian_version ]; then
# curl -sL 'https://apt.netmaker.org/gpg.key' | sudo tee /etc/apt/trusted.gpg.d/netclient.asc
# curl -sL 'https://apt.netmaker.org/debian.deb.txt' | sudo tee /etc/apt/sources.list.d/netclient.list
# sudo apt update
# sudo apt install netclient
# elif [ -f /etc/centos-release ]; then
# curl -sL 'https://rpm.netmaker.org/gpg.key' | sudo tee /tmp/gpg.key
# curl -sL 'https://rpm.netmaker.org/netclient-repo' | sudo tee /etc/yum.repos.d/netclient.repo
# sudo rpm --import /tmp/gpg.key
# sudo dnf check-update
# sudo dnf install netclient
# elif [ -f /etc/fedora-release ]; then
# curl -sL 'https://rpm.netmaker.org/gpg.key' | sudo tee /tmp/gpg.key
# curl -sL 'https://rpm.netmaker.org/netclient-repo' | sudo tee /etc/yum.repos.d/netclient.repo
# sudo rpm --import /tmp/gpg.key
# sudo dnf check-update
# sudo dnf install netclient
# elif [ -f /etc/redhat-release ]; then
# curl -sL 'https://rpm.netmaker.org/gpg.key' | sudo tee /tmp/gpg.key
# curl -sL 'https://rpm.netmaker.org/netclient-repo' | sudo tee /etc/yum.repos.d/netclient.repo
# sudo rpm --import /tmp/gpg.key
# sudo dnf check-update(
# sudo dnf install netclient
# elif [ -f /etc/arch-release ]; then
# yay -S netclient
# else
# echo "OS not supported for automatic install"
# exit 1
# fi
# if [ -z "${install_cmd}" ]; then
# echo "OS unsupported for automatic dependency install"
# exit 1
# fi
}
LATEST="v0.18.2"
print_logo() {(
cat << "EOF"
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@ -62,13 +19,82 @@ cat << "EOF"
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
EOF
)}
if [ $(id -u) -ne 0 ]; then
echo "This script must be run as root"
exit 1
fi
if [ -z "$1" ]; then
unset INSTALL_TYPE
unset BUILD_TYPE
unset BUILD_TAG
unset IMAGE_TAG
usage () {(
echo "usage: ./nm-quick.sh [-e] [-b buildtype] [-t tag]"
echo " -e if specified, will install netmaker EE"
echo " -b type of build; options:"
echo " \"version\" - will install a specific version of Netmaker using remote git and dockerhub"
echo " \"local\": - will install by cloning repo and and building images from git"
echo " \"branch\": - will install a specific branch using remote git and dockerhub "
echo " -t tag of build; if buildtype=version, tag=version. If builtype=branch or builtype=local, tag=branch"
echo "examples:"
echo " nm-quick.sh -e -b version -t v0.18.2"
echo " nm-quick.sh -e -b local -t feature_v0.17.2_newfeature"
echo " nm-quick.sh -e -b branch -t develop"
exit 1
)}
while getopts evb:t: flag
do
case "${flag}" in
e)
INSTALL_TYPE="ee"
;;
v)
usage
exit 0
;;
b)
BUILD_TYPE=${OPTARG}
if [[ ! "$BUILD_TYPE" =~ ^(version|local|branch)$ ]]; then
echo "error: $BUILD_TYPE is invalid"
echo "valid options: version, local, branch"
usage
exit 1
fi
;;
t)
BUILD_TAG=${OPTARG}
;;
esac
done
if [ -z "$BUILD_TYPE" ]; then
BUILD_TYPE="version"
BUILD_TAG=$LATEST
fi
if [ -z "$BUILD_TAG" ] && [ "$BUILD_TYPE" = "version" ]; then
BUILD_TAG=$LATEST
fi
if [ -z "$BUILD_TAG" ] && [ ! -z "$BUILD_TYPE" ]; then
echo "error: must specify build tag when build type \"$BUILD_TYPE\" is specified"
usage
exit 1
fi
IMAGE_TAG=$(sed 's/\//-/g' <<< "$BUILD_TAG")
if [ "$1" = "ce" ]; then
INSTALL_TYPE="ce"
elif [ "$1" = "ee" ]; then
INSTALL_TYPE="ee"
fi
if [ -z "$INSTALL_TYPE" ]; then
echo "-----------------------------------------------------"
echo "Would you like to install Netmaker Community Edition (CE), or Netmaker Enterprise Edition (EE)?"
echo "EE will require you to create an account at https://dashboard.license.netmaker.io"
@ -88,16 +114,15 @@ if [ -z "$1" ]; then
*) echo "invalid option $REPLY";;
esac
done
elif [ "$1" = "ce" ]; then
echo "installing Netmaker CE"
INSTALL_TYPE="ce"
elif [ "$1" = "ee" ]; then
echo "installing Netmaker EE"
INSTALL_TYPE="ee"
else
echo "install type invalid (options: 'ce, ee')"
exit 1
fi
echo "-----------Build Options-----------------------------"
echo " EE or CE: $INSTALL_TYPE";
echo " Build Type: $BUILD_TYPE";
echo " Build Tag: $BUILD_TAG";
echo " Image Tag: $IMAGE_TAG";
echo "-----------------------------------------------------"
print_logo
wait_seconds() {(
for ((a=1; a <= $1; a++))
@ -118,45 +143,67 @@ confirm() {(
done
)}
local_install_setup() {(
rm -rf netmaker-tmp
mkdir netmaker-tmp
cd netmaker-tmp
git clone https://www.github.com/gravitl/netmaker
cd netmaker
git checkout $BUILD_TAG
git pull origin $BUILD_TAG
docker build --no-cache --build-arg version=$IMAGE_TAG -t gravitl/netmaker:$IMAGE_TAG .
if [ "$INSTALL_TYPE" = "ee" ]; then
cp compose/docker-compose.ee.yml /root/docker-compose.yml
cp docker/Caddyfile-EE /root/Caddyfile
else
cp compose/docker-compose.yml /root/docker-compose.yml
cp docker/Caddyfile /root/Caddyfile
fi
cp docker/mosquitto.conf /root/mosquitto.conf
cp docker/wait.sh /root/wait.sh
cd ../../
rm -rf netmaker-tmp
)}
echo "checking dependencies..."
OS=$(uname)
if [ -f /etc/debian_version ]; then
dependencies="wireguard wireguard-tools jq docker.io docker-compose"
dependencies="git wireguard wireguard-tools jq docker.io docker-compose"
update_cmd='apt update'
install_cmd='apt-get install -y'
elif [ -f /etc/alpine-release ]; then
dependencies="wireguard jq docker.io docker-compose"
dependencies="git wireguard jq docker.io docker-compose"
update_cmd='apk update'
install_cmd='apk --update add'
elif [ -f /etc/centos-release ]; then
dependencies="wireguard jq docker.io docker-compose"
dependencies="git wireguard jq docker.io docker-compose"
update_cmd='yum update'
install_cmd='yum install -y'
elif [ -f /etc/fedora-release ]; then
dependencies="wireguard jq docker.io docker-compose"
dependencies="git wireguard jq docker.io docker-compose"
update_cmd='dnf update'
install_cmd='dnf install -y'
elif [ -f /etc/redhat-release ]; then
dependencies="wireguard jq docker.io docker-compose"
dependencies="git wireguard jq docker.io docker-compose"
update_cmd='yum update'
install_cmd='yum install -y'
elif [ -f /etc/arch-release ]; then
dependecies="wireguard-tools jq docker.io docker-compose"
dependecies="git wireguard-tools jq docker.io docker-compose"
update_cmd='pacman -Sy'
install_cmd='pacman -S --noconfirm'
elif [ "${OS}" = "FreeBSD" ]; then
dependencies="wireguard wget jq docker.io docker-compose"
dependencies="git wireguard wget jq docker.io docker-compose"
update_cmd='pkg update'
install_cmd='pkg install -y'
elif [ -f /etc/turris-version ]; then
dependencies="wireguard-tools bash jq docker.io docker-compose"
dependencies="git wireguard-tools bash jq docker.io docker-compose"
OS="TurrisOS"
update_cmd='opkg update'
install_cmd='opkg install'
elif [ -f /etc/openwrt_release ]; then
dependencies="wireguard-tools bash jq docker.io docker-compose"
dependencies="git wireguard-tools bash jq docker.io docker-compose"
OS="OpenWRT"
update_cmd='opkg update'
install_cmd='opkg install'
@@ -228,11 +275,21 @@ echo "-----------------------------------------------------"
wait_seconds 3
if [ "$BUILD_TYPE" = "local" ]; then
local_install_setup
fi
set -e
NETMAKER_BASE_DOMAIN=nm.$(dig myip.opendns.com @resolver1.opendns.com +short | tr . -).nip.io
IP_ADDR=$(dig -4 myip.opendns.com @resolver1.opendns.com +short)
if [ "$IP_ADDR" = "" ]; then
IP_ADDR=$(curl -s ifconfig.me)
fi
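# Build a wildcard nip.io domain from the detected public IP,
# e.g. (illustrative) 203.0.113.7 -> nm.203-0-113-7.nip.io.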
NETMAKER_BASE_DOMAIN=nm.$(echo $IP_ADDR | tr . -).nip.io
COREDNS_IP=$(ip route get 1 | sed -n 's/^.*src \([0-9.]*\) .*$/\1/p')
SERVER_PUBLIC_IP=$(dig myip.opendns.com @resolver1.opendns.com +short)
SERVER_PUBLIC_IP=$IP_ADDR
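# Generate a random 30-character alphanumeric master key for the server.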
MASTER_KEY=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 30 ; echo '')
DOMAIN_TYPE=""
echo "-----------------------------------------------------"
@@ -381,15 +438,18 @@ wait_seconds 3
echo "Pulling config files..."
COMPOSE_URL="https://raw.githubusercontent.com/gravitl/netmaker/master/compose/docker-compose.yml"
CADDY_URL="https://raw.githubusercontent.com/gravitl/netmaker/master/docker/Caddyfile"
COMPOSE_URL="https://raw.githubusercontent.com/gravitl/netmaker/$BUILD_TAG/compose/docker-compose.yml"
CADDY_URL="https://raw.githubusercontent.com/gravitl/netmaker/$BUILD_TAG/docker/Caddyfile"
if [ "$INSTALL_TYPE" = "ee" ]; then
COMPOSE_URL="https://raw.githubusercontent.com/gravitl/netmaker/master/compose/docker-compose.ee.yml"
CADDY_URL="https://raw.githubusercontent.com/gravitl/netmaker/master/docker/Caddyfile-EE"
COMPOSE_URL="https://raw.githubusercontent.com/gravitl/netmaker/$BUILD_TAG/compose/docker-compose.ee.yml"
CADDY_URL="https://raw.githubusercontent.com/gravitl/netmaker/$BUILD_TAG/docker/Caddyfile-EE"
fi
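# For non-local builds, fetch the docker-compose, mosquitto.conf, and Caddyfile
# that match the chosen tag or branch.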
if [ ! "$BUILD_TYPE" = "local" ]; then
wget -O /root/docker-compose.yml $COMPOSE_URL && wget -O /root/mosquitto.conf https://raw.githubusercontent.com/gravitl/netmaker/$BUILD_TAG/docker/mosquitto.conf && wget -O /root/Caddyfile $CADDY_URL
wget -O /root/wait.sh https://raw.githubusercontent.com/gravitl/netmaker/$BUILD_TAG/docker/wait.sh
fi
wget -O /root/docker-compose.yml $COMPOSE_URL && wget -O /root/mosquitto.conf https://raw.githubusercontent.com/gravitl/netmaker/master/docker/mosquitto.conf && wget -O /root/Caddyfile $CADDY_URL
wget -O /root/wait.sh https://raw.githubusercontent.com/gravitl/netmaker/master/docker/wait.sh
chmod +x /root/wait.sh
mkdir -p /etc/netmaker
@@ -406,6 +466,19 @@ if [ "$INSTALL_TYPE" = "ee" ]; then
sed -i "s~YOUR_LICENSE_KEY~$LICENSE_KEY~g" /root/docker-compose.yml
sed -i "s/YOUR_ACCOUNT_ID/$ACCOUNT_ID/g" /root/docker-compose.yml
fi
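# Pin image tags in docker-compose.yml: EE installs built from a released
# version use the '-ee' server image, while local builds keep the UI at the
# latest released tag.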
if [ "$BUILD_TYPE" = "version" ] && [ "$INSTALL_TYPE" = "ee" ]; then
sed -i "s/REPLACE_SERVER_IMAGE_TAG/$IMAGE_TAG-ee/g" /root/docker-compose.yml
else
sed -i "s/REPLACE_SERVER_IMAGE_TAG/$IMAGE_TAG/g" /root/docker-compose.yml
fi
if [ "$BUILD_TYPE" = "local" ]; then
sed -i "s/REPLACE_UI_IMAGE_TAG/$LATEST/g" /root/docker-compose.yml
else
sed -i "s/REPLACE_UI_IMAGE_TAG/$IMAGE_TAG/g" /root/docker-compose.yml
fi
echo "Starting containers..."
docker-compose -f /root/docker-compose.yml up -d
@@ -455,28 +528,8 @@ ACCESS_TOKEN=$(jq -r '.accessstring' <<< ${curlresponse})
wait_seconds 3
# echo "Installing Netclient"
# setup_netclient
# echo "Adding Netclient to Network"
# netclient join -t $ACCESS_TOKEN
# # TODO - Get Host ID
# echo "Setting Netclient as Default Host"
# HOST_ID=$(grep 'id:' /etc/netclient/netclient.yml | awk '{print $2}')
# echo $HOST_ID
# # TODO - API call to make host default
# echo "Setting Netclient as Ingress Gateway"
# if [[ ! -z "$SERVER_ID" ]]; then
# curl -o /dev/null -s -X POST -H "Authorization: Bearer $MASTER_KEY" -H 'Content-Type: application/json' https://api.${NETMAKER_BASE_DOMAIN}/api/nodes/netmaker/$HOST_ID/createingress
# fi
)}
set +e
test_connection

View file

@@ -187,7 +187,7 @@ collect_server_settings() {
STUN_NAME="stun.$SERVER_NAME"
echo "-----------------------------------------------------"
echo "Netmaker v0.18.0 requires a new DNS entry for $STUN_NAME."
echo "Netmaker v0.18.2 requires a new DNS entry for $STUN_NAME."
echo "Please confirm this is added to your DNS provider before continuing"
echo "(note: this is not required if using an nip.io address)"
echo "-----------------------------------------------------"
@@ -245,7 +245,7 @@ set_compose() {
sed -i "s/v0.17.1/testing/g" /root/docker-compose.yml
# RELEASE_REPLACE - Use this once release is ready
#sed -i "s/v0.17.1/v0.18.0/g" /root/docker-compose.yml
#sed -i "s/v0.17.1/v0.18.2/g" /root/docker-compose.yml
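# yq injects the collected server, broker, and STUN names into the netmaker
# service environment in docker-compose.yml.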
yq ".services.netmaker.environment.SERVER_NAME = \"$SERVER_NAME\"" -i /root/docker-compose.yml
yq ".services.netmaker.environment += {\"BROKER_NAME\": \"$BROKER_NAME\"}" -i /root/docker-compose.yml
yq ".services.netmaker.environment += {\"STUN_NAME\": \"$STUN_NAME\"}" -i /root/docker-compose.yml
@@ -416,7 +416,7 @@ join_networks() {
# create an egress if necessary
if [[ $HAS_EGRESS == "yes" ]]; then
echo "Egress is currently unimplemented. Wait for 0.18.1"
echo "Egress is currently unimplemented. Wait for 0.18.2"
fi
echo "HAS INGRESS: $HAS_INGRESS"
@@ -444,11 +444,10 @@ join_networks() {
fi
}
cat << "EOF"
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
The Netmaker Upgrade Script: Upgrading to v0.18.0 so you don't have to!
The Netmaker Upgrade Script: Upgrading to v0.18.2 so you don't have to!
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
EOF

View file

@@ -46,10 +46,6 @@ func GetServerConfig() config.ServerConfig {
if IsRestBackend() {
cfg.RestBackend = "on"
}
cfg.AgentBackend = "off"
if IsAgentBackend() {
cfg.AgentBackend = "on"
}
cfg.DNSMode = "off"
if IsDNSMode() {
cfg.DNSMode = "on"
@@ -167,15 +163,6 @@ func GetAPIHost() string {
return serverhost
}
// GetPodIP - get the pod's ip
func GetPodIP() string {
podip := "127.0.0.1"
if os.Getenv("POD_IP") != "" {
podip = os.Getenv("POD_IP")
}
return podip
}
// GetAPIPort - gets the api port
func GetAPIPort() string {
apiport := "8081"
@@ -198,19 +185,6 @@ func GetStunAddr() string {
return stunAddr
}
// GetDefaultNodeLimit - get node limit if one is set
func GetDefaultNodeLimit() int32 {
var limit int32
limit = 999999999
envlimit, err := strconv.Atoi(os.Getenv("DEFAULT_NODE_LIMIT"))
if err == nil && envlimit != 0 {
limit = int32(envlimit)
} else if config.Config.Server.DefaultNodeLimit != 0 {
limit = config.Config.Server.DefaultNodeLimit
}
return limit
}
// GetCoreDNSAddr - gets the core dns address
func GetCoreDNSAddr() string {
addr, _ := GetPublicIP()
@@ -313,21 +287,6 @@ func IsMetricsExporter() bool {
return export
}
// IsAgentBackend - checks if agent backed is on or off
func IsAgentBackend() bool {
isagent := true
if os.Getenv("AGENT_BACKEND") != "" {
if os.Getenv("AGENT_BACKEND") == "off" {
isagent = false
}
} else if config.Config.Server.AgentBackend != "" {
if config.Config.Server.AgentBackend == "off" {
isagent = false
}
}
return isagent
}
// IsMessageQueueBackend - checks if message queue is on or off
func IsMessageQueueBackend() bool {
ismessagequeue := true
@@ -487,7 +446,7 @@ func GetPlatform() string {
if os.Getenv("PLATFORM") != "" {
platform = os.Getenv("PLATFORM")
} else if config.Config.Server.Platform != "" {
platform = config.Config.Server.SQLConn
platform = config.Config.Server.Platform
}
return platform
}
@@ -525,18 +484,6 @@ func SetNodeID(id string) {
config.Config.Server.NodeID = id
}
// GetServerCheckinInterval - gets the server check-in time
func GetServerCheckinInterval() int64 {
var t = int64(5)
var envt, _ = strconv.Atoi(os.Getenv("SERVER_CHECKIN_INTERVAL"))
if envt > 0 {
t = int64(envt)
} else if config.Config.Server.ServerCheckinInterval > 0 {
t = config.Config.Server.ServerCheckinInterval
}
return t
}
// GetAuthProviderInfo = gets the oauth provider info
func GetAuthProviderInfo() (pi []string) {
var authProvider = ""
@@ -637,7 +584,7 @@ func GetLicenseKey() string {
func GetNetmakerAccountID() string {
netmakerAccountID := os.Getenv("NETMAKER_ACCOUNT_ID")
if netmakerAccountID == "" {
netmakerAccountID = config.Config.Server.LicenseValue
netmakerAccountID = config.Config.Server.NetmakerAccountID
}
return netmakerAccountID
}

View file

@@ -565,8 +565,6 @@ definitions:
type: string
APIPort:
type: string
AgentBackend:
type: string
AllowedOrigin:
type: string
AuthProvider:
@@ -585,9 +583,6 @@ definitions:
type: string
Database:
type: string
DefaultNodeLimit:
format: int32
type: integer
DisableRemoteIPCheck:
type: string
DisplayKeys:
@@ -624,9 +619,6 @@ definitions:
type: string
Server:
type: string
ServerCheckinInterval:
format: int64
type: integer
Telemetry:
type: string
Verbosity:
@@ -718,7 +710,7 @@ info:
API calls must be authenticated via a header of the format -H “Authorization: Bearer <YOUR_SECRET_KEY>” There are two methods to obtain YOUR_SECRET_KEY: 1. Using the masterkey. By default, this value is “secret key,” but you should change this on your instance and keep it secure. This value can be set via env var at startup or in a config file (config/environments/< env >.yaml). See the [Netmaker](https://docs.netmaker.org/index.html) documentation for more details. 2. Using a JWT received for a node. This can be retrieved by calling the /api/nodes/<network>/authenticate endpoint, as documented below.
title: Netmaker
version: 0.18.0
version: 0.18.2
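# Illustrative only (placeholder host, not part of this spec): a request
# authenticated with the master key might look like
#   curl -H "Authorization: Bearer <YOUR_SECRET_KEY>" https://api.<your-domain>/api/dns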
paths:
/api/dns:
get:

View file

@@ -4,7 +4,6 @@ server:
masterkey: "secretkey"
allowedorigin: "*"
restbackend: true
agentbackend: true
mongoconn:
user: "mongoadmin"
pass: "mongopass"