mirror of
https://github.com/gravitl/netmaker.git
synced 2025-09-10 23:24:32 +08:00
* feat: api access tokens
* revoke all user tokens
* redefine access token api routes, add auto egress option to enrollment keys
* add server settings apis, add db table for settings
* handle server settings updates
* switch to using settings from DB
* fix server settings migration
* revert force migration for settings
* fix server settings database write
* egress model
* fix revoked tokens to be unauthorized
* update egress model
* remove unused functions
* convert access token to sql schema
* switch access token to sql schema
* fix merge conflicts
* fix server settings types
* bypass basic auth setting for super admin
* add TODO comment
* setup api handlers for egress revamp
* use single DB, fix update nat boolean field
* extend validation checks for egress ranges
* add migration to convert to new egress model
* fix panic interface conversion
* publish peer update on settings update
* revoke token generated by an user
* add user token creation restriction by user role
* add forbidden check for access token creation
* revoke user token when group or role is changed
* add default group to admin users on update
* chore(go): import style changes from migration branch;
1. Singular file names for table schema.
2. No table name method.
3. Use .Model instead of .Table.
4. No unnecessary tagging.
* remove nat check on egress gateway request
* Revert "remove nat check on egress gateway request"
This reverts commit 0aff12a189
.
* remove nat check on egress gateway request
* feat(go): add db middleware;
* feat(go): restore method;
* feat(go): add user access token schema;
* add inet gw status to egress model
* fetch node ids in the tag, add inet gw info clients
* add inet gw info to node from egress list
* add migration logic internet gws
* create default acl policies
* add egress info
* add egress TODO
* add egress TODO
* fix user auth api:
* add reference id to acl policy
* add egress response from DB
* publish peer update on egress changes
* re-initialise oauth and email config
* set verbosity
* normalise cidr on egress req
* add egress id to acl group
* change acls to use egress id
* resolve merge conflicts
* fix egress reference errors
* move egress model to schema
* add api context to DB
* sync auto update settings with hosts
* sync auto update settings with hosts
* check acl for egress node
* check for egress policy in the acl dst groups
* fix acl rules for egress policies with new models
* add status to egress model
* fix inet node func
* mask secret and convert jwt duration to minutes
* enable egress policies on creation
* convert jwt duration to minutes
* add relevant ranges to inet egress
* skip non active egress routes
* resolve merge conflicts
* fix static check
* update gorm tag for primary key on egress model
* create user policies for egress resources
* resolve merge conflicts
* get egress info on failover apis, add egress src validation for inet gws
* add additional validation checks on egress req
* add additional validation checks on egress req
* skip all resources for inet policy
* delete associated egress acl policies
* fix failover of inetclient
* avoid setting inet client as inet gw
* fix all resource egress policy
* fix inet gw egress rule
* check for node egress on relay req
* fix egress acl rules comms
* add new field for egress info on node
* check acl policy in failover ctx
* avoid default host to be set as inet client
* fix relayed egress node
* add valid error messaging for egress validate func
* return if inet default host
* jump port detection to 51821
* check host ports on pull
* check user access gws via acls
* add validation check for default host and failover for inet clients
* add error messaging for acl policy check
* fix inet gw status
* ignore failover req for peer using inet gw
* check for allowed egress ranges for a peer
* add egress routes to static nodes by access
* avoid setting failover as inet client
* fix egress error messaging
* fix extclients egress comms
* fix inet gw acting as inet client
* return formatted error on update acl validation
* add default route for static nodes on inetclient
* check relay node acting as inetclient
* move inet node info to separate field, fix all resources policy
* remove debug logs
---------
Co-authored-by: Vishal Dalwadi <dalwadivishal26@gmail.com>
245 lines
7 KiB
Go
245 lines
7 KiB
Go
package logic
|
|
|
|
import (
|
|
"errors"
|
|
"net"
|
|
"sync"
|
|
|
|
"github.com/google/uuid"
|
|
"github.com/gravitl/netmaker/logger"
|
|
"github.com/gravitl/netmaker/logic"
|
|
"github.com/gravitl/netmaker/models"
|
|
"golang.org/x/exp/slog"
|
|
)
|
|
|
|
// failOverCtxMutex serializes establishment/inspection of failover context
// between node pairs (FailOverPeers / FailedOverBy fields) in
// CheckFailOverCtx and SetFailOverCtx.
var failOverCtxMutex = &sync.RWMutex{}

// failOverCacheMutex protects failOverCache.
var failOverCacheMutex = &sync.RWMutex{}

// failOverCache maps a network ID to the node ID (string form) of that
// network's designated failover node; populated by InitFailOverCache.
var failOverCache = make(map[models.NetworkID]string)
|
|
|
|
func InitFailOverCache() {
|
|
failOverCacheMutex.Lock()
|
|
defer failOverCacheMutex.Unlock()
|
|
networks, err := logic.GetNetworks()
|
|
if err != nil {
|
|
return
|
|
}
|
|
allNodes, err := logic.GetAllNodes()
|
|
if err != nil {
|
|
return
|
|
}
|
|
|
|
for _, network := range networks {
|
|
networkNodes := logic.GetNetworkNodesMemory(allNodes, network.NetID)
|
|
for _, node := range networkNodes {
|
|
if node.IsFailOver {
|
|
failOverCache[models.NetworkID(network.NetID)] = node.ID.String()
|
|
break
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
func CheckFailOverCtx(failOverNode, victimNode, peerNode models.Node) error {
|
|
failOverCtxMutex.RLock()
|
|
defer failOverCtxMutex.RUnlock()
|
|
if peerNode.FailOverPeers == nil {
|
|
return nil
|
|
}
|
|
if victimNode.FailOverPeers == nil {
|
|
return nil
|
|
}
|
|
_, peerHasFailovered := peerNode.FailOverPeers[victimNode.ID.String()]
|
|
_, victimHasFailovered := victimNode.FailOverPeers[peerNode.ID.String()]
|
|
if peerHasFailovered && victimHasFailovered &&
|
|
victimNode.FailedOverBy == failOverNode.ID && peerNode.FailedOverBy == failOverNode.ID {
|
|
return errors.New("failover ctx is already set")
|
|
}
|
|
return nil
|
|
}
|
|
func SetFailOverCtx(failOverNode, victimNode, peerNode models.Node) error {
|
|
failOverCtxMutex.Lock()
|
|
defer failOverCtxMutex.Unlock()
|
|
if peerNode.FailOverPeers == nil {
|
|
peerNode.FailOverPeers = make(map[string]struct{})
|
|
}
|
|
if victimNode.FailOverPeers == nil {
|
|
victimNode.FailOverPeers = make(map[string]struct{})
|
|
}
|
|
_, peerHasFailovered := peerNode.FailOverPeers[victimNode.ID.String()]
|
|
_, victimHasFailovered := victimNode.FailOverPeers[peerNode.ID.String()]
|
|
if peerHasFailovered && victimHasFailovered &&
|
|
victimNode.FailedOverBy == failOverNode.ID && peerNode.FailedOverBy == failOverNode.ID {
|
|
return errors.New("failover ctx is already set")
|
|
}
|
|
peerNode.FailOverPeers[victimNode.ID.String()] = struct{}{}
|
|
victimNode.FailOverPeers[peerNode.ID.String()] = struct{}{}
|
|
victimNode.FailedOverBy = failOverNode.ID
|
|
peerNode.FailedOverBy = failOverNode.ID
|
|
if err := logic.UpsertNode(&victimNode); err != nil {
|
|
return err
|
|
}
|
|
if err := logic.UpsertNode(&peerNode); err != nil {
|
|
return err
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// GetFailOverNode - gets the host acting as failOver
|
|
func GetFailOverNode(network string, allNodes []models.Node) (models.Node, error) {
|
|
nodes := logic.GetNetworkNodesMemory(allNodes, network)
|
|
for _, node := range nodes {
|
|
if node.IsFailOver {
|
|
return node, nil
|
|
}
|
|
}
|
|
return models.Node{}, errors.New("auto relay not found")
|
|
}
|
|
|
|
func RemoveFailOverFromCache(network string) {
|
|
failOverCacheMutex.Lock()
|
|
defer failOverCacheMutex.Unlock()
|
|
delete(failOverCache, models.NetworkID(network))
|
|
}
|
|
|
|
func SetFailOverInCache(node models.Node) {
|
|
failOverCacheMutex.Lock()
|
|
defer failOverCacheMutex.Unlock()
|
|
failOverCache[models.NetworkID(node.Network)] = node.ID.String()
|
|
}
|
|
|
|
// FailOverExists - checks if failOver exists already in the network
|
|
func FailOverExists(network string) (failOverNode models.Node, exists bool) {
|
|
failOverCacheMutex.RLock()
|
|
defer failOverCacheMutex.RUnlock()
|
|
if nodeID, ok := failOverCache[models.NetworkID(network)]; ok {
|
|
failOverNode, err := logic.GetNodeByID(nodeID)
|
|
if err == nil {
|
|
return failOverNode, true
|
|
}
|
|
}
|
|
return
|
|
}
|
|
|
|
// ResetFailedOverPeer - removes failed over node from network peers
|
|
func ResetFailedOverPeer(failedOveredNode *models.Node) error {
|
|
nodes, err := logic.GetNetworkNodes(failedOveredNode.Network)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
failedOveredNode.FailedOverBy = uuid.Nil
|
|
failedOveredNode.FailOverPeers = make(map[string]struct{})
|
|
err = logic.UpsertNode(failedOveredNode)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
for _, node := range nodes {
|
|
if node.FailOverPeers == nil || node.ID == failedOveredNode.ID {
|
|
continue
|
|
}
|
|
delete(node.FailOverPeers, failedOveredNode.ID.String())
|
|
logic.UpsertNode(&node)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// ResetFailOver - reset failovered peers
|
|
func ResetFailOver(failOverNode *models.Node) error {
|
|
// Unset FailedOverPeers
|
|
nodes, err := logic.GetNetworkNodes(failOverNode.Network)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
for _, node := range nodes {
|
|
if node.FailedOverBy == failOverNode.ID {
|
|
node.FailedOverBy = uuid.Nil
|
|
node.FailOverPeers = make(map[string]struct{})
|
|
logic.UpsertNode(&node)
|
|
}
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// GetFailOverPeerIps - adds the failedOvered peerIps by the peer
|
|
func GetFailOverPeerIps(peer, node *models.Node) []net.IPNet {
|
|
allowedips := []net.IPNet{}
|
|
for failOverpeerID := range node.FailOverPeers {
|
|
failOverpeer, err := logic.GetNodeByID(failOverpeerID)
|
|
if err == nil && failOverpeer.FailedOverBy == peer.ID {
|
|
logic.GetNodeEgressInfo(&failOverpeer)
|
|
if failOverpeer.Address.IP != nil {
|
|
allowed := net.IPNet{
|
|
IP: failOverpeer.Address.IP,
|
|
Mask: net.CIDRMask(32, 32),
|
|
}
|
|
allowedips = append(allowedips, allowed)
|
|
}
|
|
if failOverpeer.Address6.IP != nil {
|
|
allowed := net.IPNet{
|
|
IP: failOverpeer.Address6.IP,
|
|
Mask: net.CIDRMask(128, 128),
|
|
}
|
|
allowedips = append(allowedips, allowed)
|
|
}
|
|
if failOverpeer.EgressDetails.IsEgressGateway {
|
|
allowedips = append(allowedips, logic.GetEgressIPs(&failOverpeer)...)
|
|
}
|
|
if failOverpeer.IsRelay {
|
|
for _, id := range failOverpeer.RelayedNodes {
|
|
rNode, _ := logic.GetNodeByID(id)
|
|
if rNode.Address.IP != nil {
|
|
allowed := net.IPNet{
|
|
IP: rNode.Address.IP,
|
|
Mask: net.CIDRMask(32, 32),
|
|
}
|
|
allowedips = append(allowedips, allowed)
|
|
}
|
|
if rNode.Address6.IP != nil {
|
|
allowed := net.IPNet{
|
|
IP: rNode.Address6.IP,
|
|
Mask: net.CIDRMask(128, 128),
|
|
}
|
|
allowedips = append(allowedips, allowed)
|
|
}
|
|
if rNode.EgressDetails.IsEgressGateway {
|
|
allowedips = append(allowedips, logic.GetEgressIPs(&rNode)...)
|
|
}
|
|
}
|
|
}
|
|
// handle ingress gateway peers
|
|
if failOverpeer.IsIngressGateway {
|
|
extPeers, _, _, err := logic.GetExtPeers(&failOverpeer, node)
|
|
if err != nil {
|
|
logger.Log(2, "could not retrieve ext peers for ", peer.ID.String(), err.Error())
|
|
}
|
|
for _, extPeer := range extPeers {
|
|
allowedips = append(allowedips, extPeer.AllowedIPs...)
|
|
}
|
|
}
|
|
}
|
|
}
|
|
return allowedips
|
|
}
|
|
|
|
func CreateFailOver(node models.Node) error {
|
|
if _, exists := FailOverExists(node.Network); exists {
|
|
return errors.New("failover already exists in the network")
|
|
}
|
|
host, err := logic.GetHost(node.HostID.String())
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if host.OS != models.OS_Types.Linux {
|
|
return errors.New("only linux nodes are allowed to be set as failover")
|
|
}
|
|
if node.IsRelayed {
|
|
return errors.New("relayed node cannot be set as failover")
|
|
}
|
|
node.IsFailOver = true
|
|
err = logic.UpsertNode(&node)
|
|
if err != nil {
|
|
slog.Error("failed to upsert node", "node", node.ID.String(), "error", err)
|
|
return err
|
|
}
|
|
SetFailOverInCache(node)
|
|
return nil
|
|
}
|