fixed first two issues

0xdcarns 2022-09-28 15:22:12 -04:00
parent b95f3eb846
commit 40f7036093
4 changed files with 17 additions and 14 deletions


@@ -96,7 +96,7 @@ func GetPeerUpdate(node *models.Node) (models.PeerUpdate, error) {
 			continue
 		}
 		if len(metrics.FailoverPeers[peer.ID]) > 0 {
-			logger.Log(0, "peer", peer.Name, peer.PrimaryAddress(), "was found to be in failover peers list for node", node.Name, node.PrimaryAddress())
+			logger.Log(2, "peer", peer.Name, peer.PrimaryAddress(), "was found to be in failover peers list for node", node.Name, node.PrimaryAddress())
 			continue
 		}
 		pubkey, err := wgtypes.ParseKey(peer.PublicKey)
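
The only functional change in this hunk is the level passed to logger.Log, moving the message from the always-on level 0 to the more verbose level 2. A minimal sketch of the gate this presumably relies on, assuming netmaker's logger emits a message only when the configured verbosity is at least the level given as the first argument (the function below is illustrative, not the actual logger implementation):

package main

import "fmt"

// shouldLog sketches the assumed verbosity gate: a message is emitted only
// when the configured verbosity reaches the level passed to logger.Log.
func shouldLog(verbosity, level int) bool {
	return verbosity >= level
}

func main() {
	fmt.Println(shouldLog(1, 0)) // true: level-0 messages show at any verbosity
	fmt.Println(shouldLog(1, 2)) // false: level-2 messages need verbosity >= 2
}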
@@ -283,8 +283,13 @@ func GetAllowedIPs(node, peer *models.Node, metrics *models.Metrics) []net.IPNet
 			// get original node so we can traverse the allowed ips
 			nodeToFailover, err := GetNodeByID(k)
 			if err == nil {
-				allowedips = append(allowedips, getNodeAllowedIPs(&nodeToFailover, peer)...)
-				logger.Log(0, "failing over node", nodeToFailover.Name, nodeToFailover.PrimaryAddress(), "to failover node", peer.Name)
+				failoverNodeMetrics, err := GetMetrics(nodeToFailover.ID)
+				if err == nil && failoverNodeMetrics != nil {
+					if len(failoverNodeMetrics.NodeName) > 0 {
+						allowedips = append(allowedips, getNodeAllowedIPs(&nodeToFailover, peer)...)
+						logger.Log(0, "failing over node", nodeToFailover.Name, nodeToFailover.PrimaryAddress(), "to failover node", peer.Name)
+					}
+				}
 			}
 		}
 	}
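
The added lines stop routing a failed-over node through the failover peer until that node actually has a populated metrics record. A self-contained sketch of the same guard pulled out into a helper, assuming GetMetrics returns a possibly-nil *models.Metrics and an error, as its use above suggests (the type stub and helper names below are illustrative, not part of the codebase):

package main

import "fmt"

// Metrics stands in for models.Metrics; only the field the new guard inspects is reproduced.
type Metrics struct {
	NodeName string
}

// getMetrics stands in for the GetMetrics call above, assumed to return a possibly-nil record and an error.
func getMetrics(nodeID string) (*Metrics, error) {
	if nodeID == "node-with-metrics" {
		return &Metrics{NodeName: "node-1"}, nil
	}
	return &Metrics{}, nil // record exists but was never populated
}

// hasUsableMetrics mirrors the guard added above: only fail a node over once
// its metrics record exists and carries a node name.
func hasUsableMetrics(nodeID string) bool {
	m, err := getMetrics(nodeID)
	return err == nil && m != nil && len(m.NodeName) > 0
}

func main() {
	fmt.Println(hasUsableMetrics("node-with-metrics")) // true
	fmt.Println(hasUsableMetrics("fresh-node"))        // false
}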


@@ -246,7 +246,7 @@ func updateNodeMetrics(currentNode *models.Node, newMetrics *models.Metrics) {
 		currMetric.TotalTime += oldMetric.TotalTime
 		currMetric.Uptime += oldMetric.Uptime // get the total uptime for this connection
 		currMetric.PercentUp = 100.0 * (float64(currMetric.Uptime) / float64(currMetric.TotalTime))
-		totalUpMinutes := currMetric.Uptime * 5
+		totalUpMinutes := currMetric.Uptime * ncutils.CheckInInterval
 		currMetric.ActualUptime = time.Duration(totalUpMinutes) * time.Minute
 		delete(oldMetrics.Connectivity, k) // remove from old data
 		newMetrics.Connectivity[k] = currMetric
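
With this change the uptime estimate is derived from the shared ncutils.CheckInInterval constant instead of a hard-coded 5, so it stays in step with how often clients actually check in. A runnable sketch of the arithmetic, under the assumption that Uptime and TotalTime count check-in rounds (the sample numbers are made up):

package main

import (
	"fmt"
	"time"
)

// CheckInInterval mirrors the constant this commit adds to ncutils: minutes per check-in round.
const CheckInInterval = 1

func main() {
	// Assume 57 of the last 60 check-in rounds saw the peer connected.
	uptimeRounds := 57
	totalRounds := 60

	percentUp := 100.0 * (float64(uptimeRounds) / float64(totalRounds))
	actualUptime := time.Duration(uptimeRounds*CheckInInterval) * time.Minute

	fmt.Printf("%.1f%% up, %s of actual uptime\n", percentUp, actualUptime) // 95.0% up, 57m0s of actual uptime
}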


@@ -31,9 +31,8 @@ var metricsCache = new(sync.Map)
 func Checkin(ctx context.Context, wg *sync.WaitGroup) {
 	logger.Log(2, "starting checkin goroutine")
 	defer wg.Done()
-	currentRun := 0
-	checkin(currentRun)
-	ticker := time.NewTicker(time.Second * 60)
+	checkin()
+	ticker := time.NewTicker(time.Minute * ncutils.CheckInInterval)
 	defer ticker.Stop()
 	for {
 		select {
@@ -42,16 +41,12 @@ func Checkin(ctx context.Context, wg *sync.WaitGroup) {
 			return
 			//delay should be configuraable -> use cfg.Node.NetworkSettings.DefaultCheckInInterval ??
 		case <-ticker.C:
-			currentRun++
-			checkin(currentRun)
-			if currentRun >= 0 {
-				currentRun = 0
-			}
+			checkin()
 		}
 	}
 }
 
-func checkin(currentRun int) {
+func checkin() {
 	networks, _ := ncutils.GetSystemNetworks()
 	logger.Log(3, "checkin with server(s) for all networks")
 	for _, network := range networks {
@@ -116,7 +111,7 @@ func checkin(currentRun int) {
 		}
 		Hello(&nodeCfg)
 		checkCertExpiry(&nodeCfg)
-		if currentRun >= 0 && nodeCfg.Server.Is_EE {
+		if nodeCfg.Server.Is_EE {
 			logger.Log(0, "collecting metrics for node", nodeCfg.Node.Name)
 			publishMetrics(&nodeCfg)
 		}
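
Taken together, these hunks delete the currentRun counter, which was dead weight: it started at 0 and was reset to 0 right after each increment, so the currentRun >= 0 checks could never be false. The check-in cadence now comes from the shared ncutils.CheckInInterval constant instead of a hard-coded 60 seconds. A trimmed-down, self-contained sketch of the resulting loop shape (the interval parameter and the placeholder check-in function are illustrative, not the daemon's real API):

package main

import (
	"context"
	"log"
	"sync"
	"time"
)

// CheckInInterval mirrors the constant this commit adds to ncutils: minutes between check-ins.
const CheckInInterval = 1

// checkinLoop is a simplified stand-in for the Checkin goroutine above:
// check in once immediately, then on every tick, until the context is cancelled.
func checkinLoop(ctx context.Context, wg *sync.WaitGroup, interval time.Duration, checkin func()) {
	defer wg.Done()
	checkin()
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			checkin()
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}
	wg.Add(1)
	go checkinLoop(ctx, wg, time.Minute*CheckInInterval, func() {
		log.Println("checkin with server(s) for all networks")
	})
	time.Sleep(3 * time.Second) // the real daemon runs until shutdown; cancel early for the demo
	cancel()
	wg.Wait()
}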


@@ -7,6 +7,9 @@ import (
 	"github.com/gravitl/netmaker/logger"
 )
 
+// CheckInInterval - the interval for check-in time in units/minute
+const CheckInInterval = 1
+
 // BackOff - back off any function while there is an error
 func BackOff(isExponential bool, maxTime int, f interface{}) (interface{}, error) {
 	// maxTime seconds