Mirror of https://github.com/gravitl/netmaker.git (synced 2024-11-10 17:48:25 +08:00)

Merge pull request #1014 from gravitl/feature_v0.13.0_mq_refactor

mq direct to server public ip

Commit 010c1deddc (14 changed files with 212 additions and 158 deletions)

@@ -16,6 +16,7 @@ services:
      - net.ipv4.conf.all.src_valid_mark=1
    restart: always
    environment:
      SERVER_NAME: "broker.NETMAKER_BASE_DOMAIN"
      SERVER_HOST: "SERVER_PUBLIC_IP"
      SERVER_API_CONN_STRING: "api.NETMAKER_BASE_DOMAIN:443"
      SERVER_GRPC_CONN_STRING: "grpc.NETMAKER_BASE_DOMAIN:443"
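The hunk above adds SERVER_HOST, the server's public IP, to the container environment next to SERVER_NAME; per the PR title, this is the address clients now use to reach the MQ broker directly instead of going through a comms network. A rough sketch of the env-then-config resolution pattern this suggests, using the broker port that appears in the netclient hunks below; readBrokerAddress is a hypothetical helper, not a function from this commit, and the fallback address is made up:

package main

import (
    "fmt"
    "os"
)

// readBrokerAddress is a hypothetical helper: prefer the SERVER_HOST
// environment variable (set in the compose file above), fall back to a
// configured value, then append the MQ port used elsewhere in this diff.
func readBrokerAddress(cfgServer string) string {
    server := cfgServer
    if v := os.Getenv("SERVER_HOST"); v != "" {
        server = v
    }
    return server + ":1883" // same default broker port as setupMQTT below
}

func main() {
    fmt.Println(readBrokerAddress("203.0.113.10"))
}
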
@@ -73,7 +73,7 @@ type ServerConfig struct {
    HostNetwork string `yaml:"hostnetwork"`
    CommsCIDR   string `yaml:"commscidr"`
    MQPort      string `yaml:"mqport"`
    CommsID     string `yaml:"commsid"`
    Server      string `yaml:"server"`
}

// SQLConfig - Generic SQL Config
@@ -92,12 +92,6 @@ func (s *NodeServiceServer) CreateNode(ctx context.Context, req *nodepb.Object)
        Server: key,
    }

    commID, err := logic.FetchCommsNetID()
    if err != nil {
        return nil, err
    }
    node.CommID = commID

    err = logic.CreateNode(&node)
    if err != nil {
        return nil, err
go.mod (3 changes)
@@ -34,6 +34,7 @@ require (
require (
    github.com/go-ping/ping v0.0.0-20211130115550-779d1e919534
    github.com/guumaster/hostctl v1.1.2
    github.com/kr/pretty v0.3.0
    github.com/posthog/posthog-go v0.0.0-20211028072449-93c17c49e2b0
)

@@ -53,6 +54,7 @@ require (
    github.com/google/go-cmp v0.5.7 // indirect
    github.com/gorilla/websocket v1.4.2 // indirect
    github.com/josharian/native v1.0.0 // indirect
    github.com/kr/text v0.2.0 // indirect
    github.com/leodido/go-urn v1.2.1 // indirect
    github.com/mdlayher/genetlink v1.2.0 // indirect
    github.com/mdlayher/netlink v1.6.0 // indirect
@@ -61,6 +63,7 @@ require (
    github.com/opencontainers/image-spec v1.0.1 // indirect
    github.com/pkg/errors v0.9.1 // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
    github.com/rogpeppe/go-internal v1.8.0 // indirect
    github.com/russross/blackfriday/v2 v2.1.0 // indirect
    github.com/spf13/afero v1.3.2 // indirect
    github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect
@@ -51,17 +51,12 @@ func CreateAccessKey(accesskey models.AccessKey, network models.Network) (models.AccessKey, error) {

    netID := network.NetID

    commsNetID, err := FetchCommsNetID()
    if err != nil {
        return models.AccessKey{}, errors.New("could not retrieve comms netid")
    }

    var accessToken models.AccessToken
    s := servercfg.GetServerConfig()
    servervals := models.ServerConfig{
        GRPCConnString: s.GRPCConnString,
        GRPCSSL:        s.GRPCSSL,
        CommsNetwork:   commsNetID,
        Server:         s.Server,
    }
    accessToken.ServerConfig = servervals
    accessToken.ClientConfig.Network = netID
@@ -14,5 +14,5 @@ type ClientConfig struct {
type ServerConfig struct {
    GRPCConnString string `json:"grpcconn"`
    GRPCSSL        string `json:"grpcssl"`
    CommsNetwork   string `json:"commsnetwork"`
    Server         string `json:"server"`
}
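The token's ServerConfig now carries the plain Server address alongside CommsNetwork, and GetCLIConfig further down copies it into cfg.Server.Server. A rough sketch of decoding such a token on the client side, assuming the access token is the base64-encoded JSON of these structs; the ServerConfig fields and json tags come from this diff, while the encoding, the outer tags, and the sample values are assumptions:

package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
)

// Mirrors the ServerConfig struct shown in this diff.
type ServerConfig struct {
    GRPCConnString string `json:"grpcconn"`
    GRPCSSL        string `json:"grpcssl"`
    CommsNetwork   string `json:"commsnetwork"`
    Server         string `json:"server"`
}

// AccessToken is a reduced stand-in for models.AccessToken; outer tags are assumptions.
type AccessToken struct {
    ServerConfig ServerConfig `json:"servercfg"`
    ClientConfig struct {
        Network string `json:"network"`
        Key     string `json:"key"`
    } `json:"clientcfg"`
}

func main() {
    // Example token built the same way for illustration; real tokens come from CreateAccessKey.
    raw, _ := json.Marshal(AccessToken{ServerConfig: ServerConfig{Server: "203.0.113.10", CommsNetwork: "meshcomms"}})
    token := base64.StdEncoding.EncodeToString(raw)

    var decoded AccessToken
    data, err := base64.StdEncoding.DecodeString(token)
    if err == nil {
        err = json.Unmarshal(data, &decoded)
    }
    fmt.Println(decoded.ServerConfig.Server, decoded.ServerConfig.CommsNetwork, err)
}
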
@@ -79,7 +79,7 @@ type Node struct {
    OS          string      `json:"os" bson:"os" yaml:"os"`
    MTU         int32       `json:"mtu" bson:"mtu" yaml:"mtu"`
    Version     string      `json:"version" bson:"version" yaml:"version"`
    CommID      string      `json:"commid" bson:"commid" yaml:"comid"`
    Server      string      `json:"server" bson:"server" yaml:"server"`
    TrafficKeys TrafficKeys `json:"traffickeys" bson:"traffickeys" yaml:"traffickeys"`
}
@@ -393,6 +393,9 @@ func (newNode *Node) Fill(currentNode *Node) {
    if newNode.IsHub == "" {
        newNode.IsHub = currentNode.IsHub
    }
    if newNode.Server == "" {
        newNode.Server = currentNode.Server
    }
}

// StringWithCharset - returns random string inside defined charset
@@ -42,11 +42,6 @@ func JoinComms(cfg *config.ClientConfig) error {
// Join - join command to run from cli
func Join(cfg *config.ClientConfig, privateKey string) error {
    var err error
    //check if comms network exists
    if err = JoinComms(cfg); err != nil {
        return err
    }

    //join network
    err = functions.JoinNetwork(cfg, privateKey, false)
    if err != nil && !cfg.DebugOn {
@@ -98,13 +93,13 @@ func Leave(cfg *config.ClientConfig, force bool) error {
    } else {
        logger.Log(0, "success")
    }
    nets, err := ncutils.GetSystemNetworks()
    if err == nil && len(nets) == 1 {
        if nets[0] == cfg.Node.CommID {
            logger.Log(1, "detected comms as remaining network, removing...")
            err = functions.LeaveNetwork(nets[0], true)
        }
    }
    //nets, err := ncutils.GetSystemNetworks()
    //if err == nil && len(nets) == 1 {
    //if nets[0] == cfg.Node.CommID {
    //logger.Log(1, "detected comms as remaining network, removing...")
    //err = functions.LeaveNetwork(nets[0], true)
    //}
    //}
    return err
}
@@ -34,6 +34,7 @@ type ServerConfig struct {
    AccessKey    string `yaml:"accesskey"`
    GRPCSSL      string `yaml:"grpcssl"`
    CommsNetwork string `yaml:"commsnetwork"`
    Server       string `yaml:"server"`
}

// Write - writes the config of a client to disk
@@ -188,7 +189,7 @@ func GetCLIConfig(c *cli.Context) (ClientConfig, string, error) {
    cfg.Server.AccessKey = accesstoken.ClientConfig.Key
    cfg.Node.LocalRange = accesstoken.ClientConfig.LocalRange
    cfg.Server.GRPCSSL = accesstoken.ServerConfig.GRPCSSL
    cfg.Server.CommsNetwork = accesstoken.ServerConfig.CommsNetwork
    cfg.Server.Server = accesstoken.ServerConfig.Server
    if c.String("grpcserver") != "" {
        cfg.Server.GRPCAddress = c.String("grpcserver")
    }
@@ -36,42 +36,39 @@ type cachedMessage struct {

// Daemon runs netclient daemon from command line
func Daemon() error {
    var exists = struct{}{}
    serverSet := make(map[string]struct{})
    // == initial pull of all networks ==
    networks, _ := ncutils.GetSystemNetworks()
    for _, network := range networks {
        cfg := config.ClientConfig{}
        cfg.Network = network
        cfg.ReadConfig()
        serverSet[cfg.Server.Server] = exists
        //temporary code --- remove in version v0.13.0
        removeHostDNS(network, ncutils.IsWindows())
        // end of code to be removed in version v0.13.0
        var cfg config.ClientConfig
        cfg.Network = network
        cfg.ReadConfig()
        initialPull(cfg.Network)
    }

    // == get all the comms networks on machine ==
    commsNetworks, err := getCommsNetworks(networks[:])
    if err != nil {
        return errors.New("no comm networks exist")
    }

    // == subscribe to all nodes on each comms network on machine ==
    for currCommsNet := range commsNetworks {
        logger.Log(1, "started comms network daemon, ", currCommsNet)
    // == subscribe to all nodes for each on machine ==
    for server := range serverSet {
        logger.Log(1, "started daemon for server ", server)
        ctx, cancel := context.WithCancel(context.Background())
        networkcontext.Store(currCommsNet, cancel)
        go messageQueue(ctx, currCommsNet)
        networkcontext.Store(server, cancel)
        go messageQueue(ctx, server)
    }

    // == add waitgroup and cancel for checkin routine ==
    wg := sync.WaitGroup{}
    ctx, cancel := context.WithCancel(context.Background())
    wg.Add(1)
    go Checkin(ctx, &wg, commsNetworks)
    go Checkin(ctx, &wg, serverSet)
    quit := make(chan os.Signal, 1)
    signal.Notify(quit, syscall.SIGTERM, os.Interrupt, os.Kill)
    signal.Notify(quit, syscall.SIGTERM, os.Interrupt)
    <-quit
    for currCommsNet := range commsNetworks {
        if cancel, ok := networkcontext.Load(currCommsNet); ok {
    for server := range serverSet {
        if cancel, ok := networkcontext.Load(server); ok {
            cancel.(context.CancelFunc)()
        }
    }
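Because the hunk above interleaves the removed comms-network plumbing with its server-keyed replacement, the new flow of Daemon() is easier to see in isolation: dedupe the server of every local network config into a set, run one messageQueue goroutine per server, and keep a cancel function per server for shutdown. A stripped-down sketch using the same names where they appear in the diff; config loading and the MQTT loop are stubbed out here:

package main

import (
    "context"
    "os"
    "os/signal"
    "sync"
    "syscall"
)

var networkcontext sync.Map // server address -> context.CancelFunc

// messageQueue stands in for the real MQTT subscriber loop from the diff.
func messageQueue(ctx context.Context, server string) { <-ctx.Done() }

// daemonSketch mirrors the new Daemon(): one MQ routine per unique server.
func daemonSketch(serversOfLocalNetworks []string) {
    serverSet := make(map[string]struct{})
    for _, s := range serversOfLocalNetworks {
        serverSet[s] = struct{}{} // dedupe: many networks can share one server
    }

    for server := range serverSet {
        ctx, cancel := context.WithCancel(context.Background())
        networkcontext.Store(server, cancel)
        go messageQueue(ctx, server)
    }

    quit := make(chan os.Signal, 1)
    signal.Notify(quit, syscall.SIGTERM, os.Interrupt)
    <-quit // block until shutdown is requested

    for server := range serverSet {
        if cancel, ok := networkcontext.Load(server); ok {
            cancel.(context.CancelFunc)()
        }
    }
}

func main() {
    daemonSketch([]string{"203.0.113.10"})
}
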
@@ -101,16 +98,14 @@ func UpdateKeys(nodeCfg *config.ClientConfig, client mqtt.Client) error {
    }

    nodeCfg.Node.PublicKey = key.PublicKey().String()
    var commsCfg = getCommsCfgByNode(&nodeCfg.Node)
    PublishNodeUpdate(&commsCfg, nodeCfg)
    PublishNodeUpdate(nodeCfg)
    return nil
}

// PingServer -- checks if server is reachable
// use commsCfg only*
func PingServer(commsCfg *config.ClientConfig) error {
    node := getServerAddress(commsCfg)
    pinger, err := ping.NewPinger(node)
func PingServer(cfg *config.ClientConfig) error {
    pinger, err := ping.NewPinger(cfg.Server.Server)
    if err != nil {
        return err
    }
@@ -120,6 +115,7 @@ func PingServer(commsCfg *config.ClientConfig) error {
    if stats.PacketLoss == 100 {
        return errors.New("ping error")
    }
    logger.Log(3, "ping of server", cfg.Server.Server, "was successful")
    return nil
}
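The middle of PingServer falls between the two hunks above, so only the constructor and the final PacketLoss check are visible. A self-contained sketch of the same check with the go-ping module added in go.mod; the count and timeout values are arbitrary choices for the example:

package main

import (
    "errors"
    "fmt"
    "time"

    "github.com/go-ping/ping"
)

// pingServer reports an error if the address is unreachable, mirroring the
// PacketLoss == 100 check in the diff above.
func pingServer(address string) error {
    pinger, err := ping.NewPinger(address)
    if err != nil {
        return err
    }
    pinger.Count = 3                  // arbitrary for the example
    pinger.Timeout = 5 * time.Second  // arbitrary for the example
    // pinger.SetPrivileged(true) may be needed depending on OS and permissions
    if err := pinger.Run(); err != nil { // blocks until finished
        return err
    }
    stats := pinger.Statistics()
    if stats.PacketLoss == 100 {
        return errors.New("ping error")
    }
    return nil
}

func main() {
    fmt.Println(pingServer("203.0.113.10"))
}
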
@@ -140,16 +136,12 @@ func setSubscriptions(client mqtt.Client, nodeCfg *config.ClientConfig) {
        logger.Log(0, token.Error().Error())
        return
    }
    if nodeCfg.DebugOn {
        logger.Log(0, fmt.Sprintf("subscribed to node updates for node %s update/%s/%s", nodeCfg.Node.Name, nodeCfg.Node.Network, nodeCfg.Node.ID))
    }
    logger.Log(3, fmt.Sprintf("subscribed to node updates for node %s update/%s/%s", nodeCfg.Node.Name, nodeCfg.Node.Network, nodeCfg.Node.ID))
    if token := client.Subscribe(fmt.Sprintf("peers/%s/%s", nodeCfg.Node.Network, nodeCfg.Node.ID), 0, mqtt.MessageHandler(UpdatePeers)); token.Wait() && token.Error() != nil {
        logger.Log(0, token.Error().Error())
        return
    }
    if nodeCfg.DebugOn {
        logger.Log(0, fmt.Sprintf("subscribed to peer updates for node %s peers/%s/%s", nodeCfg.Node.Name, nodeCfg.Node.Network, nodeCfg.Node.ID))
    }
    logger.Log(3, fmt.Sprintf("subscribed to peer updates for node %s peers/%s/%s", nodeCfg.Node.Name, nodeCfg.Node.Network, nodeCfg.Node.ID))
}

// on a delete usually, pass in the nodecfg to unsubscribe client broker communications
@@ -171,23 +163,109 @@ func unsubscribeNode(client mqtt.Client, nodeCfg *config.ClientConfig) {
}

// sets up Message Queue and subsribes/publishes updates to/from server
// the client should subscribe to ALL nodes that exist on unique comms network locally
func messageQueue(ctx context.Context, commsNet string) {
    var commsCfg config.ClientConfig
    commsCfg.Network = commsNet
    commsCfg.ReadConfig()
    logger.Log(0, "netclient daemon started for network: ", commsNet)
    client := setupMQTT(&commsCfg, false)
// the client should subscribe to ALL nodes that exist on server locally
func messageQueue(ctx context.Context, server string) {
    logger.Log(0, "netclient daemon started for server: ", server)
    client := setupMQTTSub(server)
    defer client.Disconnect(250)
    <-ctx.Done()
    logger.Log(0, "shutting down daemon for comms network ", commsNet)
    logger.Log(0, "shutting down daemon for server ", server)
}

// setupMQTTSub creates a connection to broker and subscribes to topic
// utilizes comms client configs to setup connections
func setupMQTTSub(server string) mqtt.Client {
    opts := mqtt.NewClientOptions()
    opts.AddBroker(server + ":1883") // TODO get the appropriate port of the comms mq server
    opts.ClientID = ncutils.MakeRandomString(23) // helps avoid id duplication on broker
    opts.SetDefaultPublishHandler(All)
    opts.SetAutoReconnect(true)
    opts.SetConnectRetry(true)
    opts.SetConnectRetryInterval(time.Second << 2)
    opts.SetKeepAlive(time.Minute >> 1)
    opts.SetWriteTimeout(time.Minute)
    opts.SetOnConnectHandler(func(client mqtt.Client) {
        networks, err := ncutils.GetSystemNetworks()
        if err != nil {
            logger.Log(0, "error retriving networks ", err.Error())
        }
        for _, network := range networks {
            var currNodeCfg config.ClientConfig
            currNodeCfg.Network = network
            currNodeCfg.ReadConfig()
            if currNodeCfg.Server.Server == server {
                setSubscriptions(client, &currNodeCfg)
            }
        }
    })
    opts.SetOrderMatters(true)
    opts.SetResumeSubs(true)
    opts.SetConnectionLostHandler(func(c mqtt.Client, e error) {
        logger.Log(0, "detected broker connection lost, running pull for all nodes")
        networks, err := ncutils.GetSystemNetworks()
        if err != nil {
            logger.Log(0, "error retriving networks ", err.Error())
        }
        for _, network := range networks {
            var cfg config.ClientConfig
            cfg.Network = network
            cfg.ReadConfig()
            _, err := Pull(cfg.Node.Network, true)
            if err != nil {
                logger.Log(0, "could not run pull, server unreachable: ", err.Error())
                logger.Log(0, "waiting to retry...")
            }
        }
        // don't think following log message is accurate
        //logger.Log(0, "connection re-established with mqtt server")
    })

    client := mqtt.NewClient(opts)
    tperiod := time.Now().Add(12 * time.Second)
    for {
        //if after 12 seconds, try a gRPC pull on the last try
        if time.Now().After(tperiod) {
            networks, err := ncutils.GetSystemNetworks()
            if err != nil {
                logger.Log(0, "error retriving networks ", err.Error())
            }
            for _, network := range networks {
                var cfg config.ClientConfig
                cfg.Network = network
                cfg.ReadConfig()
                if cfg.Server.Server == server {
                    _, err := Pull(cfg.Node.Network, true)
                    if err != nil {
                        logger.Log(0, "could not run pull, exiting ", cfg.Node.Network, " setup: ", err.Error())
                        return client
                    }
                }
            }
            time.Sleep(time.Second)
        }
        if token := client.Connect(); token.Wait() && token.Error() != nil {
            logger.Log(0, "unable to connect to broker, retrying ...")
            if time.Now().After(tperiod) {
                logger.Log(0, "could not connect to broker, exiting ", server, " setup: ", token.Error().Error())
                if strings.Contains(token.Error().Error(), "connectex") || strings.Contains(token.Error().Error(), "i/o timeout") {
                    logger.Log(0, "connection issue detected.. restarting daemon")
                    daemon.Restart()
                }
                return client
            }
        } else {
            break
        }
        time.Sleep(2 * time.Second)
    }
    return client
}

// setupMQTT creates a connection to broker and return client
// utilizes comms client configs to setup connections
func setupMQTT(commsCfg *config.ClientConfig, publish bool) mqtt.Client {
func setupMQTT(cfg *config.ClientConfig, publish bool) mqtt.Client {
    opts := mqtt.NewClientOptions()
    server := getServerAddress(commsCfg)
    server := cfg.Server.Server
    opts.AddBroker(server + ":1883") // TODO get the appropriate port of the comms mq server
    opts.ClientID = ncutils.MakeRandomString(23) // helps avoid id duplication on broker
    opts.SetDefaultPublishHandler(All)
@@ -213,8 +291,8 @@ func setupMQTT(commsCfg *config.ClientConfig, publish bool) mqtt.Client {
    opts.SetOrderMatters(true)
    opts.SetResumeSubs(true)
    opts.SetConnectionLostHandler(func(c mqtt.Client, e error) {
        logger.Log(0, "detected broker connection lost, running pull for ", commsCfg.Node.Network)
        _, err := Pull(commsCfg.Node.Network, true)
        logger.Log(0, "detected broker connection lost, running pull for ", cfg.Node.Network)
        _, err := Pull(cfg.Node.Network, true)
        if err != nil {
            logger.Log(0, "could not run pull, server unreachable: ", err.Error())
            logger.Log(0, "waiting to retry...")
@@ -227,10 +305,10 @@ func setupMQTT(commsCfg *config.ClientConfig, publish bool) mqtt.Client {
    for {
        //if after 12 seconds, try a gRPC pull on the last try
        if time.Now().After(tperiod) {
            logger.Log(0, "running pull for ", commsCfg.Node.Network)
            _, err := Pull(commsCfg.Node.Network, true)
            logger.Log(0, "running pull for ", cfg.Node.Network)
            _, err := Pull(cfg.Node.Network, true)
            if err != nil {
                logger.Log(0, "could not run pull, exiting ", commsCfg.Node.Network, " setup: ", err.Error())
                logger.Log(0, "could not run pull, exiting ", cfg.Node.Network, " setup: ", err.Error())
                return client
            }
            time.Sleep(time.Second)
@@ -238,10 +316,10 @@ func setupMQTT(commsCfg *config.ClientConfig, publish bool) mqtt.Client {
        if token := client.Connect(); token.Wait() && token.Error() != nil {
            logger.Log(0, "unable to connect to broker, retrying ...")
            if time.Now().After(tperiod) {
                logger.Log(0, "could not connect to broker, exiting ", commsCfg.Node.Network, " setup: ", token.Error().Error())
                logger.Log(0, "could not connect to broker, exiting ", cfg.Node.Network, " setup: ", token.Error().Error())
                if strings.Contains(token.Error().Error(), "connectex") || strings.Contains(token.Error().Error(), "i/o timeout") {
                    logger.Log(0, "connection issue detected.. pulling and restarting daemon")
                    Pull(commsCfg.Node.Network, true)
                    Pull(cfg.Node.Network, true)
                    daemon.Restart()
                }
                return client
@@ -255,8 +333,8 @@ func setupMQTT(commsCfg *config.ClientConfig, publish bool) mqtt.Client {
}

// publishes a message to server to update peers on this peer's behalf
func publishSignal(commsCfg, nodeCfg *config.ClientConfig, signal byte) error {
    if err := publish(commsCfg, nodeCfg, fmt.Sprintf("signal/%s", nodeCfg.Node.ID), []byte{signal}, 1); err != nil {
func publishSignal(nodeCfg *config.ClientConfig, signal byte) error {
    if err := publish(nodeCfg, fmt.Sprintf("signal/%s", nodeCfg.Node.ID), []byte{signal}, 1); err != nil {
        return err
    }
    return nil
@@ -324,24 +402,6 @@ func getServerAddress(cfg *config.ClientConfig) string {
    return server.Address
}

func getCommsNetworks(networks []string) (map[string]bool, error) {
    var cfg config.ClientConfig
    var response = make(map[string]bool, 1)
    for _, network := range networks {
        cfg.Network = network
        cfg.ReadConfig()
        response[cfg.Node.CommID] = true
    }
    return response, nil
}

func getCommsCfgByNode(node *models.Node) config.ClientConfig {
    var commsCfg config.ClientConfig
    commsCfg.Network = node.CommID
    commsCfg.ReadConfig()
    return commsCfg
}

// == Message Caches ==

func insert(network, which, cache string) {
@@ -297,8 +297,7 @@ func setListenPort(oldListenPort int32, cfg *config.ClientConfig) {

        // if newListenPort has been modified to find an available port, publish to server
        if cfg.Node.ListenPort != newListenPort {
            var currentCommsCfg = getCommsCfgByNode(&cfg.Node)
            PublishNodeUpdate(&currentCommsCfg, cfg)
            PublishNodeUpdate(cfg)
        }
    }
}
@@ -33,7 +33,6 @@ func NodeUpdate(client mqtt.Client, msg mqtt.Message) {
    var network = parseNetworkFromTopic(msg.Topic())
    nodeCfg.Network = network
    nodeCfg.ReadConfig()
    var commsCfg = getCommsCfgByNode(&nodeCfg.Node)

    data, dataErr := decryptMsg(&nodeCfg, msg.Payload())
    if dataErr != nil {
@@ -131,14 +130,14 @@ func NodeUpdate(client mqtt.Client, msg mqtt.Message) {
        // }
        // }
        // }
        doneErr := publishSignal(&commsCfg, &nodeCfg, ncutils.DONE)
        doneErr := publishSignal(&nodeCfg, ncutils.DONE)
        if doneErr != nil {
            logger.Log(0, "could not notify server to update peers after interface change")
        } else {
            logger.Log(0, "signalled finished interface update to server")
        }
    } else if hubChange {
        doneErr := publishSignal(&commsCfg, &nodeCfg, ncutils.DONE)
        doneErr := publishSignal(&nodeCfg, ncutils.DONE)
        if doneErr != nil {
            logger.Log(0, "could not notify server to update peers after hub change")
        } else {
@@ -15,7 +15,7 @@ import (

// Checkin -- go routine that checks for public or local ip changes, publishes changes
// if there are no updates, simply "pings" the server as a checkin
func Checkin(ctx context.Context, wg *sync.WaitGroup, currentComms map[string]bool) {
func Checkin(ctx context.Context, wg *sync.WaitGroup, currentComms map[string]struct{}) {
    defer wg.Done()
    for {
        select {
@@ -30,58 +30,50 @@ func Checkin(ctx context.Context, wg *sync.WaitGroup, currentComms map[string]bool) {
    if err != nil {
        return
    }
    for commsNet := range currentComms {
        var currCommsCfg config.ClientConfig
        currCommsCfg.Network = commsNet
        currCommsCfg.ReadConfig()
        for _, network := range networks {
            var nodeCfg config.ClientConfig
            nodeCfg.Network = network
            nodeCfg.ReadConfig()
            if nodeCfg.Node.CommID != commsNet {
                continue // skip if not on current comms network
    for _, network := range networks {
        var nodeCfg config.ClientConfig
        nodeCfg.Network = network
        nodeCfg.ReadConfig()
        if nodeCfg.Node.IsStatic != "yes" {
            extIP, err := ncutils.GetPublicIP()
            if err != nil {
                logger.Log(1, "error encountered checking public ip addresses: ", err.Error())
            }
            if nodeCfg.Node.IsStatic != "yes" {
                extIP, err := ncutils.GetPublicIP()
                if err != nil {
                    logger.Log(1, "error encountered checking public ip addresses: ", err.Error())
                }
                if nodeCfg.Node.Endpoint != extIP && extIP != "" {
                    logger.Log(1, "endpoint has changed from ", nodeCfg.Node.Endpoint, " to ", extIP)
                    nodeCfg.Node.Endpoint = extIP
                    if err := PublishNodeUpdate(&currCommsCfg, &nodeCfg); err != nil {
                        logger.Log(0, "could not publish endpoint change")
                    }
                }
                intIP, err := getPrivateAddr()
                if err != nil {
                    logger.Log(1, "error encountered checking private ip addresses: ", err.Error())
                }
                if nodeCfg.Node.LocalAddress != intIP && intIP != "" {
                    logger.Log(1, "local Address has changed from ", nodeCfg.Node.LocalAddress, " to ", intIP)
                    nodeCfg.Node.LocalAddress = intIP
                    if err := PublishNodeUpdate(&currCommsCfg, &nodeCfg); err != nil {
                        logger.Log(0, "could not publish local address change")
                    }
                }
            } else if nodeCfg.Node.IsLocal == "yes" && nodeCfg.Node.LocalRange != "" {
                localIP, err := ncutils.GetLocalIP(nodeCfg.Node.LocalRange)
                if err != nil {
                    logger.Log(1, "error encountered checking local ip addresses: ", err.Error())
                }
                if nodeCfg.Node.Endpoint != localIP && localIP != "" {
                    logger.Log(1, "endpoint has changed from "+nodeCfg.Node.Endpoint+" to ", localIP)
                    nodeCfg.Node.Endpoint = localIP
                    if err := PublishNodeUpdate(&currCommsCfg, &nodeCfg); err != nil {
                        logger.Log(0, "could not publish localip change")
                    }
            if nodeCfg.Node.Endpoint != extIP && extIP != "" {
                logger.Log(1, "endpoint has changed from ", nodeCfg.Node.Endpoint, " to ", extIP)
                nodeCfg.Node.Endpoint = extIP
                if err := PublishNodeUpdate(&nodeCfg); err != nil {
                    logger.Log(0, "could not publish endpoint change")
                }
            }
            if err := PingServer(&currCommsCfg); err != nil {
                logger.Log(0, "could not ping server on comms net, ", currCommsCfg.Network, "\n", err.Error())
            } else {
                Hello(&currCommsCfg, &nodeCfg)
            intIP, err := getPrivateAddr()
            if err != nil {
                logger.Log(1, "error encountered checking private ip addresses: ", err.Error())
            }
            if nodeCfg.Node.LocalAddress != intIP && intIP != "" {
                logger.Log(1, "local Address has changed from ", nodeCfg.Node.LocalAddress, " to ", intIP)
                nodeCfg.Node.LocalAddress = intIP
                if err := PublishNodeUpdate(&nodeCfg); err != nil {
                    logger.Log(0, "could not publish local address change")
                }
            }
        } else if nodeCfg.Node.IsLocal == "yes" && nodeCfg.Node.LocalRange != "" {
            localIP, err := ncutils.GetLocalIP(nodeCfg.Node.LocalRange)
            if err != nil {
                logger.Log(1, "error encountered checking local ip addresses: ", err.Error())
            }
            if nodeCfg.Node.Endpoint != localIP && localIP != "" {
                logger.Log(1, "endpoint has changed from "+nodeCfg.Node.Endpoint+" to ", localIP)
                nodeCfg.Node.Endpoint = localIP
                if err := PublishNodeUpdate(&nodeCfg); err != nil {
                    logger.Log(0, "could not publish localip change")
                }
            }
        }
        if err := PingServer(&nodeCfg); err != nil {
            logger.Log(0, "could not ping server for , ", nodeCfg.Network, "\n", err.Error())
        } else {
            Hello(&nodeCfg)
        }
    }
}
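The Checkin hunk above is dense because the removed comms-network loop and the new per-network loop are interleaved. The new control flow reduces to: on every tick, walk each local network config, detect endpoint or local-address changes, publish a node update when something changed, and ping/hello that network's own server. A stripped-down skeleton under those assumptions; the tick interval, the select shape, and the stubbed helpers are illustrative, not copied from the commit:

package main

import (
    "context"
    "sync"
    "time"
)

// Stand-ins for the real netclient helpers referenced in the diff.
type clientConfig struct{ network, server, endpoint string }

func loadConfigs() []clientConfig               { return nil }
func currentPublicIP() string                   { return "" }
func publishNodeUpdate(cfg *clientConfig) error { return nil }
func pingServer(cfg *clientConfig) error        { return nil }
func hello(cfg *clientConfig)                   {}

// checkinSketch mirrors the new shape of Checkin: one loop over all local
// networks per tick, each network talking to its own server.
func checkinSketch(ctx context.Context, wg *sync.WaitGroup) {
    defer wg.Done()
    ticker := time.NewTicker(60 * time.Second) // interval is illustrative
    defer ticker.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-ticker.C:
            for _, cfg := range loadConfigs() {
                if ip := currentPublicIP(); ip != "" && ip != cfg.endpoint {
                    cfg.endpoint = ip
                    _ = publishNodeUpdate(&cfg) // endpoint changed, tell the server
                }
                if err := pingServer(&cfg); err == nil {
                    hello(&cfg) // plain checkin when the server is reachable
                }
            }
        }
    }
}

func main() {
    var wg sync.WaitGroup
    ctx, cancel := context.WithCancel(context.Background())
    wg.Add(1)
    go checkinSketch(ctx, &wg)
    time.Sleep(time.Second)
    cancel()
    wg.Wait()
}
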
@@ -89,7 +81,7 @@ func Checkin(ctx context.Context, wg *sync.WaitGroup, currentComms map[string]bool) {
}

// PublishNodeUpdates -- saves node and pushes changes to broker
func PublishNodeUpdate(commsCfg, nodeCfg *config.ClientConfig) error {
func PublishNodeUpdate(nodeCfg *config.ClientConfig) error {
    if err := config.Write(nodeCfg, nodeCfg.Network); err != nil {
        return err
    }
@@ -97,7 +89,7 @@ func PublishNodeUpdate(commsCfg, nodeCfg *config.ClientConfig) error {
    if err != nil {
        return err
    }
    if err = publish(commsCfg, nodeCfg, fmt.Sprintf("update/%s", nodeCfg.Node.ID), data, 1); err != nil {
    if err = publish(nodeCfg, fmt.Sprintf("update/%s", nodeCfg.Node.ID), data, 1); err != nil {
        return err
    }
    logger.Log(0, "sent a node update to server for node", nodeCfg.Node.Name, ", ", nodeCfg.Node.ID)
@@ -105,20 +97,21 @@ func PublishNodeUpdate(commsCfg, nodeCfg *config.ClientConfig) error {
}

// Hello -- ping the broker to let server know node it's alive and well
func Hello(commsCfg, nodeCfg *config.ClientConfig) {
    if err := publish(commsCfg, nodeCfg, fmt.Sprintf("ping/%s", nodeCfg.Node.ID), []byte(ncutils.Version), 0); err != nil {
func Hello(nodeCfg *config.ClientConfig) {
    if err := publish(nodeCfg, fmt.Sprintf("ping/%s", nodeCfg.Node.ID), []byte(ncutils.Version), 0); err != nil {
        logger.Log(0, fmt.Sprintf("error publishing ping, %v", err))
        logger.Log(0, "running pull on "+commsCfg.Node.Network+" to reconnect")
        _, err := Pull(commsCfg.Node.Network, true)
        logger.Log(0, "running pull on "+nodeCfg.Node.Network+" to reconnect")
        _, err := Pull(nodeCfg.Node.Network, true)
        if err != nil {
            logger.Log(0, "could not run pull on "+commsCfg.Node.Network+", error: "+err.Error())
            logger.Log(0, "could not run pull on "+nodeCfg.Node.Network+", error: "+err.Error())
        }
    }
    logger.Log(3, "server checkin complete")
}

// requires the commscfg in which to send traffic over and nodecfg of node that is publish the message
// node cfg is so that the traffic keys of that node may be fetched for encryption
func publish(commsCfg, nodeCfg *config.ClientConfig, dest string, msg []byte, qos byte) error {
func publish(nodeCfg *config.ClientConfig, dest string, msg []byte, qos byte) error {
    // setup the keys
    trafficPrivKey, err := auth.RetrieveTrafficKey(nodeCfg.Node.Network)
    if err != nil {
@@ -130,7 +123,7 @@ func publish(commsCfg, nodeCfg *config.ClientConfig, dest string, msg []byte, qos byte) error {
        return err
    }

    client := setupMQTT(commsCfg, true)
    client := setupMQTT(nodeCfg, true)
    defer client.Disconnect(250)
    encrypted, err := ncutils.Chunk(msg, serverPubKey, trafficPrivKey)
    if err != nil {
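publish() now takes only the node config: the broker address comes from cfg.Server.Server via setupMQTT, while the node's own traffic keys are still used to encrypt the payload for the server. A minimal paho-mqtt sketch of that publish path; the encryptForServer stub stands in for ncutils.Chunk, whose internals are not shown in this diff, and the topic shape, QoS, and port mirror the values above:

package main

import (
    "fmt"

    mqtt "github.com/eclipse/paho.mqtt.golang"
)

// encryptForServer stands in for ncutils.Chunk in the diff: encrypt msg for the
// server's traffic key using the node's private traffic key (details not shown here).
func encryptForServer(msg []byte) ([]byte, error) { return msg, nil }

// publishSketch mirrors the new publish(): connect to the node's own server,
// encrypt, publish to a node-scoped topic, then disconnect.
func publishSketch(server, nodeID string, msg []byte, qos byte) error {
    opts := mqtt.NewClientOptions()
    opts.AddBroker(server + ":1883") // same default broker port as the diff
    client := mqtt.NewClient(opts)
    if token := client.Connect(); token.Wait() && token.Error() != nil {
        return token.Error()
    }
    defer client.Disconnect(250)

    encrypted, err := encryptForServer(msg)
    if err != nil {
        return err
    }
    topic := fmt.Sprintf("update/%s", nodeID) // topic shape from PublishNodeUpdate above
    if token := client.Publish(topic, qos, false, encrypted); token.Wait() && token.Error() != nil {
        return token.Error()
    }
    return nil
}

func main() {
    fmt.Println(publishSketch("203.0.113.10", "node-id", []byte("hello"), 1))
}
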
@@ -96,7 +96,7 @@ func GetServerConfig() config.ServerConfig {
    cfg.ManageIPTables = ManageIPTables()
    services := strings.Join(GetPortForwardServiceList(), ",")
    cfg.PortForwardServices = services
    cfg.CommsID = GetCommsID()
    cfg.Server = GetServer()

    return cfg
}
@@ -412,6 +412,17 @@ func ManageIPTables() string {
    return manage
}

// GetServer - gets the server name
func GetServer() string {
    server := ""
    if os.Getenv("SERVER_NAME") != "" {
        server = os.Getenv("SERVER_NAME")
    } else if config.Config.Server.Server != "" {
        server = config.Config.Server.Server
    }
    return server
}

// IsDNSMode - should it run with DNS
func IsDNSMode() bool {
    isdns := true