Revert "adjusted main to use one single context"

This reverts commit 92d0d12e8f.
Author: 0xdcarns
Date:   2023-02-27 13:36:32 -05:00
Parent: 977c9c8c19
Commit: 2749e7311b

5 changed files with 105 additions and 56 deletions
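
The change being reverted had main() build one signal-aware context with signal.NotifyContext and hand it to every controller, with main blocking on ctx.Done(). This revert restores the earlier arrangement in which each long-running goroutine sets up its own signal handling (either signal.NotifyContext or a quit channel plus context.WithCancel) and shuts itself down. A minimal sketch of the two shapes with a single placeholder worker (none of these names come from the repository):

package main

import (
	"context"
	"os"
	"os/signal"
	"sync"
	"syscall"
)

// runWithSharedContext is the shape being reverted: main owns the
// signal-aware context and every worker returns once it is cancelled.
func runWithSharedContext() {
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM, os.Interrupt)
	defer stop()

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		<-ctx.Done() // worker exits when the shared context is cancelled
	}()

	<-ctx.Done()
	wg.Wait()
}

// runWithOwnSignals is the shape being restored: the goroutine installs its
// own signal handler and cancels its own context.
func runWithOwnSignals(wg *sync.WaitGroup) {
	defer wg.Done()

	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		quit := make(chan os.Signal, 1)
		signal.Notify(quit, syscall.SIGTERM, os.Interrupt)
		<-quit
		cancel()
	}()

	<-ctx.Done() // stand-in for doing work until shutdown
}

func main() {
	// Either shape blocks until SIGTERM or CTRL+C; this runs the restored one,
	// runWithSharedContext is kept only for comparison.
	var wg sync.WaitGroup
	wg.Add(1)
	go runWithOwnSignals(&wg)
	wg.Wait()
}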


@@ -4,8 +4,11 @@ import (
"context"
"fmt"
"net/http"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"time"
"github.com/gorilla/handlers"
@@ -30,7 +33,7 @@ var HttpHandlers = []interface{}{
}
// HandleRESTRequests - handles the rest requests
func HandleRESTRequests(wg *sync.WaitGroup, ctx context.Context) {
func HandleRESTRequests(wg *sync.WaitGroup) {
defer wg.Done()
r := mux.NewRouter()
@@ -56,14 +59,18 @@ func HandleRESTRequests(wg *sync.WaitGroup, ctx context.Context) {
}()
logger.Log(0, "REST Server successfully started on port ", port, " (REST)")
// Relay os.Interrupt to our channel (os.Interrupt = CTRL+C)
// Ignore other incoming signals
ctx, stop := signal.NotifyContext(context.TODO(), syscall.SIGTERM, os.Interrupt)
defer stop()
// Block main routine until a signal is received
// As long as user doesn't press CTRL+C a message is not passed and our main routine keeps running
<-ctx.Done()
// After receiving CTRL+C Properly stop the server
logger.Log(0, "Stopping the REST server...")
if err := srv.Shutdown(context.TODO()); err != nil {
logger.Log(0, "REST shutdown error occurred -", err.Error())
}
logger.Log(0, "REST Server closed.")
logger.DumpFile(fmt.Sprintf("data/netmaker.log.%s", time.Now().Format(logger.TimeFormatDay)))
srv.Shutdown(context.TODO())
}
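
After the revert, HandleRESTRequests owns its shutdown path again: it builds a signal-aware context with signal.NotifyContext, blocks until SIGTERM or CTRL+C arrives, and then calls srv.Shutdown itself. A standalone sketch of that idiom; the port and the bounded shutdown context are illustrative assumptions (the diff above uses context.TODO() for the shutdown call):

package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	srv := &http.Server{Addr: ":8081"} // port is an example, not from the diff

	go func() {
		// ErrServerClosed is the expected result of the Shutdown call below.
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Fatal(err)
		}
	}()

	// Block until SIGTERM or CTRL+C, as the restored handler does.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM, os.Interrupt)
	defer stop()
	<-ctx.Done()

	// A bounded context keeps a hung connection from stalling shutdown forever.
	shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := srv.Shutdown(shutdownCtx); err != nil {
		log.Println("REST shutdown error occurred -", err)
	}
}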


@@ -1,6 +1,7 @@
package logic
import (
"context"
"encoding/json"
"errors"
"fmt"
@@ -420,6 +421,35 @@ func updateProNodeACLS(node *models.Node) error {
return nil
}
func PurgePendingNodes(ctx context.Context) {
ticker := time.NewTicker(NodePurgeCheckTime)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
nodes, err := GetAllNodes()
if err != nil {
logger.Log(0, "PurgePendingNodes failed to retrieve nodes", err.Error())
continue
}
for _, node := range nodes {
if node.PendingDelete {
modified := node.LastModified
if time.Since(modified) > NodePurgeTime {
if err := DeleteNode(&node, true); err != nil {
logger.Log(0, "failed to purge node", node.ID.String(), err.Error())
} else {
logger.Log(0, "purged node ", node.ID.String())
}
}
}
}
}
}
}
// createNode - creates a node in database
func createNode(node *models.Node) error {
host, err := GetHost(node.HostID.String())
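
PurgePendingNodes, added back by this revert, is a ticker loop: every NodePurgeCheckTime it deletes nodes whose PendingDelete flag has outlived NodePurgeTime, and it returns as soon as its context is cancelled. In this commit it is started from runMessageQueue (go logic.PurgePendingNodes(ctx), further down in main.go). A reduced driver sketch showing how cancellation stops such a loop (purgeLoop and the intervals are illustrative, not the real function):

package main

import (
	"context"
	"fmt"
	"time"
)

// purgeLoop mirrors the shape of logic.PurgePendingNodes: a ticker plus a
// select on ctx.Done(), so cancelling the context stops the loop promptly.
func purgeLoop(ctx context.Context, every time.Duration) {
	ticker := time.NewTicker(every)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			fmt.Println("purge loop stopped")
			return
		case <-ticker.C:
			// ... fetch nodes and delete the ones whose PendingDelete flag
			// has outlived the purge window ...
			fmt.Println("checking for nodes pending deletion")
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go purgeLoop(ctx, 500*time.Millisecond)

	time.Sleep(2 * time.Second) // let a few ticks fire
	cancel()                    // ctx.Done() closes and the loop returns
	time.Sleep(100 * time.Millisecond)
}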

main.go

@@ -36,16 +36,12 @@ func main() {
setupConfig(*absoluteConfigPath)
servercfg.SetVersion(version)
fmt.Println(models.RetrieveLogo()) // print the logo
initialize() // initial db and acls
// fmt.Println(models.ProLogo())
initialize() // initial db and acls; gen cert if required
setGarbageCollection()
setVerbosity()
defer database.CloseDB()
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM, os.Interrupt)
defer stop()
var waitGroup sync.WaitGroup
startControllers(&waitGroup, ctx) // start the api endpoint and mq and stun
<-ctx.Done()
waitGroup.Wait()
startControllers() // start the api endpoint and mq
}
func setupConfig(absoluteConfigPath string) {
@@ -114,7 +110,8 @@ func initialize() { // Client Mode Prereq Check
}
}
func startControllers(wg *sync.WaitGroup, ctx context.Context) {
func startControllers() {
var waitnetwork sync.WaitGroup
if servercfg.IsDNSMode() {
err := logic.SetDNS()
if err != nil {
@@ -130,13 +127,13 @@ func startControllers(wg *sync.WaitGroup, ctx context.Context) {
logger.FatalLog("Unable to Set host. Exiting...", err.Error())
}
}
wg.Add(1)
go controller.HandleRESTRequests(wg, ctx)
waitnetwork.Add(1)
go controller.HandleRESTRequests(&waitnetwork)
}
//Run MessageQueue
if servercfg.IsMessageQueueBackend() {
wg.Add(1)
go runMessageQueue(wg, ctx)
waitnetwork.Add(1)
go runMessageQueue(&waitnetwork)
}
if !servercfg.IsRestBackend() && !servercfg.IsMessageQueueBackend() {
@@ -144,17 +141,34 @@ func startControllers(wg *sync.WaitGroup, ctx context.Context) {
}
// starts the stun server
wg.Add(1)
go stunserver.Start(wg, ctx)
waitnetwork.Add(1)
go stunserver.Start(&waitnetwork)
if servercfg.IsProxyEnabled() {
waitnetwork.Add(1)
go func() {
defer waitnetwork.Done()
_, cancel := context.WithCancel(context.Background())
waitnetwork.Add(1)
//go nmproxy.Start(ctx, logic.ProxyMgmChan, servercfg.GetAPIHost())
quit := make(chan os.Signal, 1)
signal.Notify(quit, syscall.SIGTERM, os.Interrupt)
<-quit
cancel()
}()
}
waitnetwork.Wait()
}
// Should we be using a context vice a waitgroup????????????
func runMessageQueue(wg *sync.WaitGroup, ctx context.Context) {
func runMessageQueue(wg *sync.WaitGroup) {
defer wg.Done()
brokerHost, secure := servercfg.GetMessageQueueEndpoint()
logger.Log(0, "connecting to mq broker at", brokerHost, "with TLS?", fmt.Sprintf("%v", secure))
mq.SetupMQTT()
defer mq.CloseClient()
ctx, cancel := context.WithCancel(context.Background())
go mq.Keepalive(ctx)
go func() {
peerUpdate := make(chan *models.Node)
@@ -165,7 +179,11 @@ func runMessageQueue(wg *sync.WaitGroup, ctx context.Context) {
}
}
}()
<-ctx.Done()
go logic.PurgePendingNodes(ctx)
quit := make(chan os.Signal, 1)
signal.Notify(quit, syscall.SIGTERM, os.Interrupt)
<-quit
cancel()
logger.Log(0, "Message Queue shutting down")
}
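
startControllers goes back to owning its own WaitGroup (waitnetwork): each backend that is enabled registers with Add(1) before its goroutine starts, and the function blocks on Wait() until every one of them has called Done, so main simply calls startControllers() and returns when it does. A reduced sketch of that conditional fan-out; the restEnabled/mqEnabled flags stand in for the servercfg checks and the workers are placeholders:

package main

import (
	"fmt"
	"sync"
	"time"
)

// Stand-ins for servercfg.IsRestBackend() and servercfg.IsMessageQueueBackend().
var restEnabled, mqEnabled = true, true

func handleREST(wg *sync.WaitGroup) {
	defer wg.Done()
	fmt.Println("REST backend running")
	time.Sleep(100 * time.Millisecond) // pretend to serve until shutdown
}

func runMessageQueue(wg *sync.WaitGroup) {
	defer wg.Done()
	fmt.Println("message queue running")
	time.Sleep(100 * time.Millisecond)
}

// startControllers mirrors the restored shape: register each enabled backend
// on a local WaitGroup, then block until every goroutine has finished.
func startControllers() {
	var waitnetwork sync.WaitGroup
	if restEnabled {
		waitnetwork.Add(1)
		go handleREST(&waitnetwork)
	}
	if mqEnabled {
		waitnetwork.Add(1)
		go runMessageQueue(&waitnetwork)
	}
	waitnetwork.Wait()
}

func main() {
	startControllers()
	fmt.Println("all controllers stopped")
}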


@@ -100,8 +100,3 @@ func Keepalive(ctx context.Context) {
func IsConnected() bool {
return mqclient != nil && mqclient.IsConnected()
}
// CloseClient - function to close the mq connection from server
func CloseClient() {
mqclient.Disconnect(250)
}
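
This hunk removes CloseClient, and with it the explicit mqclient.Disconnect(250) on shutdown (the pre-revert runMessageQueue deferred mq.CloseClient()). What stays is the nil-safe IsConnected check, whose short-circuit on mqclient != nil avoids calling a method on a client that was never set up. A small sketch of that guard in isolation (package and variable names here are placeholders):

package main

import (
	"fmt"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

// mqclient mirrors the package-level client in the mq package; it stays nil
// until the MQTT setup has run.
var mqclient mqtt.Client

// isConnected copies the guard kept in this file: the nil check must come
// first, otherwise IsConnected would be called on a nil interface and panic.
func isConnected() bool {
	return mqclient != nil && mqclient.IsConnected()
}

func main() {
	fmt.Println("connected before setup?", isConnected()) // false, no panic
}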


@@ -4,8 +4,11 @@ import (
"context"
"fmt"
"net"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"github.com/gravitl/netmaker/logger"
"github.com/gravitl/netmaker/servercfg"
@@ -20,6 +23,7 @@ import (
// backwards compatibility with RFC 3489.
type Server struct {
Addr string
Ctx context.Context
}
var (
@@ -56,58 +60,48 @@ func basicProcess(addr net.Addr, b []byte, req, res *stun.Message) error {
)
}
func (s *Server) serveConn(c net.PacketConn, res, req *stun.Message, ctx context.Context) error {
func (s *Server) serveConn(c net.PacketConn, res, req *stun.Message) error {
if c == nil {
return nil
}
go func(ctx context.Context) {
<-ctx.Done()
if c != nil {
// kill connection on server shutdown
c.Close()
}
}(ctx)
buf := make([]byte, 1024)
n, addr, err := c.ReadFrom(buf) // this be blocky af
n, addr, err := c.ReadFrom(buf)
if err != nil {
if !strings.Contains(err.Error(), "use of closed network connection") {
logger.Log(1, "STUN read error:", err.Error())
}
logger.Log(1, "ReadFrom: %v", err.Error())
return nil
}
if _, err = req.Write(buf[:n]); err != nil {
logger.Log(1, "STUN write error:", err.Error())
logger.Log(1, "Write: %v", err.Error())
return err
}
if err = basicProcess(addr, buf[:n], req, res); err != nil {
if err == errNotSTUNMessage {
return nil
}
logger.Log(1, "STUN process error:", err.Error())
logger.Log(1, "basicProcess: %v", err.Error())
return nil
}
_, err = c.WriteTo(res.Raw, addr)
if err != nil {
logger.Log(1, "STUN response write error", err.Error())
logger.Log(1, "WriteTo: %v", err.Error())
}
return err
}
// Serve reads packets from connections and responds to BINDING requests.
func (s *Server) serve(c net.PacketConn, ctx context.Context) error {
func (s *Server) serve(c net.PacketConn) error {
var (
res = new(stun.Message)
req = new(stun.Message)
)
for {
select {
case <-ctx.Done():
logger.Log(0, "shut down STUN server")
case <-s.Ctx.Done():
logger.Log(0, "Shutting down stun server...")
c.Close()
return nil
default:
if err := s.serveConn(c, res, req, ctx); err != nil {
if err := s.serveConn(c, res, req); err != nil {
logger.Log(1, "serve: %v", err.Error())
continue
}
@@ -125,8 +119,9 @@ func listenUDPAndServe(ctx context.Context, serverNet, laddr string) error {
}
s := &Server{
Addr: laddr,
Ctx: ctx,
}
return s.serve(c, ctx)
return s.serve(c)
}
func normalize(address string) string {
@@ -140,15 +135,19 @@ func normalize(address string) string {
}
// Start - starts the stun server
func Start(wg *sync.WaitGroup, ctx context.Context) {
defer wg.Done()
func Start(wg *sync.WaitGroup) {
ctx, cancel := context.WithCancel(context.Background())
go func(wg *sync.WaitGroup) {
defer wg.Done()
quit := make(chan os.Signal, 1)
signal.Notify(quit, syscall.SIGTERM, os.Interrupt)
<-quit
cancel()
}(wg)
normalized := normalize(fmt.Sprintf("0.0.0.0:%d", servercfg.GetStunPort()))
logger.Log(0, "netmaker-stun listening on", normalized, "via udp")
if err := listenUDPAndServe(ctx, "udp", normalized); err != nil {
if strings.Contains(err.Error(), "closed network connection") {
logger.Log(0, "shutdown STUN server")
} else {
logger.Log(0, "server: ", err.Error())
}
err := listenUDPAndServe(ctx, "udp", normalized)
if err != nil {
logger.Log(0, "failed to start stun server: ", err.Error())
}
}
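
The serveConn variant removed here watched the context from a helper goroutine and closed the PacketConn on shutdown, relying on the fact that closing the connection unblocks a pending ReadFrom with a "use of closed network connection" error; the restored serve loop instead closes the connection from its s.Ctx.Done() case between reads, after the signal goroutine in Start cancels the context. A standalone sketch of that close-to-unblock mechanism (address and timings are illustrative):

package main

import (
	"context"
	"fmt"
	"net"
	"strings"
	"time"
)

func main() {
	// A throwaway UDP socket stands in for the STUN listener.
	c, err := net.ListenPacket("udp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		<-ctx.Done()
		c.Close() // kill connection on shutdown; unblocks ReadFrom below
	}()
	go func() {
		time.Sleep(200 * time.Millisecond)
		cancel() // stand-in for SIGTERM / CTRL+C
	}()

	buf := make([]byte, 1024)
	_, _, err = c.ReadFrom(buf) // blocks until a packet arrives or Close is called
	if err != nil && strings.Contains(err.Error(), "use of closed network connection") {
		fmt.Println("read unblocked by Close; shutting down cleanly")
	} else {
		fmt.Println("ReadFrom returned:", err)
	}
}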