fix printing, delay throttle

This commit is contained in:
Tom Limoncelli 2025-09-09 17:18:22 -04:00
parent ce24165b26
commit 7e39a2ce27
2 changed files with 83 additions and 24 deletions

View file

@ -51,7 +51,7 @@ type PPreviewArgs struct {
Notify bool
WarnChanges bool
ConcurMode string
ConcurMax int
ConcurMax int // Maximum number of concurrent connections
NoPopulate bool
DePopulate bool
PopulateOnPreview bool
@ -99,6 +99,13 @@ func (args *PPreviewArgs) flags() []cli.Flag {
Destination: &args.ConcurMax,
Value: 999,
Usage: `Maximum number of concurrent connections`,
Action: func(c *cli.Context, v int) error {
if v < 1 {
fmt.Printf("%d is not a valid value for --cmax. Values must be 1 or greater\n", v)
os.Exit(1)
}
return nil
},
})
flags = append(flags, &cli.BoolFlag{
Name: "no-populate",
@ -123,7 +130,7 @@ func (args *PPreviewArgs) flags() []cli.Flag {
})
flags = append(flags, &cli.IntFlag{
Name: "reportmax",
Hidden: true,
Hidden: false,
Usage: `Limit the IGNORE/NO_PURGE report to this many lines (Experimental. Will change in the future.)`,
Action: func(ctx *cli.Context, maxreport int) error {
printer.MaxReport = maxreport
@ -199,7 +206,7 @@ func prun(args PPreviewArgs, push bool, interactive bool, out printer.CLI, repor
return err
}
out.PrintfIf(fullMode, "Reading creds.json or equiv.\n")
out.PrintfIf(fullMode, "Reading creds: %q\n", args.CredsFile)
providerConfigs, err := credsfile.LoadProviderConfigs(args.CredsFile)
if err != nil {
return err
@ -222,6 +229,7 @@ func prun(args PPreviewArgs, push bool, interactive bool, out printer.CLI, repor
// Loop over all (or some) zones:
zonesToProcess := whichZonesToProcess(cfg.Domains, args.Domains)
zonesSerial, zonesConcurrent := splitConcurrent(zonesToProcess, args.ConcurMode)
zonesConcurrent = optimizeOrder(zonesConcurrent)
var totalCorrections int
var reportItems []*ReportItem
@ -232,8 +240,8 @@ func prun(args PPreviewArgs, push bool, interactive bool, out printer.CLI, repor
if !args.NoPopulate {
out.PrintfIf(fullMode, "PHASE 1: CHECKING for missing zones\n")
t := throttler.New(args.ConcurMax, len(zonesConcurrent))
out.PrintfIf(fullMode, "CONCURRENTLY checking for %d zone(s)\n", len(zonesConcurrent))
for _, zone := range optimizeOrder(zonesConcurrent) {
out.Printf("CONCURRENTLY checking for %d zone(s)\n", len(zonesConcurrent))
for i, zone := range zonesConcurrent {
out.PrintfIf(fullMode, "Concurrently checking for zone: %q\n", zone.Name)
go func(zone *models.DomainConfig) {
err := oneZonePopulate(zone, zcache)
@ -242,21 +250,36 @@ func prun(args PPreviewArgs, push bool, interactive bool, out printer.CLI, repor
}
t.Done(err)
}(zone)
// Delay the last call to t.Throttle() until the serial processing is done.
if i != ultimate(zonesConcurrent) {
errorCount := t.Throttle()
if errorCount > 0 {
anyErrors = true
}
}
//out.PrintfIf(fullMode && len(zonesConcurrent) > 0, "Waiting for concurrent checking(s) to complete...")
//out.PrintfIf(fullMode && len(zonesConcurrent) > 0, "DONE\n")
out.PrintfIf(fullMode, "SERIALLY checking for %d zone(s)\n", len(zonesSerial))
}
out.Printf("SERIALLY checking for %d zone(s)\n", len(zonesSerial))
for _, zone := range zonesSerial {
out.PrintfIf(fullMode, "Serially checking for zone: %q\n", zone.Name)
out.Printf("Serially checking for zone: %q\n", zone.Name)
if err := oneZonePopulate(zone, zcache); err != nil {
anyErrors = true
}
}
if len(zonesConcurrent) > 0 {
if printer.DefaultPrinter.Verbose {
out.PrintfIf(true, "Waiting for concurrent checking(s) to complete...\n")
} else {
out.PrintfIf(true, "Waiting for concurrent checking(s) to complete...")
}
errorCount := t.Throttle()
if errorCount > 0 {
anyErrors = true
}
out.PrintfIf(true, "DONE\n")
}
for _, zone := range zonesToProcess {
started := false // Do not emit noise when no provider has corrections.
providersToProcess := whichProvidersToProcess(zone.DNSProviderInstances, args.Providers)
@ -283,8 +306,8 @@ func prun(args PPreviewArgs, push bool, interactive bool, out printer.CLI, repor
out.PrintfIf(fullMode, "PHASE 2: GATHERING data\n")
t := throttler.New(args.ConcurMax, len(zonesConcurrent))
out.Printf("CONCURRENTLY gathering %d zone(s)\n", len(zonesConcurrent))
for _, zone := range optimizeOrder(zonesConcurrent) {
out.Printf("CONCURRENTLY gathering records of %d zone(s)\n", len(zonesConcurrent))
for i, zone := range zonesConcurrent {
out.PrintfIf(fullMode, "Concurrently gathering: %q\n", zone.Name)
go func(zone *models.DomainConfig, args PPreviewArgs, zcache *cmdZoneCache) {
err := oneZone(zone, args)
@ -293,22 +316,35 @@ func prun(args PPreviewArgs, push bool, interactive bool, out printer.CLI, repor
}
t.Done(err)
}(zone, args, zcache)
// Delay the last call to t.Throttle() until the serial processing is done.
if i != ultimate(zonesConcurrent) {
errorCount := t.Throttle()
if errorCount > 0 {
anyErrors = true
}
}
// TODO(tlim): It would be nice if the concurrent gathering overlapped with the serial gathering.
// This could be achieved by delaying the final call to t.Throttle() until after the serial gathering.
out.Printf("SERIALLY gathering %d zone(s)\n", len(zonesSerial))
}
out.Printf("SERIALLY gathering records of %d zone(s)\n", len(zonesSerial))
for _, zone := range zonesSerial {
out.Printf("Serially Gathering: %q\n", zone.Name)
if err := oneZone(zone, args); err != nil {
anyErrors = true
}
}
//out.PrintfIf(len(zonesConcurrent) > 0, "Waiting for concurrent gathering(s) to complete...")
//out.PrintfIf(len(zonesConcurrent) > 0, "DONE\n")
if len(zonesConcurrent) > 0 {
msg := "Waiting for concurrent gathering(s) to complete..."
if printer.DefaultPrinter.Verbose {
msg = "Waiting for concurrent gathering(s) to complete...\n"
}
out.PrintfIf(true, msg)
errorCount := t.Throttle()
if errorCount > 0 {
anyErrors = true
}
out.PrintfIf(true, "DONE\n")
}
anyErrors = cmp.Or(anyErrors, concurrentErrors.Load())
// Now we know what to do, print or do the tasks.

23
commands/ultimate.go Normal file
View file

@ -0,0 +1,23 @@
package commands
import "github.com/StackExchange/dnscontrol/v4/models"
/*
I proposed that Go add something like "len()" that returns the highest
index. This would avoid off-by-one errors. The proposed names include
ultimate(), ult(), high(), highest().
Nay-sayers said I should implement this as a function and see if I
actually used it. (I suspect the nay-sayers are perfect people that
never make off-by-one errors.)
That's what this file is about. It should be exactly the same (except
the first line) anywhere this is needed. After a few years I'll be
able to report if it actually helped.
Go will in-line this function.
*/
// ultimate returns the index of the last element of s — i.e. len(s)-1,
// or -1 for an empty slice. It exists purely to make "highest valid
// index" explicit at call sites and avoid off-by-one errors.
func ultimate(s []*models.DomainConfig) int {
	last := len(s) - 1
	return last
}