package normalize
import (
"fmt"
2022-12-26 22:11:52 +08:00
"sort"
2020-07-10 00:52:49 +08:00
"strconv"
2017-09-30 03:30:36 +08:00
"strings"
2020-04-15 04:47:30 +08:00
"github.com/StackExchange/dnscontrol/v3/models"
"github.com/StackExchange/dnscontrol/v3/pkg/spflib"
2022-12-26 22:11:52 +08:00
"golang.org/x/exp/constraints"
2017-09-30 03:30:36 +08:00
)
2022-12-26 22:11:52 +08:00
// sortedKeys returns the keys of m in ascending order so that callers can
// iterate over the map deterministically.
func sortedKeys[K constraints.Ordered, V any](m map[K]V) []K {
	keys := make([]K, len(m))
	i := 0
	for k := range m {
		keys[i] = k
		i++
	}
	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
	return keys
}
// flattenSPFs flattens and/or splits the SPF TXT records in each domain that
// request it via metadata, returning any errors and warnings encountered.
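// Records opt in through metadata: "flatten" names the include: targets to
// inline (e.g. "*" for all), and "split" supplies a label template containing
// %d that is used for overflow records when the flattened result exceeds the
// TXT size limits.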
func flattenSPFs(cfg *models.DNSConfig) []error {
	var cache spflib.CachingResolver
	var errs []error
	var err error
	for _, domain := range cfg.Domains {
		txtRecords := domain.Records.GetByType("TXT")

		// flatten all spf records that have the "flatten" metadata
		for _, txt := range txtRecords {
			var rec *spflib.SPFRecord
			txtTarget := strings.Join(txt.TxtStrings, "")
			if txt.Metadata["flatten"] != "" || txt.Metadata["split"] != "" {
				if cache == nil {
					cache, err = spflib.NewCache("spfcache.json")
					if err != nil {
						return []error{err}
					}
				}
				rec, err = spflib.Parse(txtTarget, cache)
				if err != nil {
					errs = append(errs, err)
					continue
				}
			}
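			// Flattening inlines the contents of the include: targets named
			// in the "flatten" metadata directly into the record, reducing
			// the number of DNS lookups an SPF check needs.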
			if flatten, ok := txt.Metadata["flatten"]; ok && strings.HasPrefix(txtTarget, "v=spf1") {
				rec = rec.Flatten(flatten)
				err = txt.SetTargetTXT(rec.TXT())
				if err != nil {
					errs = append(errs, err)
					continue
				}
			}

			// now split if needed
			if split, ok := txt.Metadata["split"]; ok {
				overhead1 := 0
				// overhead1: The first segment of the SPF record
				// needs to be shorter than the others due to the overhead of
				// other (non-SPF) txt records. If there are (for example) 50
				// bytes of txt records also on this domain record, setting
				// overhead1=50 reduces the maxLen by 50. It only affects the
				// first part of the split.
				if oh, ok := txt.Metadata["overhead1"]; ok {
					i, err := strconv.Atoi(oh)
					if err != nil {
						errs = append(errs, Warning{fmt.Errorf("split overhead1 %q is not an int", oh)})
					}
					overhead1 = i
				}
				// Default txtMaxSize will not result in multiple TXT strings
				txtMaxSize := 255
				if oh, ok := txt.Metadata["txtMaxSize"]; ok {
					i, err := strconv.Atoi(oh)
					if err != nil {
						errs = append(errs, Warning{fmt.Errorf("split txtMaxSize %q is not an int", oh)})
					}
					txtMaxSize = i
				}
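				// A txtMaxSize larger than 255 lets a single part carry its
				// payload as multiple quoted strings within one TXT record
				// instead of forcing an additional split.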
				if !strings.Contains(split, "%d") {
					errs = append(errs, Warning{fmt.Errorf("split format `%s` in `%s` is not proper format (missing %%d)", split, txt.GetLabelFQDN())})
					continue
				}
				recs := rec.TXTSplit(split+"."+domain.Name, overhead1, txtMaxSize)
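				// recs maps labels to the TXT payload of each part: the "@"
				// entry replaces the original record, and every other key
				// (derived from the split pattern, e.g. "_spf1") becomes an
				// additional record that the preceding part points to via an
				// include: mechanism.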
				for _, k := range sortedKeys(recs) {
					v := recs[k]
					if k == "@" {
						txt.SetTargetTXTs(v)
					} else {
						cp, _ := txt.Copy()
						cp.SetTargetTXTs(v)
						cp.SetLabelFromFQDN(k, domain.Name)
						domain.Records = append(domain.Records, cp)
					}
				}
			}
		}
	}
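	// cache is only created when at least one record asked for flattening or
	// splitting; without it there are no resolver results left to check.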
	if cache == nil {
		return errs
	}
	// check if cache is stale
	for _, e := range cache.ResolveErrors() {
		errs = append(errs, Warning{fmt.Errorf("problem resolving SPF record: %s", e)})
	}
	if len(cache.ResolveErrors()) == 0 {
		changed := cache.ChangedRecords()
		if len(changed) > 0 {
			if err := cache.Save("spfcache.updated.json"); err != nil {
				errs = append(errs, err)
			} else {
				errs = append(errs, Warning{fmt.Errorf("%d spf record lookups are out of date with cache (%s).\nWrote changes to spfcache.updated.json. Please rename and commit:\n $ mv spfcache.updated.json spfcache.json\n $ git commit -m 'Update spfcache.json' spfcache.json", len(changed), strings.Join(changed, ","))})
			}
		}
	}
	return errs
}