mirror of
https://github.com/StackExchange/dnscontrol.git
synced 2024-09-20 06:46:19 +08:00
migrate code for github
This commit is contained in:
commit
ef0bbf53af
1
.gitignore
vendored
Normal file
1
.gitignore
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
/tmp
|
20
LICENSE
Normal file
20
LICENSE
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2015 Stack Overflow
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||||
|
this software and associated documentation files (the "Software"), to deal in
|
||||||
|
the Software without restriction, including without limitation the rights to
|
||||||
|
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||||
|
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||||
|
subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||||
|
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||||
|
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||||
|
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||||
|
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
251
js/helpers.js
Normal file
251
js/helpers.js
Normal file
|
@ -0,0 +1,251 @@
|
||||||
|
"use strict";
|
||||||
|
|
||||||
|
var conf = {
|
||||||
|
registrars: [],
|
||||||
|
dns_service_providers: [],
|
||||||
|
domains: []
|
||||||
|
};
|
||||||
|
|
||||||
|
var defaultDsps = [];
|
||||||
|
|
||||||
|
// Reset the global configuration to a pristine state so stale data never
// leaks between successive parses of a dnsconfig.
function initialize() {
    conf = {};
    conf.registrars = [];
    conf.dns_service_providers = [];
    conf.domains = [];
    defaultDsps = [];
}
|
||||||
|
|
||||||
|
// NewRegistrar(name, type, meta): register a domain registrar and return its
// name (so the result can be passed straight to D()).
// If no type is given, it defaults to "MANUAL".
// FIX: the original read `if (type) { type == "MANUAL"; }` — a comparison
// with no effect, guarded backwards (it only ran when type WAS supplied).
// The evident intent is to default the type when it is absent.
function NewRegistrar(name, type, meta) {
    if (!type) {
        type = "MANUAL";
    }
    var reg = {name: name, type: type, meta: meta};
    conf.registrars.push(reg);
    return name;
}
|
||||||
|
|
||||||
|
// NewDSP(name, type, meta): register a DNS service provider and return its
// name. If the metadata carries an ip_conversions table it is flattened to
// the string form the backend expects before being stored.
function NewDSP(name, type, meta) {
    var hasConversions = (typeof meta === 'object') && ('ip_conversions' in meta);
    if (hasConversions) {
        meta.ip_conversions = format_tt(meta.ip_conversions);
    }
    var entry = {name: name, type: type, meta: meta};
    conf.dns_service_providers.push(entry);
    return name;
}
|
||||||
|
|
||||||
|
// newDomain(name, registrar): build a fresh, empty domain object with all
// collection fields initialized and the default TTL unset (0).
function newDomain(name, registrar) {
    var d = {};
    d.name = name;
    d.registrar = registrar;
    d.meta = {};
    d.records = [];
    d.dsps = [];
    d.defaultTTL = 0;
    d.nameservers = [];
    return d;
}
|
||||||
|
|
||||||
|
// processDargs(m, domain): apply one modifier argument of D() to the domain.
//   function -> invoked with the domain
//   array    -> each element processed recursively
//   object   -> merged into domain.meta
//   string   -> treated as a dsp name
//   other    -> warning logged and the modifier ignored
function processDargs(m, domain) {
    if (_.isFunction(m)) {
        m(domain);
        return;
    }
    if (_.isArray(m)) {
        for (var idx = 0; idx < m.length; idx++) {
            processDargs(m[idx], domain);
        }
        return;
    }
    if (_.isObject(m)) {
        _.extend(domain.meta, m);
        return;
    }
    if (_.isString(m)) {
        domain.dsps.push(m);
        return;
    }
    console.log("WARNING: domain modifier type unsupported: ", typeof m, " Domain: ", domain);
}
|
||||||
|
|
||||||
|
// D(name, registrar, ...modifiers): Create a DNS Domain. The remaining
// arguments are records and modifiers, applied in order via processDargs.
// Any default dsps not already attached are appended before the domain is
// stored in the global config.
function D(name, registrar) {
    var domain = newDomain(name, registrar);
    var i;
    for (i = 2; i < arguments.length; i++) {
        processDargs(arguments[i], domain);
    }
    var missing = _(defaultDsps).difference(domain.dsps);
    _(missing).each(function(dsp) {
        domain.dsps.push(dsp);
    });
    conf.domains.push(domain);
}
|
||||||
|
|
||||||
|
// TTL(v): record modifier that sets the record's TTL to v.
function TTL(v) {
    return function(rec) {
        rec.ttl = v;
    };
}
|
||||||
|
|
||||||
|
// DefaultTTL(v): domain modifier that sets the default TTL applied to
// records subsequently added to the domain.
function DefaultTTL(v) {
    return function(dom) {
        dom.defaultTTL = v;
    };
}
|
||||||
|
|
||||||
|
// A(name, ip, ...recordModifiers): domain modifier adding an A record.
function A(name, ip) {
    var extra = getModifiers(arguments, 2);
    return function(domain) {
        addRecord(domain, "A", name, ip, extra);
    };
}
|
||||||
|
|
||||||
|
// AAAA(name, ip, ...recordModifiers): domain modifier adding an AAAA record.
function AAAA(name, ip) {
    var extra = getModifiers(arguments, 2);
    return function(domain) {
        addRecord(domain, "AAAA", name, ip, extra);
    };
}
|
||||||
|
|
||||||
|
// CNAME(name, target, ...recordModifiers): domain modifier adding a CNAME
// record pointing name at target.
function CNAME(name, target) {
    var extra = getModifiers(arguments, 2);
    return function(domain) {
        addRecord(domain, "CNAME", name, target, extra);
    };
}
|
||||||
|
|
||||||
|
// TXT(name, target, ...recordModifiers): domain modifier adding a TXT record.
function TXT(name, target) {
    var extra = getModifiers(arguments, 2);
    return function(domain) {
        addRecord(domain, "TXT", name, target, extra);
    };
}
|
||||||
|
|
||||||
|
// MX(name, priority, target, ...recordModifiers): domain modifier adding an
// MX record. The numeric priority is appended to the modifier list, where
// addRecord picks it up as the record's priority.
// FIX: the original pushed the priority inside the returned closure, so
// applying a single MX(...) result to more than one domain appended the
// priority again on every application, growing the shared mods array each
// time. The push is hoisted so the list is built exactly once.
function MX(name, priority, target) {
    var extra = getModifiers(arguments, 3);
    extra.push(priority);
    return function(domain) {
        addRecord(domain, "MX", name, target, extra);
    };
}
|
||||||
|
|
||||||
|
// NS(name, target, ...recordModifiers): domain modifier adding an NS record.
function NS(name, target) {
    var extra = getModifiers(arguments, 2);
    return function(domain) {
        addRecord(domain, "NS", name, target, extra);
    };
}
|
||||||
|
|
||||||
|
// NAMESERVER(name, target): domain modifier registering an authoritative
// nameserver for the domain (stored separately from ordinary records).
function NAMESERVER(name, target) {
    return function(domain) {
        var ns = {name: name, target: target};
        domain.nameservers.push(ns);
    };
}
|
||||||
|
|
||||||
|
// format_tt(transform_table): serialize an IP transform table.
// Turns [{low: 1, high: 2, newBase: 3}, {low: 4, high: 5, newIP: 6}]
// into "1.. ~ 2.. ~ 3.. ~ ; 4.. ~ 5.. ~ ~ 6.." — each entry becomes a row of
// dotted-quad cells joined by " ~ ", rows joined by " ; ". A missing cell
// serializes as the empty string (Array.join's treatment of undefined).
function format_tt(transform_table) {
    var rows = [];
    for (var i = 0; i < transform_table.length; i++) {
        var entry = transform_table[i];
        var converted = entry.newIP;
        if (converted) {
            if (_.isArray(converted)) {
                // several replacement IPs: comma-separated dotted quads
                converted = _.map(converted, function(v) { return num2dot(v); }).join(",");
            } else {
                converted = num2dot(converted);
            }
        }
        var cells = [
            num2dot(entry.low),
            num2dot(entry.high),
            num2dot(entry.newBase),
            converted
        ];
        rows.push(cells.join(" ~ "));
    }
    return rows.join(" ; ");
}
|
||||||
|
|
||||||
|
// IMPORT_TRANSFORM(translation_table, domain): domain modifier adding an
// IMPORT_TRANSFORM pseudo-record that imports records from another domain,
// rewriting IPs according to the (string-serialized) translation table.
function IMPORT_TRANSFORM(translation_table, domain) {
    return function(d) {
        var meta = {'transform_table': format_tt(translation_table)};
        addRecord(d, "IMPORT_TRANSFORM", "@", domain, [meta]);
    };
}
|
||||||
|
|
||||||
|
// PURGE(): domain modifier allowing records unknown to dnscontrol to be
// deleted from the provider (clears KeepUnknown).
function PURGE(d) {
    d.KeepUnknown = false;
}
|
||||||
|
|
||||||
|
// NO_PURGE(): domain modifier preserving records unknown to dnscontrol
// (sets KeepUnknown).
function NO_PURGE(d) {
    d.KeepUnknown = true;
}
|
||||||
|
|
||||||
|
// getModifiers(args, start): copy args[start..] (an arguments object or any
// array-like) into a real array.
function getModifiers(args, start) {
    var mods = [];
    var i = start;
    while (i < args.length) {
        mods.push(args[i]);
        i += 1;
    }
    return mods;
}
|
||||||
|
|
||||||
|
// addRecord(d, type, name, target, mods): append a record to domain d.
// A numeric target is assumed to be a packed IPv4 address and converted to
// dotted-quad form. Each modifier is applied according to its type:
//   - Function: called with the record as its argument
//   - Object:   merged into the record's metadata (a `transform` array is
//               serialized to its string form first)
//   - Number:   taken as the priority, but only for MX records
//   - other:    warning logged and the modifier skipped
// FIX: the original merged object modifiers into rec.meta twice in a row
// (`_.extend(rec.meta, m)` duplicated back-to-back); the redundant second
// merge is removed — same result, without the wasted work.
function addRecord(d, type, name, target, mods) {
    // if target is a number, assume it is a packed ip address. convert it.
    if (_.isNumber(target)) {
        target = num2dot(target);
    }
    var rec = {type: type, name: name, target: target, ttl: d.defaultTTL, priority: 0, meta: {}};
    if (mods) {
        for (var i = 0; i < mods.length; i++) {
            var m = mods[i];
            if (_.isFunction(m)) {
                m(rec);
            } else if (_.isObject(m)) {
                // convert transform tables to their string form before merging
                if (m.transform && _.isArray(m.transform)) {
                    m.transform = format_tt(m.transform);
                }
                _.extend(rec.meta, m);
            } else if (_.isNumber(m) && type === "MX") {
                rec.priority = m;
            } else {
                console.log("WARNING: Modifier type unsupported:", typeof m, "(Skipping!)");
            }
        }
    }
    d.records.push(rec);
}
|
||||||
|
|
||||||
|
// ip conversion functions from http://stackoverflow.com/a/8105740/121660
// via http://javascript.about.com/library/blipconvert.htm
// IP(dot): convert a dotted-quad IPv4 string ("1.2.3.4") to its 32-bit
// integer value (16909060). No validation: malformed input yields NaN.
function IP(dot) {
    var parts = dot.split('.');
    var n = 0;
    for (var i = 0; i < 4; i++) {
        n = n * 256 + (+parts[i]);
    }
    return n;
}
|
||||||
|
|
||||||
|
// num2dot(num): convert a 32-bit integer to dotted-quad IPv4 notation.
// Strings pass through untouched and undefined becomes "", so the function
// is safe to call on already-formatted or absent values (see format_tt).
function num2dot(num) {
    if (num === undefined) {
        return "";
    }
    if (_.isString(num)) {
        return num;
    }
    var dotted = num % 256;
    for (var octet = 3; octet > 0; octet--) {
        num = Math.floor(num / 256);
        dotted = (num % 256) + '.' + dotted;
    }
    return dotted;
}
|
46
js/js.go
Normal file
46
js/js.go
Normal file
|
@ -0,0 +1,46 @@
|
||||||
|
package js
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
|
"github.com/StackExchange/dnscontrol/models"
|
||||||
|
|
||||||
|
"github.com/robertkrimen/otto"
|
||||||
|
//load underscore js into vm by default
|
||||||
|
_ "github.com/robertkrimen/otto/underscore"
|
||||||
|
)
|
||||||
|
|
||||||
|
//ExecuteJavascript accepts a javascript string and runs it, returning the resulting dnsConfig.
|
||||||
|
func ExecuteJavascript(script string, devMode bool) (*models.DNSConfig, error) {
|
||||||
|
vm := otto.New()
|
||||||
|
|
||||||
|
helperJs := GetHelpers(devMode)
|
||||||
|
// run helper script to prime vm and initialize variables
|
||||||
|
if _, err := vm.Run(string(helperJs)); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// run user script
|
||||||
|
if _, err := vm.Run(script); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// export conf as string and unmarshal
|
||||||
|
value, err := vm.Run(`JSON.stringify(conf)`)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
str, err := value.ToString()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
conf := &models.DNSConfig{}
|
||||||
|
if err = json.Unmarshal([]byte(str), conf); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return conf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetHelpers returns the contents of helpers.js. When devMode is true it is
// read from disk (js/helpers.js); otherwise the embedded copy is used.
// Panics (via FSMustString) if the asset is missing.
func GetHelpers(devMode bool) string {
	return FSMustString(devMode, "/helpers.js")
}
|
58
js/js_test.go
Normal file
58
js/js_test.go
Normal file
|
@ -0,0 +1,58 @@
|
||||||
|
package js
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/StackExchange/dnscontrol/models"
|
||||||
|
)
|
||||||
|
|
||||||
|
const testDir = "js/parse_tests"
|
||||||
|
|
||||||
|
func TestParsedFiles(t *testing.T) {
|
||||||
|
os.Chdir("..") // go up a directory so we helpers.js is in a consistent place.
|
||||||
|
files, err := ioutil.ReadDir(testDir)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
for _, f := range files {
|
||||||
|
if filepath.Ext(f.Name()) != ".js" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
t.Log(f.Name(), "------")
|
||||||
|
content, err := ioutil.ReadFile(filepath.Join(testDir, f.Name()))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
conf, err := ExecuteJavascript(string(content), true)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
actualJson, err := json.MarshalIndent(conf, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
expectedFile := filepath.Join(testDir, f.Name()[:len(f.Name())-3]+".json")
|
||||||
|
expectedData, err := ioutil.ReadFile(expectedFile)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
conf = &models.DNSConfig{}
|
||||||
|
err = json.Unmarshal(expectedData, conf)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
expectedJson, err := json.MarshalIndent(conf, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if string(expectedJson) != string(actualJson) {
|
||||||
|
t.Error("Expected and actual json don't match")
|
||||||
|
t.Log("Expected:", string(expectedJson))
|
||||||
|
t.Log("Actual:", string(actualJson))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
6
js/parse_tests/001-basic.js
Normal file
6
js/parse_tests/001-basic.js
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
|
||||||
|
var REG = NewRegistrar("Third-Party","NONE");
|
||||||
|
var CF = NewDSP("Cloudflare", "CLOUDFLAREAPI")
|
||||||
|
D("foo.com",REG,CF,
|
||||||
|
A("@","1.2.3.4")
|
||||||
|
);
|
30
js/parse_tests/001-basic.json
Normal file
30
js/parse_tests/001-basic.json
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
{
|
||||||
|
"registrars": [
|
||||||
|
{
|
||||||
|
"name": "Third-Party",
|
||||||
|
"type": "NONE"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"dns_service_providers": [
|
||||||
|
{
|
||||||
|
"name": "Cloudflare",
|
||||||
|
"type": "CLOUDFLAREAPI"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"domains": [
|
||||||
|
{
|
||||||
|
"name": "foo.com",
|
||||||
|
"registrar": "Third-Party",
|
||||||
|
"dsps": [
|
||||||
|
"Cloudflare"
|
||||||
|
],
|
||||||
|
"records": [
|
||||||
|
{
|
||||||
|
"type": "A",
|
||||||
|
"name": "@",
|
||||||
|
"target": "1.2.3.4"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
5
js/parse_tests/002-ttl.js
Normal file
5
js/parse_tests/002-ttl.js
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
var REG = NewRegistrar("Third-Party","NONE");
|
||||||
|
var CF = NewDSP("Cloudflare", "CLOUDFLAREAPI")
|
||||||
|
D("foo.com",REG,CF,
|
||||||
|
A("@","1.2.3.4",TTL(42))
|
||||||
|
);
|
31
js/parse_tests/002-ttl.json
Normal file
31
js/parse_tests/002-ttl.json
Normal file
|
@ -0,0 +1,31 @@
|
||||||
|
{
|
||||||
|
"registrars": [
|
||||||
|
{
|
||||||
|
"name": "Third-Party",
|
||||||
|
"type": "NONE"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"dns_service_providers": [
|
||||||
|
{
|
||||||
|
"name": "Cloudflare",
|
||||||
|
"type": "CLOUDFLAREAPI"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"domains": [
|
||||||
|
{
|
||||||
|
"name": "foo.com",
|
||||||
|
"registrar": "Third-Party",
|
||||||
|
"dsps": [
|
||||||
|
"Cloudflare"
|
||||||
|
],
|
||||||
|
"records": [
|
||||||
|
{
|
||||||
|
"type": "A",
|
||||||
|
"name": "@",
|
||||||
|
"target": "1.2.3.4",
|
||||||
|
"ttl": 42
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
5
js/parse_tests/003-meta.js
Normal file
5
js/parse_tests/003-meta.js
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
|
||||||
|
var CLOUDFLARE = NewRegistrar("Cloudflare","CLOUDFLAREAPI");
|
||||||
|
D("foo.com",CLOUDFLARE,
|
||||||
|
A("@","1.2.3.4",{"cloudflare_proxy":"ON"})
|
||||||
|
);
|
26
js/parse_tests/003-meta.json
Normal file
26
js/parse_tests/003-meta.json
Normal file
|
@ -0,0 +1,26 @@
|
||||||
|
{
|
||||||
|
"registrars": [
|
||||||
|
{
|
||||||
|
"name": "Cloudflare",
|
||||||
|
"type": "CLOUDFLAREAPI"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"dns_service_providers": [],
|
||||||
|
"domains": [
|
||||||
|
{
|
||||||
|
"name": "foo.com",
|
||||||
|
"registrar": "Cloudflare",
|
||||||
|
"dsps": [],
|
||||||
|
"records": [
|
||||||
|
{
|
||||||
|
"type": "A",
|
||||||
|
"name": "@",
|
||||||
|
"target": "1.2.3.4",
|
||||||
|
"meta": {
|
||||||
|
"cloudflare_proxy": "ON"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
10
js/parse_tests/004-ips.js
Normal file
10
js/parse_tests/004-ips.js
Normal file
|
@ -0,0 +1,10 @@
|
||||||
|
var REG = NewRegistrar("Third-Party","NONE");
|
||||||
|
var CF = NewDSP("Cloudflare", "CLOUDFLAREAPI")
|
||||||
|
|
||||||
|
var BASE = IP("1.2.3.4")
|
||||||
|
|
||||||
|
D("foo.com",REG,CF,
|
||||||
|
A("@",BASE),
|
||||||
|
A("p1",BASE+1),
|
||||||
|
A("p255", BASE+255)
|
||||||
|
);
|
28
js/parse_tests/004-ips.json
Normal file
28
js/parse_tests/004-ips.json
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
{
|
||||||
|
"registrars": [
|
||||||
|
{
|
||||||
|
"name": "Third-Party",
|
||||||
|
"type": "NONE"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"dns_service_providers": [
|
||||||
|
{
|
||||||
|
"name": "Cloudflare",
|
||||||
|
"type": "CLOUDFLAREAPI"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"domains": [
|
||||||
|
{
|
||||||
|
"name": "foo.com",
|
||||||
|
"registrar": "Third-Party",
|
||||||
|
"dsps": [
|
||||||
|
"Cloudflare"
|
||||||
|
],
|
||||||
|
"records": [
|
||||||
|
{ "type": "A","name": "@","target": "1.2.3.4"},
|
||||||
|
{ "type": "A","name": "p1","target": "1.2.3.5"},
|
||||||
|
{ "type": "A","name": "p255","target": "1.2.4.3"}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
7
js/parse_tests/005-multipleDomains.js
Normal file
7
js/parse_tests/005-multipleDomains.js
Normal file
|
@ -0,0 +1,7 @@
|
||||||
|
|
||||||
|
var REG = NewRegistrar("Third-Party","NONE");
|
||||||
|
var CF = NewDSP("Cloudflare", "CLOUDFLAREAPI")
|
||||||
|
D("foo.com",REG,CF,
|
||||||
|
A("@","1.2.3.4")
|
||||||
|
);
|
||||||
|
D("foo.com",REG);
|
35
js/parse_tests/005-multipleDomains.json
Normal file
35
js/parse_tests/005-multipleDomains.json
Normal file
|
@ -0,0 +1,35 @@
|
||||||
|
{ "registrars": [
|
||||||
|
{
|
||||||
|
"name": "Third-Party",
|
||||||
|
"type": "NONE"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"dns_service_providers": [
|
||||||
|
{
|
||||||
|
"name": "Cloudflare",
|
||||||
|
"type": "CLOUDFLAREAPI"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"domains": [
|
||||||
|
{
|
||||||
|
"name": "foo.com",
|
||||||
|
"registrar": "Third-Party",
|
||||||
|
"dsps": [
|
||||||
|
"Cloudflare"
|
||||||
|
],
|
||||||
|
"records": [
|
||||||
|
{
|
||||||
|
"type": "A",
|
||||||
|
"name": "@",
|
||||||
|
"target": "1.2.3.4"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "foo.com",
|
||||||
|
"registrar": "Third-Party",
|
||||||
|
"dsps": [],
|
||||||
|
"records": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
253
js/static.go
Normal file
253
js/static.go
Normal file
|
@ -0,0 +1,253 @@
|
||||||
|
package js
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"compress/gzip"
|
||||||
|
"encoding/base64"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type _escLocalFS struct{}
|
||||||
|
|
||||||
|
var _escLocal _escLocalFS
|
||||||
|
|
||||||
|
type _escStaticFS struct{}
|
||||||
|
|
||||||
|
var _escStatic _escStaticFS
|
||||||
|
|
||||||
|
type _escDirectory struct {
|
||||||
|
fs http.FileSystem
|
||||||
|
name string
|
||||||
|
}
|
||||||
|
|
||||||
|
type _escFile struct {
|
||||||
|
compressed string
|
||||||
|
size int64
|
||||||
|
modtime int64
|
||||||
|
local string
|
||||||
|
isDir bool
|
||||||
|
|
||||||
|
once sync.Once
|
||||||
|
data []byte
|
||||||
|
name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_escLocalFS) Open(name string) (http.File, error) {
|
||||||
|
f, present := _escData[path.Clean(name)]
|
||||||
|
if !present {
|
||||||
|
return nil, os.ErrNotExist
|
||||||
|
}
|
||||||
|
return os.Open(f.local)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_escStaticFS) prepare(name string) (*_escFile, error) {
|
||||||
|
f, present := _escData[path.Clean(name)]
|
||||||
|
if !present {
|
||||||
|
return nil, os.ErrNotExist
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
f.once.Do(func() {
|
||||||
|
f.name = path.Base(name)
|
||||||
|
if f.size == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var gr *gzip.Reader
|
||||||
|
b64 := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(f.compressed))
|
||||||
|
gr, err = gzip.NewReader(b64)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
f.data, err = ioutil.ReadAll(gr)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs _escStaticFS) Open(name string) (http.File, error) {
|
||||||
|
f, err := fs.prepare(name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f.File()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dir _escDirectory) Open(name string) (http.File, error) {
|
||||||
|
return dir.fs.Open(dir.name + name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) File() (http.File, error) {
|
||||||
|
type httpFile struct {
|
||||||
|
*bytes.Reader
|
||||||
|
*_escFile
|
||||||
|
}
|
||||||
|
return &httpFile{
|
||||||
|
Reader: bytes.NewReader(f.data),
|
||||||
|
_escFile: f,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) Close() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) Stat() (os.FileInfo, error) {
|
||||||
|
return f, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) Name() string {
|
||||||
|
return f.name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) Size() int64 {
|
||||||
|
return f.size
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) Mode() os.FileMode {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) ModTime() time.Time {
|
||||||
|
return time.Unix(f.modtime, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) IsDir() bool {
|
||||||
|
return f.isDir
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) Sys() interface{} {
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
// FS returns a http.Filesystem for the embedded assets. If useLocal is true,
|
||||||
|
// the filesystem's contents are instead used.
|
||||||
|
func FS(useLocal bool) http.FileSystem {
|
||||||
|
if useLocal {
|
||||||
|
return _escLocal
|
||||||
|
}
|
||||||
|
return _escStatic
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dir returns a http.Filesystem for the embedded assets on a given prefix dir.
|
||||||
|
// If useLocal is true, the filesystem's contents are instead used.
|
||||||
|
func Dir(useLocal bool, name string) http.FileSystem {
|
||||||
|
if useLocal {
|
||||||
|
return _escDirectory{fs: _escLocal, name: name}
|
||||||
|
}
|
||||||
|
return _escDirectory{fs: _escStatic, name: name}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FSByte returns the named file from the embedded assets. If useLocal is
|
||||||
|
// true, the filesystem's contents are instead used.
|
||||||
|
func FSByte(useLocal bool, name string) ([]byte, error) {
|
||||||
|
if useLocal {
|
||||||
|
f, err := _escLocal.Open(name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
b, err := ioutil.ReadAll(f)
|
||||||
|
f.Close()
|
||||||
|
return b, err
|
||||||
|
}
|
||||||
|
f, err := _escStatic.prepare(name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f.data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FSMustByte is the same as FSByte, but panics if name is not present.
|
||||||
|
func FSMustByte(useLocal bool, name string) []byte {
|
||||||
|
b, err := FSByte(useLocal, name)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// FSString is the string version of FSByte.
|
||||||
|
func FSString(useLocal bool, name string) (string, error) {
|
||||||
|
b, err := FSByte(useLocal, name)
|
||||||
|
return string(b), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// FSMustString is the string version of FSMustByte.
|
||||||
|
func FSMustString(useLocal bool, name string) string {
|
||||||
|
return string(FSMustByte(useLocal, name))
|
||||||
|
}
|
||||||
|
|
||||||
|
var _escData = map[string]*_escFile{
|
||||||
|
|
||||||
|
"/helpers.js": {
|
||||||
|
local: "js/helpers.js",
|
||||||
|
size: 6427,
|
||||||
|
modtime: 0,
|
||||||
|
compressed: `
|
||||||
|
H4sIAAAJbogA/7RY7W/bvBH/nr/iJmC1tOiR89Jkg1wP8560D4rVTpC4WwDDMBiJtplKokDSdrPA+dsH
|
||||||
|
vkiiXrwkH9oPqUXeHX/3wuPdORuOgQtGIuEMjo62iEFEsyUM4fkIAIDhFeGCIcZDmM19tRZnfMEx25II
|
||||||
|
L3JGtyTGtW2aIpKphaO9kRnjJdok4ornHIYwmw+OjpabLBKEZkAyIghKyH+x6+lDawgOoXgDkiYa+b0f
|
||||||
|
aJAtQHsL0gTvbosj3Qyl2BdPOfZTLJBnYJEluHLRK2HKLxgOwRmPJt9H3xx90F79lTZgeCWVkuJCUEIV
|
||||||
|
S6j++iCFh+qvgSitEFSaB/mGr12GV97AeEZsWKYEtcBf3d241QlatgUcXAWdLtUGDIdD6NGHRxyJngcf
|
||||||
|
PoDbI/kiotkWM05oxntAMi3Ds5wiF4I6IQxhSVmKxEIIt2Pfa5gk5vn7TdLpdG2dmOevWSfDuysVEtpA
|
||||||
|
pX29MuAVYw1TSRRWPw26573cjiiLeTib+1IjHYBFhE2n30I48ZUkiVoG6Gy+r4PKGY0w51eIrbib+iZo
|
||||||
|
bWP3+9KygFG0hpTGZEkw86UviQDCAQVBUKM1kkOIUJJIoh0RayPXJkSMoaewACBV2TBOtjh5sql0cEhX
|
||||||
|
sBVWR2aCKgPESCCbUqaSbBUC4nyT4gKdNEtJJW/OIiD8i8HoprWwKqLLNUYYlDt7wAnHJf9IQu9glnZy
|
||||||
|
ZXQ9qrBty65be/Y4Lw1eI9wfOvhaWaPj5EWAfwqcxQZ6IA3kp4c1uFPG6hBk+GUw6cDuEGJzRDTjNMFB
|
||||||
|
Qleu85/R7eTr5I/QSCnDRSeoTcY3eU6ZwHEIjr5vMhH44IC+GGq5aZC9jNd+H66a1yaE3xlGAgOCq8md
|
||||||
|
ERHAd45BrDHkiKEUC8w4IF7cFEBZLGHxoLoCLcFGQZUmtCLDw5dXW6f0PIEhnA2AfEJstUlxJniQ4Gwl
|
||||||
|
1gMgx8e2tSV1CkMoCWdkXpn6wL1sZDFBR3EMQ1i41qviBTFZLjHDWYRdy58G6sJVXF4gb7RbWMH96cFz
|
||||||
|
2/s/vb1m0/lPv2gm4xlE2jvT6Td364Vwh4Wy/nT6TRlF+0Zb37K5Jq8nvhIKs83EAiESGMK2eNRMNJQ5
|
||||||
|
rnasMUN5vFrTSlkOt3kPYIhtDHFQpdQ2lJEOCZIX+Xhswp4HQeBVxxo6ILkdYTIYYQgrLEo2twwJ/8x7
|
||||||
|
HR2K41t1rhv7zsjxCzRSsldHOhq9GWxJ+ovxjkb/F/Lvk9H4symEEFth8Qpuix40wy8Erw4z6A26tgbT
|
||||||
|
++k78JfUvx799H76GvbxvQaTM0IZEU9v06HggpLt3cqcv0EZlcZVKirOsZ4qW1NwxveOD7ZZfWgrO7l7
|
||||||
|
h58K4l/vpsnda16SUXj3+fbfn29tBWywDYIG6Fdyn1U/anPXq2YlKjT/7y1k5fFVYS4Yyrj8XAj0kJgO
|
||||||
|
Rt4Ref5sltBdCKc+rMlqHcKZL1/dfyKOQzif+6C3PxbbF2r7600Il/O5FqNqQ+cUXuAMXuAcXgbwEV7g
|
||||||
|
Al4AXuDSOdIOSkiGde91ZL/cw5MBEPgEDZBd77eilw1Eg7Z8wiWBQgdDIHmgfg7K7k19es9WXWqVlXrT
|
||||||
|
q5dlhaxFkKJck/ilv4j3XDQdm/QspsIl3t4LHinJXMd3rFJKlm/dggtOfbpV8jVbSbqTljP75Xl5kNCd
|
||||||
|
57eXpa+61o1nqy15rvqtm2XlI9N40p3RBV7A8aQ6Eo9RWROa/QE4RUHydXxzfTtdTG9Hk7sv17djHXsJ
|
||||||
|
khbTzqqKqjJS38H0lrtTT0JN4Y4Pzj/Kgtcvjar/PfcasdULmxfJxuXt5/WkcPP99o/PrqWbXjD44uBf
|
||||||
|
GOffsx8Z3cnydokSjot0cr1oMZdrB/gF2+DanW9mP+5zgVhXnpzNO0poRTxQVfTBArrK/5JqRuZ2dWz8
|
||||||
|
ImnqDa/tE9Xrt3KrOULmk6VJa7KVzDbpg+x8i/4yl6IY5jwAPWcQQERQ3nF5oSeKxTXZ1sZuxFb3ztC0
|
||||||
|
JzcRDOHZHk0cTr4+CJGEdrFaPcFqEmDmBmak0d3YxzgiMYYHxHEMNNNTkYL+N/jSaO+5bu9lna3fS9ls
|
||||||
|
ya/ixatYrztbeUlba+cVrbZcCF+/wPi+kmx19oVipcFt37XiSab2TzpiDkQTWC2ZpJuReW3vbbMDSF2G
|
||||||
|
Iyt/wjuaeNDqF9FU3n8OgprxBm8zKN2Dkhg+fABrRlFtNJ+UErHFWxuiWaxtxn1rqRxBMBy15w9vp2pY
|
||||||
|
y9yhVI0Hq0HnvdNhPSmziAvpxk7BbSt0zzDGB4cX9dmFe/eD5DnJVn/ynKYqnc9oHJhhRDFXlfGiUi/J
|
||||||
|
oRpZlm8KhyWjKayFyMN+nwsU/aBbzJYJ3QURTfuo/7fTk4u/fjzpn56dXl6eyBy+JahgeERbxCNGchGg
|
||||||
|
B7oRiichDwyxp/5DQnITb8FapNZLeOPGVHhH1igEhhBTEfA8IcLtBb36vNNV/47j2cnc+8vZxaV3LD9O
|
||||||
|
5571dVb7Op97jQFpUYNs0uJgspRfak68yWK8JBmOPXs6r852ahPvxoxLSmuzZJu0ORHW2fjPZxeXHQ/S
|
||||||
|
uawN/67yyG+/6ftQyVQQYYzEOlgmlDJ5Zl/qWYWDJR2OoRf04BjiQfvBiqVJ/hcAAP//Ocw/QBsZAAA=
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
|
||||||
|
"/tester.html": {
|
||||||
|
local: "js/tester.html",
|
||||||
|
size: 953,
|
||||||
|
modtime: 0,
|
||||||
|
compressed: `
|
||||||
|
H4sIAAAJbogA/2yST2/bPAzG7/4UhN73YCOr1TrrOjSWDwM6YD10w1AM6IocHFmJ6SliJsnZsiLffZD/
|
||||||
|
NAmykynqx4cPKee1X+siymtVVkUEAJA7aXHjwVkpWO39xt1yLqlSafOzVXaXSlrzPrzI0iydpms0aeMY
|
||||||
|
oPFqZdHvBHN1mV2/uyiz6erqwb+t/N3T5+Zm8XWyfWxvJo/vv325mtbt+vvHD/cPn57o7p4EA2nJObK4
|
||||||
|
QiNYacjs1tQ6VuS8t1REOe995guqdoPdelpUxkky3pKGxoFXziub83o6EF799qVVJWAlGBoGln45wa4v
|
||||||
|
GUjSTrDs8jJ0GblR2PIhWrTekwEyUqP8IVjocO/ihBWPyvmc9/dnZRVuu5ZWuVb7bpAKt0UUna34lvPW
|
||||||
|
VMo6SVY1LiW7OkpcDAs+XsSJgtIbZd2/kSJatkZ6JAOj7ZfOHhr0WGr8o+Jk1mW2pYXw0CDg/5j9h4Yl
|
||||||
|
6bbU47Uk40irVNMqDtiQVgE5Op9iZjmkg+K4iCQNf13cODJZF3VcMov2gTwYPgAhSuDl1Sa+Aas8CGCs
|
||||||
|
lw+niQCWt7oYUkuyMSBgrzNWn8AaCzbBCbuFoaZbzDIGv9soWnaFzzgHIQQwWjRKepaM5afunnGeHDSU
|
||||||
|
duqYe8b57Lw9D/37/P50Cn4YwyrfWhM+s2gfvb7v3wAAAP//jV4zrLkDAAA=
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
|
||||||
|
"/": {
|
||||||
|
isDir: true,
|
||||||
|
local: "js",
|
||||||
|
},
|
||||||
|
}
|
321
main.go
Normal file
321
main.go
Normal file
|
@ -0,0 +1,321 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/json"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/StackExchange/dnscontrol/js"
|
||||||
|
"github.com/StackExchange/dnscontrol/models"
|
||||||
|
"github.com/StackExchange/dnscontrol/normalize"
|
||||||
|
"github.com/StackExchange/dnscontrol/providers"
|
||||||
|
"github.com/StackExchange/dnscontrol/providers/config"
|
||||||
|
"github.com/StackExchange/dnscontrol/web"
|
||||||
|
|
||||||
|
//Define all known providers here. They should each register themselves with the providers package via init function.
|
||||||
|
_ "github.com/StackExchange/dnscontrol/providers/activedir"
|
||||||
|
_ "github.com/StackExchange/dnscontrol/providers/bind"
|
||||||
|
_ "github.com/StackExchange/dnscontrol/providers/cloudflare"
|
||||||
|
_ "github.com/StackExchange/dnscontrol/providers/gandi"
|
||||||
|
_ "github.com/StackExchange/dnscontrol/providers/namecheap"
|
||||||
|
_ "github.com/StackExchange/dnscontrol/providers/namedotcom"
|
||||||
|
_ "github.com/StackExchange/dnscontrol/providers/route53"
|
||||||
|
)
|
||||||
|
|
||||||
|
//go:generate esc -modtime 0 -o js/static.go -pkg js -ignore go -prefix js js
|
||||||
|
//go:generate esc -modtime 0 -o web/static.go -pkg web -include=bundle\.js -ignore node_modules -prefix web web
|
||||||
|
|
||||||
|
// One of these config options must be set.
|
||||||
|
var jsFile = flag.String("js", "dnsconfig.js", "Javascript file containing dns config")
|
||||||
|
var stdin = flag.Bool("stdin", false, "Read domain config JSON from stdin")
|
||||||
|
var jsonInput = flag.String("json", "", "Read domain config from specified JSON file.")
|
||||||
|
|
||||||
|
var jsonOutputPre = flag.String("debugrawjson", "", "Write JSON intermediate to this file pre-normalization.")
|
||||||
|
var jsonOutputPost = flag.String("debugjson", "", "During preview, write JSON intermediate to this file instead of stdout.")
|
||||||
|
|
||||||
|
var configFile = flag.String("creds", "creds.json", "Provider credentials JSON file")
|
||||||
|
var devMode = flag.Bool("dev", false, "Use helpers.js from disk instead of embedded")
|
||||||
|
|
||||||
|
var flagProviders = flag.String("providers", "", "Providers to enable (comma seperated list); default is all-but-bind. Specify 'all' for all (including bind)")
|
||||||
|
var domains = flag.String("domains", "", "Comma seperated list of domain names to include")
|
||||||
|
|
||||||
|
var interactive = flag.Bool("i", false, "Confirm or Exclude each correction before they run")
|
||||||
|
|
||||||
|
var (
|
||||||
|
SHA = ""
|
||||||
|
Version = ""
|
||||||
|
BuildTime = ""
|
||||||
|
)
|
||||||
|
|
||||||
|
// main dispatches on the single positional command:
//
//	version - print the build banner and exit
//	web     - run the embedded web UI
//	print   - dump the compiled configuration as JSON
//	preview - compute and show corrections without applying them
//	push    - compute and apply corrections
func main() {
    log.SetFlags(log.LstdFlags | log.Lshortfile)
    flag.Parse()
    command := flag.Arg(0)
    // "version" and "web" need no configuration; handle them first.
    if command == "version" {
        printVersion()
        return
    }
    if command == "web" {
        runWebServer()
        return
    }

    // Load the DNS configuration from one of the three input sources
    // (-stdin, -json, -js). Only the -js path is implemented.
    var dnsConfig *models.DNSConfig
    if *stdin {
        log.Fatal("Read from stdin not implemented yet.")
    } else if *jsonInput != "" {
        log.Fatal("Direct JSON read not implemented")
    } else if *jsFile != "" {
        text, err := ioutil.ReadFile(*jsFile)
        if err != nil {
            log.Fatalf("Error reading %v: %v\n", *jsFile, err)
        }
        dnsConfig, err = js.ExecuteJavascript(string(text), *devMode)
        if err != nil {
            log.Fatalf("Error executing javasscript in (%v): %v", *jsFile, err)
        }
    }

    if dnsConfig == nil {
        log.Fatal("No config specified.")
    }

    // Exactly one positional argument (the command) is required.
    if flag.NArg() != 1 {
        fmt.Println("Usage: dnscontrol [options] cmd")
        fmt.Println(" cmd:")
        fmt.Println(" preview: Show changed that would happen.")
        fmt.Println(" push: Make changes for real.")
        fmt.Println(" version: Print program version string.")
        fmt.Println(" print: Print compiled data.")
        fmt.Println("")
        flag.PrintDefaults()
        return
    }
    // Optionally dump the raw (pre-normalization) config for debugging.
    if *jsonOutputPre != "" {
        dat, _ := json.MarshalIndent(dnsConfig, "", " ")
        err := ioutil.WriteFile(*jsonOutputPre, dat, 0644)
        if err != nil {
            panic(err)
        }
    }

    // Normalize/validate. Errors are reported but not fatal at this point.
    errs := normalize.NormalizeAndValidateConfig(dnsConfig)
    if len(errs) > 0 {
        fmt.Printf("%d Validation errors:\n", len(errs))
        for i, err := range errs {
            fmt.Printf("%d: %s\n", i+1, err)
        }
    }

    // "print" dumps the normalized config and exits.
    if command == "print" {
        dat, _ := json.MarshalIndent(dnsConfig, "", " ")
        if *jsonOutputPost == "" {
            fmt.Println("While running JS:", string(dat))
        } else {
            err := ioutil.WriteFile(*jsonOutputPost, dat, 0644)
            if err != nil {
                panic(err)
            }
        }
        return
    }

    // Instantiate provider drivers from the credentials file.
    providerConfigs, err := config.LoadProviderConfigs(*configFile)
    if err != nil {
        log.Fatalf("error loading provider configurations: %s", err)
    }
    registrars, err := providers.CreateRegistrars(dnsConfig, providerConfigs)
    if err != nil {
        log.Fatalf("Error creating registrars: %v\n", err)
    }
    dsps, err := providers.CreateDsps(dnsConfig, providerConfigs)
    if err != nil {
        log.Fatalf("Error creating dsps: %v\n", err)
    }

    fmt.Printf("Initialized %d registrars and %d dns service providers.\n", len(registrars), len(dsps))
    anyErrors, totalCorrections := false, 0
    switch command {
    case "preview", "push":
    DomainLoop:
        for _, domain := range dnsConfig.Domains {
            if !shouldRunDomain(domain.Name) {
                continue
            }
            fmt.Printf("******************** Domain: %s\n", domain.Name)
            // First pass: corrections from each DNS service provider.
            for pi, prov := range domain.Dsps {

                // Each provider gets its own copy so one provider's
                // mutations don't leak into the next.
                dc, err := domain.Copy()
                if err != nil {
                    log.Fatal(err)
                }
                shouldrun := shouldRunProvider(prov)
                if shouldrun {
                    fmt.Printf("----- DNS Provider: %s\n", prov)
                } else {
                    // Disabled providers: the first is labeled read-only,
                    // the rest skipping. Either way corrections are still
                    // fetched below but not executed.
                    if pi == 0 {
                        fmt.Printf("----- DNS Provider: %s (read-only)\n", prov)
                    } else {
                        fmt.Printf("----- DNS Provider: %s (skipping)\n", prov)
                    }
                }
                dsp, ok := dsps[prov]
                if !ok {
                    log.Fatalf("DSP %s not declared.", prov)
                }
                corrections, err := dsp.GetDomainCorrections(dc)
                if err != nil {
                    anyErrors = true
                    fmt.Printf("Error getting corrections: %s\n", err)
                    continue DomainLoop
                }
                // Copy any nameservers populated on dc back to the domain;
                // the registrar step below relies on them.
                storeNameservers(dc, domain)
                if !shouldrun {
                    continue
                }
                totalCorrections += len(corrections)
                anyErrors = printOrRunCorrections(corrections, command) || anyErrors
            }
            // Second pass: registrar corrections (delegation updates).
            if !shouldRunProvider(domain.Registrar) {
                continue
            }
            fmt.Printf("----- Registrar: %s\n", domain.Registrar)
            reg, ok := registrars[domain.Registrar]
            if !ok {
                log.Fatalf("Registrar %s not declared.", reg)
            }
            if len(domain.Nameservers) == 0 {
                // Without nameservers there is no delegation to push.
                //fmt.Printf("No nameservers declared; skipping registrar.\n")
                continue
            }
            dc, err := domain.Copy()
            if err != nil {
                log.Fatal(err)
            }
            corrections, err := reg.GetRegistrarCorrections(dc)
            if err != nil {
                fmt.Printf("Error getting corrections: %s\n", err)
                anyErrors = true
                continue
            }
            totalCorrections += len(corrections)
            anyErrors = printOrRunCorrections(corrections, command) || anyErrors
        }
    default:
        log.Fatalf("Unknown command %s", command)
    }
    // Emit a build-status service message when running under TeamCity CI.
    if os.Getenv("TEAMCITY_VERSION") != "" {
        fmt.Fprintf(os.Stderr, "##teamcity[buildStatus status='SUCCESS' text='%d corrections']", totalCorrections)
    }
    fmt.Printf("Done. %d corrections.\n", totalCorrections)
    if anyErrors {
        os.Exit(1)
    }
}
|
||||||
|
|
||||||
|
// reader is the shared stdin reader used for interactive (-i) confirmations.
var reader = bufio.NewReader(os.Stdin)
|
||||||
|
|
||||||
|
func printOrRunCorrections(corrections []*models.Correction, command string) (anyErrors bool) {
|
||||||
|
anyErrors = false
|
||||||
|
if len(corrections) == 0 {
|
||||||
|
return anyErrors
|
||||||
|
}
|
||||||
|
for i, correction := range corrections {
|
||||||
|
fmt.Printf("#%d: %s\n", i+1, correction.Msg)
|
||||||
|
if command == "push" {
|
||||||
|
if *interactive {
|
||||||
|
fmt.Print("Run? (Y/n): ")
|
||||||
|
txt, err := reader.ReadString('\n')
|
||||||
|
run := true
|
||||||
|
if err != nil {
|
||||||
|
run = false
|
||||||
|
}
|
||||||
|
txt = strings.ToLower(strings.TrimSpace(txt))
|
||||||
|
if txt != "y" {
|
||||||
|
run = false
|
||||||
|
}
|
||||||
|
if !run {
|
||||||
|
fmt.Println("Skipping")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err := correction.F()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("FAILURE!", err)
|
||||||
|
anyErrors = true
|
||||||
|
} else {
|
||||||
|
fmt.Println("SUCCESS!")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return anyErrors
|
||||||
|
}
|
||||||
|
|
||||||
|
func shouldRunProvider(p string) bool {
|
||||||
|
if *flagProviders == "all" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if *flagProviders == "" {
|
||||||
|
return p != "bind"
|
||||||
|
// NOTE(tlim): Hardcoding bind is a hacky way to make it off by default.
|
||||||
|
// As a result, bind only runs if you list it in -providers or use
|
||||||
|
// -providers=all.
|
||||||
|
// If you always want bind to run, call it something else in dnsconfig.js
|
||||||
|
// for example `NewDSP('bindyes', 'BIND',`.
|
||||||
|
// We don't want this hack, but we shouldn't need this in the future
|
||||||
|
// so it doesn't make sense to write a lot of code to make it work.
|
||||||
|
// In the future, the above `return p != "bind"` can become `return true`.
|
||||||
|
// Alternatively we might want to add a complex system that permits
|
||||||
|
// fancy whitelist/blacklisting of providers with defaults and so on.
|
||||||
|
// In that case, all of this hack will go away.
|
||||||
|
}
|
||||||
|
for _, prov := range strings.Split(*flagProviders, ",") {
|
||||||
|
if prov == p {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func shouldRunDomain(d string) bool {
|
||||||
|
if *domains == "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
for _, dom := range strings.Split(*domains, ",") {
|
||||||
|
if dom == d {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func storeNameservers(from, to *models.DomainConfig) {
|
||||||
|
if len(to.Nameservers) == 0 && len(from.Nameservers) > 0 {
|
||||||
|
to.Nameservers = from.Nameservers
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// printVersion prints the version banner.
|
||||||
|
func printVersion() {
|
||||||
|
if Version == "" {
|
||||||
|
Version = "dev"
|
||||||
|
}
|
||||||
|
sha := ""
|
||||||
|
if SHA != "" {
|
||||||
|
sha = fmt.Sprintf(" (%s)", SHA)
|
||||||
|
}
|
||||||
|
if BuildTime != "" {
|
||||||
|
sha = sha + fmt.Sprintf(" built %s", BuildTime)
|
||||||
|
}
|
||||||
|
fmt.Printf("dnscontrol %s%s\n", Version, sha)
|
||||||
|
}
|
||||||
|
|
||||||
|
func runWebServer() {
|
||||||
|
fmt.Printf("Running Webserver on :8080 (js = %s , creds = %s)", *jsFile, *configFile)
|
||||||
|
web.Serve(*jsFile, *configFile, *devMode)
|
||||||
|
}
|
204
models/dns.go
Normal file
204
models/dns.go
Normal file
|
@ -0,0 +1,204 @@
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/gob"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"net"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/miekg/dns"
|
||||||
|
"github.com/StackExchange/dnscontrol/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultTTL is the TTL (in seconds) substituted for records whose TTL is 0.
const DefaultTTL = uint32(300)
|
||||||
|
|
||||||
|
// DNSConfig is the top-level configuration produced by evaluating
// dnsconfig.js: all registrars, DNS service providers, and domains.
type DNSConfig struct {
    Registrars   []*RegistrarConfig   `json:"registrars"`
    DNSProviders []*DNSProviderConfig `json:"dns_service_providers"`
    Domains      []*DomainConfig      `json:"domains"`
}
|
||||||
|
|
||||||
|
func (config *DNSConfig) FindDomain(query string) *DomainConfig {
|
||||||
|
for _, b := range config.Domains {
|
||||||
|
if b.Name == query {
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegistrarConfig describes one registrar declared in the configuration.
type RegistrarConfig struct {
    Name     string          `json:"name"`           // user-chosen identifier referenced by domains
    Type     string          `json:"type"`           // provider driver to instantiate
    Metadata json.RawMessage `json:"meta,omitempty"` // provider-specific settings, parsed by the driver
}

// DNSProviderConfig describes one DNS service provider declared in the
// configuration.
type DNSProviderConfig struct {
    Name     string          `json:"name"`           // user-chosen identifier referenced by domains
    Type     string          `json:"type"`           // provider driver to instantiate
    Metadata json.RawMessage `json:"meta,omitempty"` // provider-specific settings, parsed by the driver
}
|
||||||
|
|
||||||
|
// RecordConfig stores a DNS record.
// Providers are responsible for validating or normalizing the data
// that goes into a RecordConfig.
// If you update Name, you have to update NameFQDN and vice-versa.
//
// Name:
//
//	This is the shortname i.e. the NameFQDN without the origin suffix.
//	It should never have a trailing "."
//	It should never be null. It should store "@", not the apex domain, not null, etc.
//	It shouldn't end with the domain origin. If the origin is "foo.com." then
//	if Name == "foo.com" then that literally means "foo.com.foo.com." is
//	the intended FQDN.
//
// NameFQDN:
//
//	This is the FQDN version of Name.
//	It should never have a trailing ".".
type RecordConfig struct {
    Type     string            `json:"type"`
    Name     string            `json:"name"`   // The short name. See below.
    Target   string            `json:"target"` // If a name, must end with "."
    TTL      uint32            `json:"ttl,omitempty"`
    Metadata map[string]string `json:"meta,omitempty"`
    NameFQDN string            `json:"-"` // Must end with ".$origin". See below.
    Priority uint16            `json:"priority,omitempty"` // Used as the MX preference (see RR).
}
|
||||||
|
|
||||||
|
// GetName returns the record's fully-qualified name (no trailing dot).
func (r *RecordConfig) GetName() string {
    return r.NameFQDN
}

// GetType returns the record's DNS type string (e.g. "A", "MX").
func (r *RecordConfig) GetType() string {
    return r.Type
}

// GetContent returns the record's target value.
func (r *RecordConfig) GetContent() string {
    return r.Target
}
|
||||||
|
func (r *RecordConfig) GetComparisionData() string {
|
||||||
|
mxPrio := ""
|
||||||
|
if r.Type == "MX" {
|
||||||
|
mxPrio = fmt.Sprintf(" %d ", r.Priority)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%d%s", r.TTL, mxPrio)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RR converts a RecordConfig into a miekg/dns RR value.
// Note: The label is a FQDN ending in a ".". It will not put "@" in the Name field.
func (r *RecordConfig) RR() dns.RR {

    // NB(tlim): An alternative way to do this would be
    // to create the rr via: rr := TypeToRR[x]()
    // then set the parameters. A benchmark may find that
    // faster. This was faster to implement.

    // Map the textual type ("A", "MX", ...) to the numeric rrtype.
    rdtype, ok := dns.StringToType[r.Type]
    if !ok {
        log.Fatalf("No such DNS type as (%#v)\n", r.Type)
    }

    hdr := dns.RR_Header{
        Name:   r.NameFQDN + ".",
        Rrtype: rdtype,
        Class:  dns.ClassINET,
        Ttl:    r.TTL,
    }

    // Handle some special cases:
    switch rdtype {
    case dns.TypeMX:
        // Has a Priority field.
        return &dns.MX{Hdr: hdr, Preference: r.Priority, Mx: r.Target}
    case dns.TypeTXT:
        // Assure no problems due to quoting/unquoting:
        return &dns.TXT{Hdr: hdr, Txt: []string{r.Target}}
    default:
    }

    // All other types: build a zonefile-style line and let dns.NewRR parse it.
    var ttl string
    if r.TTL == 0 {
        // Zero TTL means "unset": substitute the package default.
        ttl = strconv.FormatUint(uint64(DefaultTTL), 10)
    } else {
        ttl = strconv.FormatUint(uint64(r.TTL), 10)
    }

    s := fmt.Sprintf("%s %s IN %s %s", r.NameFQDN, ttl, r.Type, r.Target)
    rc, err := dns.NewRR(s)
    if err != nil {
        log.Fatalf("NewRR rejected RecordConfig: %#v (t=%#v)\n%v\n", s, r.Target, err)
    }
    return rc
}
|
||||||
|
|
||||||
|
// Nameserver is a single nameserver entry for a domain.
type Nameserver struct {
    Name   string `json:"name"` // Normalized to a FQDN with NO trailing "."
    Target string `json:"target"`
}

// DomainConfig describes one domain: who registers it, which DNS service
// providers serve it, and the records it should contain.
type DomainConfig struct {
    Name        string            `json:"name"`      // NO trailing "."
    Registrar   string            `json:"registrar"` // name of a declared RegistrarConfig
    Dsps        []string          `json:"dsps"`      // names of declared DNSProviderConfigs
    Metadata    map[string]string `json:"meta,omitempty"`
    Records     []*RecordConfig   `json:"records"`
    Nameservers []*Nameserver     `json:"nameservers,omitempty"`
    KeepUnknown bool              `json:"keepunknown"` // presumably: leave unmanaged records in place — TODO confirm against providers
}
|
||||||
|
|
||||||
|
func (dc *DomainConfig) Copy() (*DomainConfig, error) {
|
||||||
|
newDc := &DomainConfig{}
|
||||||
|
err := copyObj(dc, newDc)
|
||||||
|
return newDc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RecordConfig) Copy() (*RecordConfig, error) {
|
||||||
|
newR := &RecordConfig{}
|
||||||
|
err := copyObj(r, newR)
|
||||||
|
return newR, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// copyObj deep-copies input into output by round-tripping the value
// through a gob encode/decode cycle. Both arguments must be
// gob-encodable, and output must be a pointer.
func copyObj(input interface{}, output interface{}) error {
    var buffer bytes.Buffer
    if err := gob.NewEncoder(&buffer).Encode(input); err != nil {
        return err
    }
    return gob.NewDecoder(&buffer).Decode(output)
}
|
||||||
|
|
||||||
|
func (dc *DomainConfig) HasRecordTypeName(rtype, name string) bool {
|
||||||
|
for _, r := range dc.Records {
|
||||||
|
if r.Type == rtype && r.Name == name {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// InterfaceToIP converts a dynamically-typed value (as decoded from
// JSON/JavaScript) into a net.IP. float64 values — the JSON number type —
// are treated as packed 32-bit IPv4 addresses; strings are parsed as
// textual IP addresses. Any other type yields an error.
func InterfaceToIP(i interface{}) (net.IP, error) {
    switch v := i.(type) {
    case float64:
        // JS numbers arrive as float64; reinterpret as a uint32 IPv4 value.
        u := uint32(v)
        return transform.UintToIP(u), nil
    case string:
        if ip := net.ParseIP(v); ip != nil {
            return ip, nil
        }
        return nil, fmt.Errorf("%s is not a valid ip address", v)
    default:
        return nil, fmt.Errorf("Cannot convert type %s to ip.", reflect.TypeOf(i))
    }
}
|
||||||
|
|
||||||
|
//Correction is anything that can be run. Implementation is up to the specific provider.
type Correction struct {
    F   func() error `json:"-"` // the action to execute; excluded from JSON output
    Msg string       // human-readable description printed before F runs
}
|
39
models/dns_test.go
Normal file
39
models/dns_test.go
Normal file
|
@ -0,0 +1,39 @@
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestHasRecordTypeName verifies (type, name) lookup on a DomainConfig's
// record list: empty list, exact match, and type mismatch.
func TestHasRecordTypeName(t *testing.T) {
    x := &RecordConfig{
        Type: "A",
        Name: "@",
    }
    dc := DomainConfig{}
    // Empty record list: nothing matches.
    if dc.HasRecordTypeName("A", "@") {
        t.Errorf("%v: expected (%v) got (%v)\n", dc.Records, false, true)
    }
    dc.Records = append(dc.Records, x)
    // Exact (type, name) match is found.
    if !dc.HasRecordTypeName("A", "@") {
        t.Errorf("%v: expected (%v) got (%v)\n", dc.Records, true, false)
    }
    // Same name, different type: no match.
    if dc.HasRecordTypeName("AAAA", "@") {
        t.Errorf("%v: expected (%v) got (%v)\n", dc.Records, false, true)
    }
}

// TestRR verifies RecordConfig-to-dns.RR conversion, including the
// substitution of DefaultTTL (300) when TTL is zero.
func TestRR(t *testing.T) {
    experiment := RecordConfig{
        Type:     "A",
        Name:     "foo",
        Target:   "1.2.3.4",
        TTL:      0,
        NameFQDN: "foo.example.com",
        Priority: 0,
    }
    expected := "foo.example.com.\t300\tIN\tA\t1.2.3.4"
    found := experiment.RR().String()
    if found != expected {
        t.Errorf("RR expected (%#v) got (%#v)\n", expected, found)
    }
}
|
294
normalize/validate.go
Normal file
294
normalize/validate.go
Normal file
|
@ -0,0 +1,294 @@
|
||||||
|
package normalize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/miekg/dns"
|
||||||
|
"github.com/miekg/dns/dnsutil"
|
||||||
|
"github.com/StackExchange/dnscontrol/models"
|
||||||
|
"github.com/StackExchange/dnscontrol/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// assert_no_enddot returns an error when label is empty or ends with a
// dot. The apex label "@" is always accepted.
func assert_no_enddot(label string) error {
    switch {
    case label == "@":
        return nil
    case len(label) < 1:
        return fmt.Errorf("WARNING: null label.")
    case label[len(label)-1] == '.':
        return fmt.Errorf("WARNING: label (%v) ends with a (.)", label)
    }
    return nil
}

// assert_no_underscores returns an error when label contains an underscore.
func assert_no_underscores(label string) error {
    if !strings.ContainsRune(label, '_') {
        return nil
    }
    return fmt.Errorf("WARNING: label (%v) contains an underscore", label)
}
|
||||||
|
|
||||||
|
// assert_valid_ipv4 returns an error unless label parses as an IPv4 address.
func assert_valid_ipv4(label string) error {
    ip := net.ParseIP(label)
    if ip != nil && ip.To4() != nil {
        return nil
    }
    return fmt.Errorf("WARNING: target (%v) is not an IPv4 address", label)
}
|
||||||
|
|
||||||
|
// assert_valid_ipv6 returns an error unless label is an IPv6 address.
func assert_valid_ipv6(label string) error {
    // BUG FIX: net.ParseIP(x).To16() is also non-nil for plain IPv4
    // addresses ("1.2.3.4" converts to an IPv4-in-IPv6 form), so the old
    // To16-only check accepted IPv4 targets here. Require the text to
    // actually use IPv6 syntax (contain a colon) in addition to parsing.
    ip := net.ParseIP(label)
    if ip == nil || ip.To16() == nil || !strings.Contains(label, ":") {
        return fmt.Errorf("WARNING: target (%v) is not an IPv6 address", label)
    }
    return nil
}
|
||||||
|
|
||||||
|
// assert_valid_target validates a record target: "@" is allowed, the
// empty string is not, and any name containing a dot must be fully
// qualified (end with ".").
func assert_valid_target(label string) error {
    if label == "@" {
        return nil
    }
    if len(label) < 1 {
        return fmt.Errorf("WARNING: null label.")
    }
    // If it containts a ".", it must end in a ".".
    dotted := strings.ContainsRune(label, '.')
    qualified := label[len(label)-1] == '.'
    if dotted && !qualified {
        return fmt.Errorf("WARNING: label (%v) includes a (.), must end with a (.)", label)
    }
    return nil
}
|
||||||
|
|
||||||
|
// validateRecordTypes list of valid rec.Type values. Returns true if this is a real DNS record type, false means it is a pseudo-type used internally.
|
||||||
|
func validateRecordTypes(rec *models.RecordConfig, domain_name string) error {
|
||||||
|
var valid_types = map[string]bool{
|
||||||
|
"A": true,
|
||||||
|
"AAAA": true,
|
||||||
|
"CNAME": true,
|
||||||
|
"IMPORT_TRANSFORM": false,
|
||||||
|
"MX": true,
|
||||||
|
"TXT": true,
|
||||||
|
"NS": true,
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := valid_types[rec.Type]; !ok {
|
||||||
|
return fmt.Errorf("Unsupported record type (%v) domain=%v name=%v", rec.Type, domain_name, rec.Name)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// validateTargets collects all validation errors for rec's label and
// target according to the record type's rules, returning them as a slice
// (nil/empty when the record is valid).
func validateTargets(rec *models.RecordConfig, domain_name string) (errs []error) {
    label := rec.Name
    target := rec.Target
    // check accumulates any non-nil error into the named return value.
    check := func(e error) {
        if e != nil {
            errs = append(errs, e)
        }
    }
    switch rec.Type {
    case "A":
        check(assert_no_enddot(label))
        check(assert_no_underscores(label))
        check(assert_valid_ipv4(target))
    case "AAAA":
        check(assert_no_enddot(label))
        check(assert_no_underscores(label))
        check(assert_valid_ipv6(target))
    case "CNAME":
        check(assert_no_enddot(label))
        check(assert_no_underscores(label))
        check(assert_valid_target(target))
    case "MX":
        check(assert_no_enddot(label))
        check(assert_no_underscores(label))
        check(assert_valid_target(target))
    case "NS":
        check(assert_no_enddot(label))
        check(assert_no_underscores(label))
        check(assert_valid_target(target))
    case "TXT", "IMPORT_TRANSFORM":
        // No validation rules for these types.
    default:
        errs = append(errs, fmt.Errorf("Unimplemented record type (%v) domain=%v name=%v",
            rec.Type, domain_name, rec.Name))
    }
    return
}
|
||||||
|
|
||||||
|
// transform_cname rewrites a CNAME target from old_domain into
// new_domain: the target is first qualified relative to old_domain, then
// the new_domain origin is appended. The result always ends with ".".
// e.g. ("foo", "old.com", "new.com") -> "foo.old.com.new.com.".
func transform_cname(target, old_domain, new_domain string) string {
    // Canonicalize: if target isn't already a FQDN, qualify it with old_domain.
    result := dnsutil.AddOrigin(target, old_domain)
    if dns.IsFqdn(result) {
        // Strip the trailing dot so the next AddOrigin call appends new_domain.
        result = result[:len(result)-1]
    }
    return dnsutil.AddOrigin(result, new_domain) + "."
}
|
||||||
|
|
||||||
|
// import_transform imports the records of one zone into another, modifying records along the way.
func import_transform(src_domain, dst_domain *models.DomainConfig, transforms []transform.IpConversion) error {
    // Read src_domain.Records, transform, and append to dst_domain.Records:
    // 1. Skip any that aren't A or CNAMEs.
    // 2. Append dest_domainname to the end of the label.
    // 3. For CNAMEs, append dest_domainname to the end of the target.
    // 4. For As, change the target as described the transforms.

    for _, rec := range src_domain.Records {
        // Work on a shallow copy so the source zone's records stay untouched.
        var newrec models.RecordConfig
        newrec = *rec
        // Re-qualify into the destination zone: the source FQDN becomes the
        // short name, and the destination origin is appended to form the FQDN.
        newrec.Name = newrec.NameFQDN
        newrec.NameFQDN = dnsutil.AddOrigin(newrec.Name, dst_domain.Name)
        switch rec.Type {
        case "A":
            // Rewrite the IPv4 target through the transform table.
            tr, err := transform.TransformIP(net.ParseIP(newrec.Target), transforms)
            if err != nil {
                return fmt.Errorf("import_transform: TransformIP(%v, %v) returned err=%s", newrec.Target, transforms, err)
            }
            newrec.Target = tr.String()
        case "CNAME":
            newrec.Target = transform_cname(newrec.Target, src_domain.Name, dst_domain.Name)
        case "MX", "NS", "TXT":
            // Not imported.
            continue
        default:
            return fmt.Errorf("import_transform: Unimplemented record type %v (%v)",
                rec.Type, rec.Name)
        }
        dst_domain.Records = append(dst_domain.Records, &newrec)
    }
    return nil
}
|
||||||
|
|
||||||
|
// deleteImportTransformRecords deletes any IMPORT_TRANSFORM records from a domain.
|
||||||
|
func deleteImportTransformRecords(domain *models.DomainConfig) {
|
||||||
|
for i := len(domain.Records) - 1; i >= 0; i-- {
|
||||||
|
rec := domain.Records[i]
|
||||||
|
if rec.Type == "IMPORT_TRANSFORM" {
|
||||||
|
domain.Records = append(domain.Records[:i], domain.Records[i+1:]...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NormalizeAndValidateConfig validates the parsed configuration and
// rewrites it into canonical form: nameserver and record names are
// qualified, CNAME/MX/NS targets canonicalized, IMPORT_TRANSFORM
// pseudo-records expanded and removed, and per-record "transform"
// metadata applied. All problems found are returned; the config is still
// mutated as far as possible.
func NormalizeAndValidateConfig(config *models.DNSConfig) (errs []error) {
    // TODO(tlim): Before any changes are made, we should check the rules
    // such as MX/CNAME/NS .Target must be a single word, "@", or FQDN.
    // Validate and normalize
    for _, domain := range config.Domains {

        // Normalize Nameservers.
        for _, ns := range domain.Nameservers {
            ns.Name = dnsutil.AddOrigin(ns.Name, domain.Name)
            // Nameserver names carry no trailing dot (see Nameserver struct).
            ns.Name = strings.TrimRight(ns.Name, ".")
        }

        // Normalize Records.
        for _, rec := range domain.Records {

            // Validate the unmodified inputs:
            if err := validateRecordTypes(rec, domain.Name); err != nil {
                errs = append(errs, err)
            }
            if errs2 := validateTargets(rec, domain.Name); errs2 != nil {
                errs = append(errs, errs2...)
            }

            // Canonicalize Targets.
            if rec.Type == "CNAME" || rec.Type == "MX" || rec.Type == "NS" {
                rec.Target = dnsutil.AddOrigin(rec.Target, domain.Name+".")
            }
            // Populate FQDN:
            rec.NameFQDN = dnsutil.AddOrigin(rec.Name, domain.Name)
        }
    }

    // Process any pseudo-records:
    for _, domain := range config.Domains {
        for _, rec := range domain.Records {
            if rec.Type == "IMPORT_TRANSFORM" {
                table, err := transform.DecodeTransformTable(rec.Metadata["transform_table"])
                if err != nil {
                    errs = append(errs, err)
                    continue
                }
                // rec.Target names the source domain to import records from.
                err = import_transform(config.FindDomain(rec.Target), domain, table)
                if err != nil {
                    errs = append(errs, err)
                }
            }
        }
    }
    // Clean up: the pseudo-records have been expanded, remove them.
    for _, domain := range config.Domains {
        deleteImportTransformRecords(domain)
    }

    // Run record transforms
    for _, domain := range config.Domains {
        if err := applyRecordTransforms(domain); err != nil {
            errs = append(errs, err)
        }
    }

    // Verify all labels are FQDN ending with ".":
    // (Sanity checks — these should be impossible after the normalization above.)
    for _, domain := range config.Domains {
        for _, rec := range domain.Records {
            // .Name must NOT end in "."
            if rec.Name[len(rec.Name)-1] == '.' {
                errs = append(errs, fmt.Errorf("Should not happen: Label ends with '.': %v %v %v %v %v",
                    domain.Name, rec.Name, rec.NameFQDN, rec.Type, rec.Target))
            }
            // .NameFQDN must NOT end in "."
            if rec.NameFQDN[len(rec.NameFQDN)-1] == '.' {
                errs = append(errs, fmt.Errorf("Should not happen: FQDN ends with '.': %v %v %v %v %v",
                    domain.Name, rec.Name, rec.NameFQDN, rec.Type, rec.Target))
            }
            // .Target MUST end in "."
            if rec.Type == "CNAME" || rec.Type == "NS" || rec.Type == "MX" {
                if rec.Target[len(rec.Target)-1] != '.' {
                    errs = append(errs, fmt.Errorf("Should not happen: Target does NOT ends with '.': %v %v %v %v %v",
                        domain.Name, rec.Name, rec.NameFQDN, rec.Type, rec.Target))
                }
            }
        }
    }
    return errs
}
|
||||||
|
|
||||||
|
func applyRecordTransforms(domain *models.DomainConfig) error {
|
||||||
|
for _, rec := range domain.Records {
|
||||||
|
if rec.Type != "A" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
tt, ok := rec.Metadata["transform"]
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
table, err := transform.DecodeTransformTable(tt)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
ip := net.ParseIP(rec.Target) //ip already validated above
|
||||||
|
newIPs, err := transform.TransformIPToList(net.ParseIP(rec.Target), table)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for i, newIP := range newIPs {
|
||||||
|
if i == 0 && !newIP.Equal(ip) {
|
||||||
|
rec.Target = newIP.String() //replace target of first record if different
|
||||||
|
} else if i > 0 {
|
||||||
|
// any additional ips need identical records with the alternate ip added to the domain
|
||||||
|
copy, err := rec.Copy()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
copy.Target = newIP.String()
|
||||||
|
domain.Records = append(domain.Records, copy)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
140
normalize/validate_test.go
Normal file
140
normalize/validate_test.go
Normal file
|
@ -0,0 +1,140 @@
|
||||||
|
package normalize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/StackExchange/dnscontrol/models"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Test_assert_no_enddot: "@" and dot-free/interior-dot labels pass;
// empty labels and trailing dots fail.
func Test_assert_no_enddot(t *testing.T) {
    var tests = []struct {
        experiment string
        isError    bool
    }{
        {"@", false},
        {"foo", false},
        {"foo.bar", false},
        {"foo.", true},
        {"foo.bar.", true},
    }

    for _, test := range tests {
        err := assert_no_enddot(test.experiment)
        checkError(t, err, test.isError, test.experiment)
    }
}
|
||||||
|
|
||||||
|
// checkError is a shared test helper: it fails the test when err's
// presence disagrees with shouldError. experiment identifies the input
// under test in the failure message.
func checkError(t *testing.T, err error, shouldError bool, experiment string) {
    if err != nil && !shouldError {
        t.Errorf("%v: Error (%v)\n", experiment, err)
    }
    if err == nil && shouldError {
        t.Errorf("%v: Expected error but got none \n", experiment)
    }
}
|
||||||
|
|
||||||
|
// Test_assert_no_underscores: any underscore, at any position, is rejected.
func Test_assert_no_underscores(t *testing.T) {
    var tests = []struct {
        experiment string
        isError    bool
    }{
        {"@", false},
        {"foo", false},
        {"_foo", true},
        {"foo_", true},
        {"fo_o", true},
    }

    for _, test := range tests {
        err := assert_no_underscores(test.experiment)
        checkError(t, err, test.isError, test.experiment)
    }
}

// Test_assert_valid_ipv4: only a plain dotted-quad passes; CIDR, short,
// and non-numeric forms are rejected.
func Test_assert_valid_ipv4(t *testing.T) {
    var tests = []struct {
        experiment string
        isError    bool
    }{
        {"1.2.3.4", false},
        {"1.2.3.4/10", true},
        {"1.2.3", true},
        {"foo", true},
    }

    for _, test := range tests {
        err := assert_valid_ipv4(test.experiment)
        checkError(t, err, test.isError, test.experiment)
    }
}

// Test_assert_valid_target: "@", single words, and FQDNs pass; a dotted
// name without the trailing dot is rejected.
func Test_assert_valid_target(t *testing.T) {
    var tests = []struct {
        experiment string
        isError    bool
    }{
        {"@", false},
        {"foo", false},
        {"foo.bar.", false},
        {"foo.", false},
        {"foo.bar", true},
    }

    for _, test := range tests {
        err := assert_valid_target(test.experiment)
        checkError(t, err, test.isError, test.experiment)
    }
}

// Test_transform_cname: targets are re-qualified from old.com into
// new.com; already-FQDN targets only gain the new origin.
func Test_transform_cname(t *testing.T) {
    var tests = []struct {
        experiment string
        expected   string
    }{
        {"@", "old.com.new.com."},
        {"foo", "foo.old.com.new.com."},
        {"foo.bar", "foo.bar.old.com.new.com."},
        {"foo.bar.", "foo.bar.new.com."},
        {"chat.stackexchange.com.", "chat.stackexchange.com.new.com."},
    }

    for _, test := range tests {
        actual := transform_cname(test.experiment, "old.com", "new.com")
        if test.expected != actual {
            t.Errorf("%v: expected (%v) got (%v)\n", test.experiment, test.expected, actual)
        }
    }
}
|
||||||
|
|
||||||
|
func TestTransforms(t *testing.T) {
|
||||||
|
var tests = []struct {
|
||||||
|
givenIP string
|
||||||
|
expectedRecords []string
|
||||||
|
}{
|
||||||
|
{"0.0.5.5", []string{"2.0.5.5"}},
|
||||||
|
{"3.0.5.5", []string{"5.5.5.5"}},
|
||||||
|
{"7.0.5.5", []string{"9.9.9.9", "10.10.10.10"}},
|
||||||
|
}
|
||||||
|
const transform = "0.0.0.0~1.0.0.0~2.0.0.0~; 3.0.0.0~4.0.0.0~~5.5.5.5; 7.0.0.0~8.0.0.0~~9.9.9.9,10.10.10.10"
|
||||||
|
for i, test := range tests {
|
||||||
|
dc := &models.DomainConfig{
|
||||||
|
Records: []*models.RecordConfig{
|
||||||
|
{Type: "A", Target: test.givenIP, Metadata: map[string]string{"transform": transform}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
err := applyRecordTransforms(dc)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("error on test %d: %s", i, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if len(dc.Records) != len(test.expectedRecords) {
|
||||||
|
t.Errorf("test %d: expect %d records but found %d", i, len(test.expectedRecords), len(dc.Records))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for r, rec := range dc.Records {
|
||||||
|
if rec.Target != test.expectedRecords[r] {
|
||||||
|
t.Errorf("test %d at index %d: records don't match. Expect %s but found %s.", i, r, test.expectedRecords[r], rec.Target)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
37
providers/activedir/activedirProvider.go
Normal file
37
providers/activedir/activedirProvider.go
Normal file
|
@ -0,0 +1,37 @@
|
||||||
|
package activedir
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"runtime"
|
||||||
|
|
||||||
|
"github.com/StackExchange/dnscontrol/providers"
|
||||||
|
)
|
||||||
|
|
||||||
|
var flagFakePowerShell = flag.Bool("fakeps", false, "ACTIVEDIR: Do not run PowerShell. Open adzonedump.*.json files for input, and write to -psout any PS1 commands that make changes.")
|
||||||
|
var flagPsFuture = flag.String("psout", "dns_update_commands.ps1", "ACTIVEDIR: Where to write PS1 commands for future execution.")
|
||||||
|
var flagPsLog = flag.String("pslog", "powershell.log", "ACTIVEDIR: filename of PS1 command log.")
|
||||||
|
|
||||||
|
// This is the struct that matches either (or both) of the Registrar and/or DNSProvider interfaces:
|
||||||
|
type adProvider struct {
|
||||||
|
adServer string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register with the dnscontrol system.
|
||||||
|
// This establishes the name (all caps), and the function to call to initialize it.
|
||||||
|
func init() {
|
||||||
|
providers.RegisterDomainServiceProviderType("ACTIVEDIRECTORY_PS", newDNS)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newDNS(config map[string]string, metadata json.RawMessage) (providers.DNSServiceProvider, error) {
|
||||||
|
if runtime.GOOS == "windows" || *flagFakePowerShell {
|
||||||
|
srv := config["ADServer"]
|
||||||
|
if srv == "" {
|
||||||
|
return nil, fmt.Errorf("ADServer required for Active Directory provider")
|
||||||
|
}
|
||||||
|
return &adProvider{adServer: srv}, nil
|
||||||
|
}
|
||||||
|
fmt.Printf("WARNING: PowerShell not available. ActiveDirectory will not be updated.\n")
|
||||||
|
return providers.None{}, nil
|
||||||
|
}
|
BIN
providers/activedir/adzonedump.test2.json
Executable file
BIN
providers/activedir/adzonedump.test2.json
Executable file
Binary file not shown.
78
providers/activedir/doc.md
Normal file
78
providers/activedir/doc.md
Normal file
|
@ -0,0 +1,78 @@
|
||||||
|
### ActiveDirectory
|
||||||
|
|
||||||
|
This provider updates a DNS Zone in an ActiveDirectory Integrated Zone.
|
||||||
|
|
||||||
|
When run on Windows, AD is updated directly. The code generates
|
||||||
|
PowerShell commands, executes them, and checks the results.
|
||||||
|
It leaves behind a log file of the commands that were generated.
|
||||||
|
|
||||||
|
When run on non-Windows, AD isn't updated because we can't execute
|
||||||
|
PowerShell at this time. Instead of reading the existing zone data
|
||||||
|
from AD, It learns what
|
||||||
|
records are in the zone by reading
|
||||||
|
`adzonedump.{ZONENAME}.json`, a file that must be created beforehand.
|
||||||
|
It does not actually update AD, it generates a file with PowerShell
|
||||||
|
commands that would do the updates, which you must execute afterwords.
|
||||||
|
If the `adzonedump.{ZONENAME}.json` does not exist, the zone is quietly skipped.
|
||||||
|
|
||||||
|
Not implemented:
|
||||||
|
|
||||||
|
* Delete records. This provider will not delete any records. It will only add
|
||||||
|
and change existing records. See "Note to future devs" below.
|
||||||
|
* Update TTLs. It ignores TTLs.
|
||||||
|
|
||||||
|
|
||||||
|
## required creds.json config
|
||||||
|
|
||||||
|
No "creds.json" configuration is expected.
|
||||||
|
|
||||||
|
## example dns config js:
|
||||||
|
|
||||||
|
```
|
||||||
|
var REG_NONE = NewRegistrar('none', 'NONE')
|
||||||
|
var DSP_ACTIVEDIRECTORY_DS = NewDSP("activedir", "ACTIVEDIRECTORY_PS");
|
||||||
|
|
||||||
|
D('ds.stackexchange.com', REG_NONE,
|
||||||
|
DSP_ACTIVEDIRECTORY_DS,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
//records handled by another provider...
|
||||||
|
);
|
||||||
|
```
|
||||||
|
|
||||||
|
## Special Windows stuff
|
||||||
|
|
||||||
|
This provider needs to do 2 things:
|
||||||
|
|
||||||
|
* Get a list of zone records:
|
||||||
|
* powerShellDump: Runs a PS command that dumps the zone to JSON.
|
||||||
|
* readZoneDump: Opens a adzonedump.$DOMAINNAME.json file and reads JSON out of it. If the file does not exist, this is considered an error and processing stops.
|
||||||
|
|
||||||
|
* Update records:
|
||||||
|
* powerShellExec: Execute PS commands that do the update.
|
||||||
|
* powerShellRecord: Record the PS command that can be run later to do the updates. This file is -psout=dns_update_commands.ps1
|
||||||
|
|
||||||
|
So what happens when? Well, that's complex. We want both Windows and Linux to be able to use -fakewindows
|
||||||
|
for either debugging or (on Windows) actual use. However only Windows permits -fakewinows=false and actually executes
|
||||||
|
the PS code. Here's which algorithm is used for each case:
|
||||||
|
|
||||||
|
* If -fakewindows is used on any system: readZoneDump and powerShellRecord is used.
|
||||||
|
* On Windows (without -fakewindows): powerShellDump and powerShellExec is used.
|
||||||
|
* On Linux (wihtout -fakewindows): the provider loads as "NONE" and nothing happens.
|
||||||
|
|
||||||
|
|
||||||
|
## Note to future devs
|
||||||
|
|
||||||
|
### Why doesn't this provider delete records?
|
||||||
|
|
||||||
|
Because at this time Stack doesn't fully control AD zones
|
||||||
|
using dnscontrol. It only needs to add/change records.
|
||||||
|
|
||||||
|
What should we do when it does need to delete them?
|
||||||
|
|
||||||
|
Currently NO_PURGE is a no-op. I would change it to update
|
||||||
|
domain metadata to flag that deletes should be enabled/disabled.
|
||||||
|
Then generate the deletes only if this flag exists. To be paranoid,
|
||||||
|
the func that does the deleting could check this flag to make sure
|
||||||
|
that it really should be deleting something.
|
293
providers/activedir/domains.go
Normal file
293
providers/activedir/domains.go
Normal file
|
@ -0,0 +1,293 @@
|
||||||
|
package activedir
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/TomOnTime/utfutil"
|
||||||
|
"github.com/miekg/dns/dnsutil"
|
||||||
|
|
||||||
|
"github.com/StackExchange/dnscontrol/models"
|
||||||
|
"github.com/StackExchange/dnscontrol/providers/diff"
|
||||||
|
)
|
||||||
|
|
||||||
|
const zoneDumpFilenamePrefix = "adzonedump"
|
||||||
|
|
||||||
|
type RecordConfigJson struct {
|
||||||
|
Name string `json:"hostname"`
|
||||||
|
Type string `json:"recordtype"`
|
||||||
|
Data string `json:"recorddata"`
|
||||||
|
TTL uint32 `json:"timetolive"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDomainCorrections gets existing records, diffs them against existing, and returns corrections.
|
||||||
|
func (c *adProvider) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {
|
||||||
|
|
||||||
|
// Read foundRecords:
|
||||||
|
foundRecords, err := c.getExistingRecords(dc.Name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("c.getExistingRecords(%v) failed: %v", dc.Name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read expectedRecords:
|
||||||
|
//expectedRecords := make([]*models.RecordConfig, len(dc.Records))
|
||||||
|
expectedRecords := make([]diff.Record, len(dc.Records))
|
||||||
|
for i, r := range dc.Records {
|
||||||
|
if r.TTL == 0 {
|
||||||
|
r.TTL = models.DefaultTTL
|
||||||
|
}
|
||||||
|
expectedRecords[i] = r
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert to []diff.Records and compare:
|
||||||
|
foundDiffRecords := make([]diff.Record, 0, len(foundRecords))
|
||||||
|
for _, rec := range foundRecords {
|
||||||
|
foundDiffRecords = append(foundDiffRecords, rec)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, create, _, mod := diff.IncrementalDiff(foundDiffRecords, expectedRecords)
|
||||||
|
// NOTE(tlim): This provider does not delete records. If
|
||||||
|
// you need to delete a record, either delete it manually
|
||||||
|
// or see providers/activedir/doc.md for implementation tips.
|
||||||
|
|
||||||
|
// Generate changes.
|
||||||
|
corrections := []*models.Correction{}
|
||||||
|
for _, d := range create {
|
||||||
|
corrections = append(corrections, c.createRec(dc.Name, d.Desired.(*models.RecordConfig))...)
|
||||||
|
}
|
||||||
|
for _, m := range mod {
|
||||||
|
corrections = append(corrections, c.modifyRec(dc.Name, m))
|
||||||
|
}
|
||||||
|
return corrections, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// zoneDumpFilename returns the filename to use to write or read
|
||||||
|
// an activedirectory zone dump for a particular domain.
|
||||||
|
func zoneDumpFilename(domainname string) string {
|
||||||
|
return zoneDumpFilenamePrefix + "." + domainname + ".json"
|
||||||
|
}
|
||||||
|
|
||||||
|
// readZoneDump reads a pre-existing zone dump from adzonedump.*.json.
|
||||||
|
func (c *adProvider) readZoneDump(domainname string) ([]byte, error) {
|
||||||
|
// File not found is considered an error.
|
||||||
|
dat, err := utfutil.ReadFile(zoneDumpFilename(domainname), utfutil.WINDOWS)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("Powershell to generate zone dump:")
|
||||||
|
fmt.Println(c.generatePowerShellZoneDump(domainname))
|
||||||
|
}
|
||||||
|
return dat, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// powerShellLogCommand logs to flagPsLog that a PowerShell command is going to be run.
|
||||||
|
func powerShellLogCommand(command string) error {
|
||||||
|
return logHelper(fmt.Sprintf("# %s\r\n%s\r\n", time.Now().UTC(), strings.TrimSpace(command)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// powerShellLogOutput logs to flagPsLog that a PowerShell command is going to be run.
|
||||||
|
func powerShellLogOutput(s string) error {
|
||||||
|
return logHelper(fmt.Sprintf("OUTPUT: START\r\n%s\r\nOUTPUT: END\r\n", s))
|
||||||
|
}
|
||||||
|
|
||||||
|
// powerShellLogErr logs that a PowerShell command had an error.
|
||||||
|
func powerShellLogErr(e error) error {
|
||||||
|
err := logHelper(fmt.Sprintf("ERROR: %v\r\r", e)) //Log error to powershell.log
|
||||||
|
if err != nil {
|
||||||
|
return err //Bubble up error created in logHelper
|
||||||
|
}
|
||||||
|
return e //Bubble up original error
|
||||||
|
}
|
||||||
|
|
||||||
|
func logHelper(s string) error {
|
||||||
|
logfile, err := os.OpenFile(*flagPsLog, os.O_APPEND|os.O_RDWR|os.O_CREATE, 0660)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("ERROR: Can not create/append to %#v: %v\n", *flagPsLog, err)
|
||||||
|
}
|
||||||
|
_, err = fmt.Fprintln(logfile, s)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("ERROR: Append to %#v failed: %v\n", *flagPsLog, err)
|
||||||
|
}
|
||||||
|
if logfile.Close() != nil {
|
||||||
|
return fmt.Errorf("ERROR: Closing %#v failed: %v\n", *flagPsLog, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// powerShellRecord records that a PowerShell command should be executed later.
|
||||||
|
func powerShellRecord(command string) error {
|
||||||
|
recordfile, err := os.OpenFile(*flagPsFuture, os.O_APPEND|os.O_RDWR|os.O_CREATE, 0660)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("ERROR: Can not create/append to %#v: %v\n", *flagPsFuture, err)
|
||||||
|
}
|
||||||
|
_, err = recordfile.WriteString(command)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("ERROR: Append to %#v failed: %v\n", *flagPsFuture, err)
|
||||||
|
}
|
||||||
|
return recordfile.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *adProvider) getExistingRecords(domainname string) ([]*models.RecordConfig, error) {
|
||||||
|
//log.Printf("getExistingRecords(%s)\n", domainname)
|
||||||
|
|
||||||
|
// Get the JSON either from adzonedump or by running a PowerShell script.
|
||||||
|
data, err := c.getRecords(domainname)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("getRecords failed on %#v: %v\n", domainname, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var recs []*RecordConfigJson
|
||||||
|
err = json.Unmarshal(data, &recs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("json.Unmarshal failed on %#v: %v\n", domainname, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
result := make([]*models.RecordConfig, 0, len(recs))
|
||||||
|
for i := range recs {
|
||||||
|
t, err := recs[i].unpackRecord(domainname)
|
||||||
|
if err == nil {
|
||||||
|
result = append(result, t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RecordConfigJson) unpackRecord(origin string) (*models.RecordConfig, error) {
|
||||||
|
rc := models.RecordConfig{}
|
||||||
|
|
||||||
|
rc.Name = strings.ToLower(r.Name)
|
||||||
|
rc.NameFQDN = dnsutil.AddOrigin(rc.Name, origin)
|
||||||
|
rc.Type = r.Type
|
||||||
|
rc.TTL = r.TTL
|
||||||
|
|
||||||
|
switch rc.Type {
|
||||||
|
case "A":
|
||||||
|
rc.Target = r.Data
|
||||||
|
case "CNAME":
|
||||||
|
rc.Target = strings.ToLower(r.Data)
|
||||||
|
case "AAAA", "MX", "NAPTR", "NS", "SOA", "SRV":
|
||||||
|
return nil, fmt.Errorf("Unimplemented: %v", r.Type)
|
||||||
|
default:
|
||||||
|
log.Fatalf("Unhandled models.RecordConfigJson type: %v (%v)\n", rc.Type, r)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &rc, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// powerShellDump runs a PowerShell command to get a dump of all records in a DNS zone.
|
||||||
|
func (c *adProvider) generatePowerShellZoneDump(domainname string) string {
|
||||||
|
cmd_txt := `@("REPLACE_WITH_ZONE") | %{
|
||||||
|
Get-DnsServerResourceRecord -ComputerName REPLACE_WITH_COMPUTER_NAME -ZoneName $_ | select hostname,recordtype,@{n="timestamp";e={$_.timestamp.tostring()}},@{n="timetolive";e={$_.timetolive.totalseconds}},@{n="recorddata";e={($_.recorddata.ipv4address,$_.recorddata.ipv6address,$_.recorddata.HostNameAlias,"other_record" -ne $null)[0]-as [string]}} | ConvertTo-Json > REPLACE_WITH_FILENAMEPREFIX.REPLACE_WITH_ZONE.json
|
||||||
|
}`
|
||||||
|
cmd_txt = strings.Replace(cmd_txt, "REPLACE_WITH_ZONE", domainname, -1)
|
||||||
|
cmd_txt = strings.Replace(cmd_txt, "REPLACE_WITH_COMPUTER_NAME", c.adServer, -1)
|
||||||
|
cmd_txt = strings.Replace(cmd_txt, "REPLACE_WITH_FILENAMEPREFIX", zoneDumpFilenamePrefix, -1)
|
||||||
|
|
||||||
|
return cmd_txt
|
||||||
|
}
|
||||||
|
|
||||||
|
// generatePowerShellCreate generates PowerShell commands to ADD a record.
|
||||||
|
func (c *adProvider) generatePowerShellCreate(domainname string, rec *models.RecordConfig) string {
|
||||||
|
|
||||||
|
content := rec.Target
|
||||||
|
|
||||||
|
text := "\r\n" // Skip a line.
|
||||||
|
text += fmt.Sprintf("Add-DnsServerResourceRecord%s", rec.Type)
|
||||||
|
text += fmt.Sprintf(` -ComputerName "%s"`, c.adServer)
|
||||||
|
text += fmt.Sprintf(` -ZoneName "%s"`, domainname)
|
||||||
|
text += fmt.Sprintf(` -Name "%s"`, rec.Name)
|
||||||
|
switch rec.Type {
|
||||||
|
case "CNAME":
|
||||||
|
text += fmt.Sprintf(` -HostNameAlias "%s"`, content)
|
||||||
|
case "A":
|
||||||
|
text += fmt.Sprintf(` -IPv4Address "%s"`, content)
|
||||||
|
case "NS":
|
||||||
|
text = fmt.Sprintf("\r\n"+`echo "Skipping NS update (%v %v)"`+"\r\n", rec.Name, rec.Target)
|
||||||
|
default:
|
||||||
|
panic(fmt.Errorf("ERROR: generatePowerShellCreate() does not yet handle recType=%s recName=%#v content=%#v)\n", rec.Type, rec.Name, content))
|
||||||
|
}
|
||||||
|
text += "\r\n"
|
||||||
|
|
||||||
|
return text
|
||||||
|
}
|
||||||
|
|
||||||
|
// generatePowerShellModify generates PowerShell commands to MODIFY a record.
|
||||||
|
func (c *adProvider) generatePowerShellModify(domainname, recName, recType, oldContent, newContent string, oldTTL, newTTL uint32) string {
|
||||||
|
|
||||||
|
var queryField, queryContent string
|
||||||
|
|
||||||
|
switch recType {
|
||||||
|
case "A":
|
||||||
|
queryField = "IPv4address"
|
||||||
|
queryContent = `"` + oldContent + `"`
|
||||||
|
case "CNAME":
|
||||||
|
queryField = "HostNameAlias"
|
||||||
|
queryContent = `"` + oldContent + `"`
|
||||||
|
default:
|
||||||
|
panic(fmt.Errorf("ERROR: generatePowerShellModify() does not yet handle recType=%s recName=%#v content=(%#v, %#v)\n", recType, recName, oldContent, newContent))
|
||||||
|
}
|
||||||
|
|
||||||
|
text := "\r\n" // Skip a line.
|
||||||
|
text += fmt.Sprintf(`echo "MODIFY %s %s %s old=%s new=%s"`, recName, domainname, recType, oldContent, newContent)
|
||||||
|
text += "\r\n"
|
||||||
|
|
||||||
|
text += "$OldObj = Get-DnsServerResourceRecord"
|
||||||
|
text += fmt.Sprintf(` -ComputerName "%s"`, c.adServer)
|
||||||
|
text += fmt.Sprintf(` -ZoneName "%s"`, domainname)
|
||||||
|
text += fmt.Sprintf(` -Name "%s"`, recName)
|
||||||
|
text += fmt.Sprintf(` -RRType "%s"`, recType)
|
||||||
|
text += fmt.Sprintf(" | Where-Object {$_.RecordData.%s -eq %s -and $_.HostName -eq \"%s\"}", queryField, queryContent, recName)
|
||||||
|
text += "\r\n"
|
||||||
|
text += `if($OldObj.Length -ne $null){ throw "Error, multiple results for Get-DnsServerResourceRecord" }`
|
||||||
|
text += "\r\n"
|
||||||
|
|
||||||
|
text += "$NewObj = $OldObj.Clone()"
|
||||||
|
text += "\r\n"
|
||||||
|
|
||||||
|
if oldContent != newContent {
|
||||||
|
text += fmt.Sprintf(`$NewObj.RecordData.%s = "%s"`, queryField, newContent)
|
||||||
|
text += "\r\n"
|
||||||
|
}
|
||||||
|
|
||||||
|
if oldTTL != newTTL {
|
||||||
|
text += fmt.Sprintf(`$NewObj.TimeToLive = New-TimeSpan -Seconds %d`, newTTL)
|
||||||
|
text += "\r\n"
|
||||||
|
}
|
||||||
|
|
||||||
|
text += "Set-DnsServerResourceRecord"
|
||||||
|
text += fmt.Sprintf(` -ComputerName "%s"`, c.adServer)
|
||||||
|
text += fmt.Sprintf(` -ZoneName "%s"`, domainname)
|
||||||
|
text += fmt.Sprintf(` -NewInputObject $NewObj -OldInputObject $OldObj`)
|
||||||
|
text += "\r\n"
|
||||||
|
|
||||||
|
return text
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *adProvider) createRec(domainname string, rec *models.RecordConfig) []*models.Correction {
|
||||||
|
arr := []*models.Correction{
|
||||||
|
{
|
||||||
|
Msg: fmt.Sprintf("CREATE record: %s %s ttl(%d) %s", rec.Name, rec.Type, rec.TTL, rec.Target),
|
||||||
|
F: func() error {
|
||||||
|
return powerShellDoCommand(c.generatePowerShellCreate(domainname, rec))
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
return arr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *adProvider) modifyRec(domainname string, m diff.Correlation) *models.Correction {
|
||||||
|
|
||||||
|
old, rec := m.Existing.(*models.RecordConfig), m.Desired.(*models.RecordConfig)
|
||||||
|
oldContent := old.GetContent()
|
||||||
|
newContent := rec.GetContent()
|
||||||
|
|
||||||
|
return &models.Correction{
|
||||||
|
Msg: m.String(),
|
||||||
|
F: func() error {
|
||||||
|
return powerShellDoCommand(c.generatePowerShellModify(domainname, rec.Name, rec.Type, oldContent, newContent, old.TTL, rec.TTL))
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
40
providers/activedir/domains_test.go
Normal file
40
providers/activedir/domains_test.go
Normal file
|
@ -0,0 +1,40 @@
|
||||||
|
package activedir
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/StackExchange/dnscontrol/models"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGetExistingRecords(t *testing.T) {
|
||||||
|
|
||||||
|
cf := &adProvider{}
|
||||||
|
|
||||||
|
*flagFakePowerShell = true
|
||||||
|
actual, err := cf.getExistingRecords("test2")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
expected := []*models.RecordConfig{
|
||||||
|
{Name: "@", NameFQDN: "test2", Type: "A", TTL: 600, Target: "10.166.2.11"},
|
||||||
|
//{Name: "_msdcs", NameFQDN: "_msdcs.test2", Type: "NS", TTL: 300, Target: "other_record"}, // Will be filtered.
|
||||||
|
{Name: "co-devsearch02", NameFQDN: "co-devsearch02.test2", Type: "A", TTL: 3600, Target: "10.8.2.64"},
|
||||||
|
{Name: "co-devservice01", NameFQDN: "co-devservice01.test2", Type: "A", TTL: 1200, Target: "10.8.2.48"}, // Downcased.
|
||||||
|
{Name: "yum", NameFQDN: "yum.test2", Type: "A", TTL: 3600, Target: "10.8.0.59"},
|
||||||
|
}
|
||||||
|
|
||||||
|
actualS := ""
|
||||||
|
for i, x := range actual {
|
||||||
|
actualS += fmt.Sprintf("%d %v\n", i, x)
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedS := ""
|
||||||
|
for i, x := range expected {
|
||||||
|
expectedS += fmt.Sprintf("%d %v\n", i, x)
|
||||||
|
}
|
||||||
|
|
||||||
|
if actualS != expectedS {
|
||||||
|
t.Fatalf("got\n(%s)\nbut expected\n(%s)", actualS, expectedS)
|
||||||
|
}
|
||||||
|
}
|
17
providers/activedir/getzones_other.go
Normal file
17
providers/activedir/getzones_other.go
Normal file
|
@ -0,0 +1,17 @@
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package activedir
|
||||||
|
|
||||||
|
func (c *adProvider) getRecords(domainname string) ([]byte, error) {
|
||||||
|
if !*flagFakePowerShell {
|
||||||
|
panic("Can not happen: PowerShell on non-windows")
|
||||||
|
}
|
||||||
|
return c.readZoneDump(domainname)
|
||||||
|
}
|
||||||
|
|
||||||
|
func powerShellDoCommand(command string) error {
|
||||||
|
if !*flagFakePowerShell {
|
||||||
|
panic("Can not happen: PowerShell on non-windows")
|
||||||
|
}
|
||||||
|
return powerShellRecord(command)
|
||||||
|
}
|
95
providers/activedir/getzones_windows.go
Normal file
95
providers/activedir/getzones_windows.go
Normal file
|
@ -0,0 +1,95 @@
|
||||||
|
package activedir
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os/exec"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (c *adProvider) getRecords(domainname string) ([]byte, error) {
|
||||||
|
|
||||||
|
if !*flagFakePowerShell {
|
||||||
|
// If we are using PowerShell, make sure it is enabled
|
||||||
|
// and then run the PS1 command to generate the adzonedump file.
|
||||||
|
|
||||||
|
if !isPowerShellReady() {
|
||||||
|
fmt.Printf("\n\n\n")
|
||||||
|
fmt.Printf("***********************************************\n")
|
||||||
|
fmt.Printf("PowerShell DnsServer module not installed.\n")
|
||||||
|
fmt.Printf("See http://social.technet.microsoft.com/wiki/contents/articles/2202.remote-server-administration-tools-rsat-for-windows-client-and-windows-server-dsforum2wiki.aspx\n")
|
||||||
|
fmt.Printf("***********************************************\n")
|
||||||
|
fmt.Printf("\n\n\n")
|
||||||
|
return nil, fmt.Errorf("PowerShell module DnsServer not installed.")
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := powerShellExecCombined(c.generatePowerShellZoneDump(domainname))
|
||||||
|
if err != nil {
|
||||||
|
return []byte{}, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the contents of zone.*.json file instead.
|
||||||
|
return c.readZoneDump(domainname)
|
||||||
|
}
|
||||||
|
|
||||||
|
func isPowerShellReady() bool {
|
||||||
|
query, _ := powerShellExec(`(Get-Module -ListAvailable DnsServer) -ne $null`)
|
||||||
|
q, err := strconv.ParseBool(strings.TrimSpace(string(query)))
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return q
|
||||||
|
}
|
||||||
|
|
||||||
|
func powerShellDoCommand(command string) error {
|
||||||
|
if *flagFakePowerShell {
|
||||||
|
// If fake, just record the command.
|
||||||
|
return powerShellRecord(command)
|
||||||
|
}
|
||||||
|
_, err := powerShellExec(command)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func powerShellExec(command string) ([]byte, error) {
|
||||||
|
// log it.
|
||||||
|
err := powerShellLogCommand(command)
|
||||||
|
if err != nil {
|
||||||
|
return []byte{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run it.
|
||||||
|
out, err := exec.Command("powershell", "-NoProfile", command).CombinedOutput()
|
||||||
|
if err != nil {
|
||||||
|
// If there was an error, log it.
|
||||||
|
powerShellLogErr(err)
|
||||||
|
}
|
||||||
|
// Return the result.
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// powerShellExecCombined runs a PS1 command and logs the output. This is useful when the output should be none or very small.
|
||||||
|
func powerShellExecCombined(command string) ([]byte, error) {
|
||||||
|
// log it.
|
||||||
|
err := powerShellLogCommand(command)
|
||||||
|
if err != nil {
|
||||||
|
return []byte{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run it.
|
||||||
|
out, err := exec.Command("powershell", "-NoProfile", command).CombinedOutput()
|
||||||
|
if err != nil {
|
||||||
|
// If there was an error, log it.
|
||||||
|
powerShellLogErr(err)
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Log output.
|
||||||
|
err = powerShellLogOutput(string(out))
|
||||||
|
if err != nil {
|
||||||
|
return []byte{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the result.
|
||||||
|
return out, err
|
||||||
|
}
|
BIN
providers/activedir/zone.testzone.json
Executable file
BIN
providers/activedir/zone.testzone.json
Executable file
Binary file not shown.
304
providers/bind/bindProvider.go
Normal file
304
providers/bind/bindProvider.go
Normal file
|
@ -0,0 +1,304 @@
|
||||||
|
package bind
|
||||||
|
|
||||||
|
/*
|
||||||
|
|
||||||
|
bind -
|
||||||
|
Generate zonefiles suitiable for BIND.
|
||||||
|
|
||||||
|
The zonefiles are read and written to the directory -bind_dir
|
||||||
|
|
||||||
|
If the old zonefiles are readable, we read them to determine
|
||||||
|
if an update is actually needed. The old zonefile is also used
|
||||||
|
as the basis for generating the new SOA serial number.
|
||||||
|
|
||||||
|
If -bind_skeletin_src and -bind_skeletin_dst is defined, a
|
||||||
|
recursive file copy is performed from src to dst. This is
|
||||||
|
useful for copying named.ca and other static files.
|
||||||
|
|
||||||
|
*/
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/miekg/dns"
|
||||||
|
"github.com/miekg/dns/dnsutil"
|
||||||
|
|
||||||
|
"github.com/StackExchange/dnscontrol/models"
|
||||||
|
"github.com/StackExchange/dnscontrol/providers"
|
||||||
|
"github.com/StackExchange/dnscontrol/providers/diff"
|
||||||
|
)
|
||||||
|
|
||||||
|
type SoaInfo struct {
|
||||||
|
Ns string `json:"master"`
|
||||||
|
Mbox string `json:"mbox"`
|
||||||
|
Serial uint32 `json:"serial"`
|
||||||
|
Refresh uint32 `json:"refresh"`
|
||||||
|
Retry uint32 `json:"retry"`
|
||||||
|
Expire uint32 `json:"expire"`
|
||||||
|
Minttl uint32 `json:"minttl"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s SoaInfo) String() string {
|
||||||
|
return fmt.Sprintf("%s %s %d %d %d %d %d", s.Ns, s.Mbox, s.Serial, s.Refresh, s.Retry, s.Expire, s.Minttl)
|
||||||
|
}
|
||||||
|
|
||||||
|
type Bind struct {
|
||||||
|
Default_ns []string `json:"default_ns"`
|
||||||
|
Default_Soa SoaInfo `json:"default_soa"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var bindBaseDir = flag.String("bindtree", "zones", "BIND: Directory that stores BIND zonefiles.")
|
||||||
|
|
||||||
|
//var bindSkeletin = flag.String("bind_skeletin", "skeletin/master/var/named/chroot/var/named/master", "")
|
||||||
|
|
||||||
|
func rrToRecord(rr dns.RR, origin string, replace_serial uint32) (models.RecordConfig, uint32) {
|
||||||
|
// Convert's dns.RR into our native data type (models.RecordConfig).
|
||||||
|
// Records are translated directly with no changes.
|
||||||
|
// If it is an SOA for the apex domain and
|
||||||
|
// replace_serial != 0, change the serial to replace_serial.
|
||||||
|
// WARNING(tlim): This assumes SOAs do not have serial=0.
|
||||||
|
// If one is found, we replace it with serial=1.
|
||||||
|
var old_serial, new_serial uint32
|
||||||
|
header := rr.Header()
|
||||||
|
rc := models.RecordConfig{}
|
||||||
|
rc.Type = dns.TypeToString[header.Rrtype]
|
||||||
|
rc.NameFQDN = strings.ToLower(strings.TrimSuffix(header.Name, "."))
|
||||||
|
rc.Name = strings.ToLower(dnsutil.TrimDomainName(header.Name, origin))
|
||||||
|
rc.TTL = header.Ttl
|
||||||
|
if rc.TTL == models.DefaultTTL {
|
||||||
|
rc.TTL = 0
|
||||||
|
}
|
||||||
|
switch v := rr.(type) {
|
||||||
|
case *dns.A:
|
||||||
|
rc.Target = v.A.String()
|
||||||
|
case *dns.CNAME:
|
||||||
|
rc.Target = v.Target
|
||||||
|
case *dns.MX:
|
||||||
|
rc.Target = v.Mx
|
||||||
|
rc.Priority = v.Preference
|
||||||
|
case *dns.NS:
|
||||||
|
rc.Target = v.Ns
|
||||||
|
case *dns.SOA:
|
||||||
|
old_serial = v.Serial
|
||||||
|
if old_serial == 0 {
|
||||||
|
// For SOA records, we never return a 0 serial number.
|
||||||
|
old_serial = 1
|
||||||
|
}
|
||||||
|
new_serial = v.Serial
|
||||||
|
if rc.Name == "@" && replace_serial != 0 {
|
||||||
|
new_serial = replace_serial
|
||||||
|
}
|
||||||
|
rc.Target = fmt.Sprintf("%v %v %v %v %v %v %v",
|
||||||
|
v.Ns, v.Mbox, new_serial, v.Refresh, v.Retry, v.Expire, v.Minttl)
|
||||||
|
case *dns.TXT:
|
||||||
|
rc.Target = strings.Join(v.Txt, " ")
|
||||||
|
default:
|
||||||
|
log.Fatalf("Unimplemented zone record type=%s (%v)\n", rc.Type, rr)
|
||||||
|
}
|
||||||
|
return rc, old_serial
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeDefaultSOA(info SoaInfo, origin string) *models.RecordConfig {
|
||||||
|
// Make a default SOA record in case one isn't found:
|
||||||
|
soa_rec := models.RecordConfig{
|
||||||
|
Type: "SOA",
|
||||||
|
Name: "@",
|
||||||
|
}
|
||||||
|
soa_rec.NameFQDN = dnsutil.AddOrigin(soa_rec.Name, origin)
|
||||||
|
|
||||||
|
if len(info.Ns) == 0 {
|
||||||
|
info.Ns = "DEFAULT_NOT_SET"
|
||||||
|
}
|
||||||
|
if len(info.Mbox) == 0 {
|
||||||
|
info.Mbox = "DEFAULT_NOT_SET"
|
||||||
|
}
|
||||||
|
if info.Serial == 0 {
|
||||||
|
info.Serial = 1
|
||||||
|
}
|
||||||
|
if info.Refresh == 0 {
|
||||||
|
info.Refresh = 3600
|
||||||
|
}
|
||||||
|
if info.Retry == 0 {
|
||||||
|
info.Retry = 600
|
||||||
|
}
|
||||||
|
if info.Expire == 0 {
|
||||||
|
info.Expire = 604800
|
||||||
|
}
|
||||||
|
if info.Minttl == 0 {
|
||||||
|
info.Minttl = 1440
|
||||||
|
}
|
||||||
|
soa_rec.Target = info.String()
|
||||||
|
|
||||||
|
return &soa_rec
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeDefaultNS(origin string, names []string) []*models.RecordConfig {
|
||||||
|
var result []*models.RecordConfig
|
||||||
|
for _, n := range names {
|
||||||
|
rc := &models.RecordConfig{
|
||||||
|
Type: "NS",
|
||||||
|
Name: "@",
|
||||||
|
Target: n,
|
||||||
|
}
|
||||||
|
rc.NameFQDN = dnsutil.AddOrigin(rc.Name, origin)
|
||||||
|
result = append(result, rc)
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Bind) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {
|
||||||
|
|
||||||
|
// Phase 1: Copy everything to []*models.RecordConfig:
|
||||||
|
// expectedRecords < dc.Records[i]
|
||||||
|
// foundRecords < zonefile
|
||||||
|
//
|
||||||
|
// Phase 2: Do any manipulations:
|
||||||
|
// add NS
|
||||||
|
// manipulate SOA
|
||||||
|
//
|
||||||
|
// Phase 3: Convert to []diff.Records and compare:
|
||||||
|
// expectedDiffRecords < expectedRecords
|
||||||
|
// foundDiffRecords < foundRecords
|
||||||
|
// diff.Inc...(foundDiffRecords, expectedDiffRecords )
|
||||||
|
|
||||||
|
// Default SOA record. If we see one in the zone, this will be replaced.
|
||||||
|
soa_rec := makeDefaultSOA(c.Default_Soa, dc.Name)
|
||||||
|
|
||||||
|
// Read expectedRecords:
|
||||||
|
expectedRecords := make([]*models.RecordConfig, 0, len(dc.Records))
|
||||||
|
for i := range dc.Records {
|
||||||
|
expectedRecords = append(expectedRecords, dc.Records[i])
|
||||||
|
}
|
||||||
|
// Read foundRecords:
|
||||||
|
foundRecords := make([]*models.RecordConfig, 0)
|
||||||
|
var old_serial, new_serial uint32
|
||||||
|
zonefile := filepath.Join(*bindBaseDir, strings.ToLower(dc.Name)+".zone")
|
||||||
|
found_fh, err := os.Open(zonefile)
|
||||||
|
zone_file_found := err == nil
|
||||||
|
if err != nil && !os.IsNotExist(os.ErrNotExist) {
|
||||||
|
// Don't whine if the file doesn't exist. However all other
|
||||||
|
// errors will be reported.
|
||||||
|
fmt.Printf("Could not read zonefile: %v\n", err)
|
||||||
|
} else {
|
||||||
|
for x := range dns.ParseZone(found_fh, dc.Name, zonefile) {
|
||||||
|
if x.Error != nil {
|
||||||
|
log.Println("Error in zonefile:", x.Error)
|
||||||
|
} else {
|
||||||
|
rec, serial := rrToRecord(x.RR, dc.Name, old_serial)
|
||||||
|
if serial != 0 && old_serial != 0 {
|
||||||
|
log.Fatalf("Multiple SOA records in zonefile: %v\n", zonefile)
|
||||||
|
}
|
||||||
|
if serial != 0 {
|
||||||
|
// This was an SOA record. Update the serial.
|
||||||
|
old_serial = serial
|
||||||
|
new_serial = generate_serial(old_serial)
|
||||||
|
// Regenerate with new serial:
|
||||||
|
*soa_rec, _ = rrToRecord(x.RR, dc.Name, new_serial)
|
||||||
|
rec = *soa_rec
|
||||||
|
}
|
||||||
|
foundRecords = append(foundRecords, &rec)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add NS records:
|
||||||
|
if len(c.Default_ns) != 0 && !dc.HasRecordTypeName("NS", "@") {
|
||||||
|
expectedRecords = append(expectedRecords, makeDefaultNS(dc.Name, c.Default_ns)...)
|
||||||
|
dc.Records = append(dc.Records, makeDefaultNS(dc.Name, c.Default_ns)...)
|
||||||
|
}
|
||||||
|
// Add SOA record:
|
||||||
|
if !dc.HasRecordTypeName("SOA", "@") {
|
||||||
|
expectedRecords = append(expectedRecords, soa_rec)
|
||||||
|
dc.Records = append(dc.Records, soa_rec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert to []diff.Records and compare:
|
||||||
|
foundDiffRecords := make([]diff.Record, len(foundRecords))
|
||||||
|
for i := range foundRecords {
|
||||||
|
foundDiffRecords[i] = foundRecords[i]
|
||||||
|
}
|
||||||
|
expectedDiffRecords := make([]diff.Record, len(expectedRecords))
|
||||||
|
for i := range expectedRecords {
|
||||||
|
expectedDiffRecords[i] = expectedRecords[i]
|
||||||
|
}
|
||||||
|
_, create, del, mod := diff.IncrementalDiff(foundDiffRecords, expectedDiffRecords)
|
||||||
|
|
||||||
|
// Print a list of changes. Generate an actual change that is the zone
|
||||||
|
changes := false
|
||||||
|
for _, i := range create {
|
||||||
|
changes = true
|
||||||
|
if zone_file_found {
|
||||||
|
fmt.Println(i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, i := range del {
|
||||||
|
changes = true
|
||||||
|
if zone_file_found {
|
||||||
|
fmt.Println(i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, i := range mod {
|
||||||
|
changes = true
|
||||||
|
if zone_file_found {
|
||||||
|
fmt.Println(i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
msg := fmt.Sprintf("GENERATE_ZONEFILE: %s", dc.Name)
|
||||||
|
if !zone_file_found {
|
||||||
|
msg = msg + fmt.Sprintf(" (%d records)", len(create))
|
||||||
|
}
|
||||||
|
corrections := []*models.Correction{}
|
||||||
|
if changes {
|
||||||
|
corrections = append(corrections,
|
||||||
|
&models.Correction{
|
||||||
|
Msg: msg,
|
||||||
|
F: func() error {
|
||||||
|
fmt.Printf("CREATING ZONEFILE: %v\n", zonefile)
|
||||||
|
zf, err := os.Create(zonefile)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Could not create zonefile: %v", err)
|
||||||
|
}
|
||||||
|
zonefilerecords := make([]dns.RR, 0, len(dc.Records))
|
||||||
|
for _, r := range dc.Records {
|
||||||
|
zonefilerecords = append(zonefilerecords, r.RR())
|
||||||
|
}
|
||||||
|
err = WriteZoneFile(zf, zonefilerecords, dc.Name, models.DefaultTTL)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("WriteZoneFile error: %v\n", err)
|
||||||
|
}
|
||||||
|
err = zf.Close()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Closing: %v", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return corrections, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func initBind(config map[string]string, providermeta json.RawMessage) (providers.DNSServiceProvider, error) {
|
||||||
|
// m -- the json blob from creds.json
|
||||||
|
// meta -- the json blob from NewReq('name', 'TYPE', meta)
|
||||||
|
|
||||||
|
api := &Bind{}
|
||||||
|
if len(providermeta) != 0 {
|
||||||
|
err := json.Unmarshal(providermeta, api)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return api, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// init registers the BIND provider with the global provider registry at
// program startup.
func init() {
	providers.RegisterDomainServiceProviderType("BIND", initBind)
}
|
227
providers/bind/prettyzone.go
Normal file
227
providers/bind/prettyzone.go
Normal file
|
@ -0,0 +1,227 @@
|
||||||
|
// Generate zonefiles.
|
||||||
|
// This generates a zonefile that prioritizes beauty over efficiency.
|
||||||
|
package bind
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/miekg/dns"
|
||||||
|
"github.com/miekg/dns/dnsutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// zoneGenData holds the state needed to render a zonefile and implements
// sort.Interface so the records can be ordered for pretty output.
type zoneGenData struct {
	Origin     string   // zone apex, e.g. "example.com"
	DefaultTtl uint32   // emitted via $TTL; matching per-record TTLs are omitted
	Records    []dns.RR // the records to render
}
|
||||||
|
|
||||||
|
// Len and Swap complete sort.Interface for zoneGenData; the ordering rules
// live in Less.
func (z *zoneGenData) Len() int      { return len(z.Records) }
func (z *zoneGenData) Swap(i, j int) { z.Records[i], z.Records[j] = z.Records[j], z.Records[i] }
|
||||||
|
// Less orders records for zonefile output: first by label (apex rendered as
// "@", compared via zoneLabelLess), then by record type (zoneRrtypeLess),
// then A records numerically by IPv4 address, MX records by preference, and
// everything else by string rendering.
func (z *zoneGenData) Less(i, j int) bool {
	//fmt.Printf("DEBUG: i=%#v j=%#v\n", i, j)
	//fmt.Printf("DEBUG: z.Records=%#v\n", len(z.Records))
	a, b := z.Records[i], z.Records[j]
	//fmt.Printf("DEBUG: a=%#v b=%#v\n", a, b)
	// Fully qualify both names before comparing labels.
	compA, compB := dnsutil.AddOrigin(a.Header().Name, z.Origin+"."), dnsutil.AddOrigin(b.Header().Name, z.Origin+".")
	if compA != compB {
		// Render the apex as "@" so it sorts first (see zoneLabelLess).
		if compA == z.Origin+"." {
			compA = "@"
		}
		if compB == z.Origin+"." {
			compB = "@"
		}
		return zoneLabelLess(compA, compB)
	}
	rrtypeA, rrtypeB := a.Header().Rrtype, b.Header().Rrtype
	if rrtypeA != rrtypeB {
		return zoneRrtypeLess(rrtypeA, rrtypeB)
	}
	if rrtypeA == dns.TypeA {
		ta2, tb2 := a.(*dns.A), b.(*dns.A)
		ipa, ipb := ta2.A.To4(), tb2.A.To4()
		if ipa == nil || ipb == nil {
			log.Fatalf("should not happen: IPs are not 4 bytes: %#v %#v", ta2, tb2)
		}
		// Sort IPv4 addresses numerically, not lexicographically.
		return bytes.Compare(ipa, ipb) == -1
	}
	if rrtypeA == dns.TypeMX {
		ta2, tb2 := a.(*dns.MX), b.(*dns.MX)
		pa, pb := ta2.Preference, tb2.Preference
		return pa < pb
	}
	return a.String() < b.String()
}
|
||||||
|
|
||||||
|
// WriteZoneFile writes a beautifully formatted zone file.
|
||||||
|
func WriteZoneFile(w io.Writer, records []dns.RR, origin string, defaultTtl uint32) error {
|
||||||
|
// This function prioritizes beauty over efficiency.
|
||||||
|
// * The zone records are sorted by label, grouped by subzones to
|
||||||
|
// be easy to read and pleasant to the eye.
|
||||||
|
// * Within a label, SOA and NS records are listed first.
|
||||||
|
// * MX records are sorted numericly by preference value.
|
||||||
|
// * A records are sorted by IP address, not lexicographically.
|
||||||
|
// * Repeated labels are removed.
|
||||||
|
// * $TTL is used to eliminate clutter.
|
||||||
|
// * "@" is used instead of the apex domain name.
|
||||||
|
|
||||||
|
z := &zoneGenData{
|
||||||
|
Origin: origin,
|
||||||
|
DefaultTtl: defaultTtl,
|
||||||
|
}
|
||||||
|
z.Records = nil
|
||||||
|
for _, r := range records {
|
||||||
|
z.Records = append(z.Records, r)
|
||||||
|
}
|
||||||
|
return z.generateZoneFileHelper(w)
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateZoneFileHelper creates a pretty zonefile.
|
||||||
|
// generateZoneFileHelper renders z.Records to w as a pretty zonefile:
// records sorted (see Less), repeated name labels blanked, TTLs matching the
// $TTL default omitted, and columns aligned via formatLine.
func (z *zoneGenData) generateZoneFileHelper(w io.Writer) error {

	nameShortPrevious := ""

	sort.Sort(z)
	fmt.Fprintln(w, "$TTL", z.DefaultTtl)
	for i, rr := range z.Records {
		line := rr.String()
		if line[0] == ';' {
			// Lines rendered as comments carry no record data; skip them.
			continue
		}
		hdr := rr.Header()

		// miekg/dns renders RRs tab-separated: name, ttl, class, type, rdata.
		items := strings.SplitN(line, "\t", 5)
		if len(items) < 5 {
			log.Fatalf("Too few items in: %v", line)
		}

		// items[0]: name — blanked when identical to the previous record's.
		nameFqdn := hdr.Name
		nameShort := dnsutil.TrimDomainName(nameFqdn, z.Origin)
		name := nameShort
		if i > 0 && nameShort == nameShortPrevious {
			name = ""
		} else {
			name = nameShort
		}
		nameShortPrevious = nameShort

		// items[1]: ttl — omitted when it equals the $TTL default or is 0.
		ttl := ""
		if hdr.Ttl != z.DefaultTtl && hdr.Ttl != 0 {
			ttl = items[1]
		}

		// items[2]: class — only IN is supported.
		if hdr.Class != dns.ClassINET {
			log.Fatalf("Unimplemented class=%v", items[2])
		}

		// items[3]: type
		typeStr := dns.TypeToString[hdr.Rrtype]

		// items[4]: the remaining line (the rdata, verbatim)
		target := items[4]
		//if typeStr == "TXT" {
		//	fmt.Printf("generateZoneFileHelper.go: target=%#v\n", target)
		//}

		fmt.Fprintln(w, formatLine([]int{10, 5, 2, 5, 0}, []string{name, ttl, "IN", typeStr, target}))
	}
	return nil
}
|
||||||
|
|
||||||
|
func formatLine(lengths []int, fields []string) string {
|
||||||
|
c := 0
|
||||||
|
result := ""
|
||||||
|
for i, length := range lengths {
|
||||||
|
item := fields[i]
|
||||||
|
for len(result) < c {
|
||||||
|
result += " "
|
||||||
|
}
|
||||||
|
if item != "" {
|
||||||
|
result += item + " "
|
||||||
|
}
|
||||||
|
c += length + 1
|
||||||
|
}
|
||||||
|
return strings.TrimRight(result, " ")
|
||||||
|
}
|
||||||
|
|
||||||
|
// zoneLabelLess reports whether label a sorts before label b in a zonefile.
// "@" sorts first, then "*", then labels are compared element-wise starting
// from the rightmost (most significant) dotted element; when all shared
// elements match, the shorter name sorts first.
func zoneLabelLess(a, b string) bool {
	// Equal labels are never less; all code below may assume a != b.
	if a == b {
		return false
	}

	// Special-case the apex and the wildcard to the top.
	switch {
	case a == "@":
		return true
	case b == "@":
		return false
	case a == "*":
		return true
	case b == "*":
		return false
	}

	as := strings.Split(a, ".")
	bs := strings.Split(b, ".")
	ia, ib := len(as)-1, len(bs)-1

	// Walk right-to-left and compare the first non-equal elements.
	for i, j := ia, ib; i >= 0 && j >= 0; i, j = i-1, j-1 {
		if as[i] != bs[j] {
			return as[i] < bs[j]
		}
	}

	// All shared trailing elements matched: the shorter name is less.
	return ia < ib
}
|
||||||
|
|
||||||
|
func zoneRrtypeLess(a, b uint16) bool {
|
||||||
|
// Compare two RR types for the purpose of sorting the RRs in a Zone.
|
||||||
|
|
||||||
|
// If they are equal, we are done. All other code is simplified
|
||||||
|
// because we can assume a!=b.
|
||||||
|
if a == b {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// List SOAs, then NSs, then all others.
|
||||||
|
// i.e. SOA is always less. NS is less than everything but SOA.
|
||||||
|
if a == dns.TypeSOA {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if b == dns.TypeSOA {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if a == dns.TypeNS {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if b == dns.TypeNS {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return a < b
|
||||||
|
}
|
299
providers/bind/prettyzone_test.go
Normal file
299
providers/bind/prettyzone_test.go
Normal file
|
@ -0,0 +1,299 @@
|
||||||
|
package bind
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"math/rand"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/miekg/dns"
|
||||||
|
"github.com/miekg/dns/dnsutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// parseAndRegen takes a generated zonefile in buf, parses it, and generates
// a zone from the parsed records; the round trip must reproduce expected
// byte-for-byte. It is used after each WriteZoneFile test as an extra
// verification step.
func parseAndRegen(t *testing.T, buf *bytes.Buffer, expected string) {
	// Parse the output:
	var parsed []dns.RR
	for x := range dns.ParseZone(buf, "bosun.org", "bosun.org.zone") {
		if x.Error != nil {
			// NOTE(review): log.Fatalf aborts the whole test binary;
			// t.Fatal would be friendlier — confirm before changing.
			log.Fatalf("Error in zonefile: %v", x.Error)
		} else {
			parsed = append(parsed, x.RR)
		}
	}
	// Generate it back:
	buf2 := &bytes.Buffer{}
	WriteZoneFile(buf2, parsed, "bosun.org.", 300)

	// Compare:
	if buf2.String() != expected {
		t.Fatalf("Regenerated zonefile does not match: got=(\n%v\n)\nexpected=(\n%v\n)\n", buf2.String(), expected)
	}
}
|
||||||
|
|
||||||
|
// func WriteZoneFile
|
||||||
|
|
||||||
|
// TestWriteZoneFileSimple checks basic output: apex rendered as "@",
// repeated labels blanked, and default TTLs omitted.
func TestWriteZoneFileSimple(t *testing.T) {
	r1, _ := dns.NewRR("bosun.org. 300 IN A 192.30.252.153")
	r2, _ := dns.NewRR("bosun.org. 300 IN A 192.30.252.154")
	r3, _ := dns.NewRR("www.bosun.org. 300 IN CNAME bosun.org.")
	buf := &bytes.Buffer{}
	WriteZoneFile(buf, []dns.RR{r1, r2, r3}, "bosun.org.", 300)
	expected := `$TTL 300
@                IN A     192.30.252.153
                 IN A     192.30.252.154
www              IN CNAME bosun.org.
`
	if buf.String() != expected {
		t.Log(buf.String())
		t.Log(expected)
		t.Fatalf("Zone file does not match.")
	}

	parseAndRegen(t, buf, expected)
}
|
||||||
|
|
||||||
|
// TestWriteZoneFileMx checks MX preference ordering, explicit TTLs,
// TXT quoting/escaping, wildcards, and long labels.
func TestWriteZoneFileMx(t *testing.T) {
	//exhibits explicit ttls and long name
	r1, _ := dns.NewRR(`bosun.org. 300 IN TXT "aaa"`)
	r2, _ := dns.NewRR(`bosun.org. 300 IN TXT "bbb"`)
	r2.(*dns.TXT).Txt[0] = `b"bb` // embedded quote must be escaped on output
	r3, _ := dns.NewRR("bosun.org. 300 IN MX 1 ASPMX.L.GOOGLE.COM.")
	r4, _ := dns.NewRR("bosun.org. 300 IN MX 5 ALT1.ASPMX.L.GOOGLE.COM.")
	r5, _ := dns.NewRR("bosun.org. 300 IN MX 10 ASPMX3.GOOGLEMAIL.COM.")
	r6, _ := dns.NewRR("bosun.org. 300 IN A 198.252.206.16")
	r7, _ := dns.NewRR("*.bosun.org. 600 IN A 198.252.206.16")
	r8, _ := dns.NewRR(`_domainkey.bosun.org. 300 IN TXT "vvvv"`)
	r9, _ := dns.NewRR(`google._domainkey.bosun.org. 300 IN TXT "\"foo\""`)
	buf := &bytes.Buffer{}
	WriteZoneFile(buf, []dns.RR{r1, r2, r3, r4, r5, r6, r7, r8, r9}, "bosun.org", 300)
	if buf.String() != testdataZFMX {
		t.Log(buf.String())
		t.Log(testdataZFMX)
		t.Fatalf("Zone file does not match.")
	}
	parseAndRegen(t, buf, testdataZFMX)
}
|
||||||
|
|
||||||
|
// testdataZFMX is the expected output for TestWriteZoneFileMx.
// NOTE(review): column alignment reconstructed from formatLine's widths
// {10, 5, 2, 5, 0} — verify against actual generator output.
var testdataZFMX = `$TTL 300
@                IN A     198.252.206.16
                 IN MX    1 ASPMX.L.GOOGLE.COM.
                 IN MX    5 ALT1.ASPMX.L.GOOGLE.COM.
                 IN MX    10 ASPMX3.GOOGLEMAIL.COM.
                 IN TXT   "aaa"
                 IN TXT   "b\"bb"
*          600   IN A     198.252.206.16
_domainkey       IN TXT   "vvvv"
google._domainkey IN TXT  "\"foo\""
`
|
||||||
|
|
||||||
|
// TestWriteZoneFileOrder verifies records are emitted in canonical zone
// order regardless of input order, including after repeated random shuffles.
func TestWriteZoneFileOrder(t *testing.T) {
	var records []dns.RR
	for i, td := range []string{
		"@",
		"@",
		"@",
		"stackoverflow.com.",
		"*",
		"foo",
		"bar.foo",
		"hip.foo",
		"mup",
		"a.mup",
		"bzt.mup",
		"aaa.bzt.mup",
		"zzz.bzt.mup",
		"nnn.mup",
		"zt.mup",
		"zap",
	} {
		name := dnsutil.AddOrigin(td, "stackoverflow.com.")
		r, _ := dns.NewRR(fmt.Sprintf("%s 300 IN A 1.2.3.%d", name, i))
		records = append(records, r)
	}
	// Exercise the apex spelled both as the FQDN and as "@":
	records[0].Header().Name = "stackoverflow.com."
	records[1].Header().Name = "@"

	buf := &bytes.Buffer{}
	WriteZoneFile(buf, records, "stackoverflow.com.", 300)
	// Compare
	if buf.String() != testdataOrder {
		t.Log(buf.String())
		t.Log(testdataOrder)
		t.Fatalf("Zone file does not match.")
	}
	parseAndRegen(t, buf, testdataOrder)

	// Now shuffle the list many times and make sure it still works:
	for iteration := 5; iteration > 0; iteration-- {
		// Randomize the list:
		perm := rand.Perm(len(records))
		for i, v := range perm {
			records[i], records[v] = records[v], records[i]
			//fmt.Println(i, v)
		}
		// Generate
		buf := &bytes.Buffer{}
		WriteZoneFile(buf, records, "stackoverflow.com.", 300)
		// Compare
		if buf.String() != testdataOrder {
			t.Log(buf.String())
			t.Log(testdataOrder)
			t.Fatalf("Zone file does not match.")
		}
		parseAndRegen(t, buf, testdataOrder)
	}
}
|
||||||
|
|
||||||
|
// testdataOrder is the expected output for TestWriteZoneFileOrder.
// NOTE(review): column alignment reconstructed from formatLine's widths
// {10, 5, 2, 5, 0} — verify against actual generator output.
var testdataOrder = `$TTL 300
@                IN A     1.2.3.0
                 IN A     1.2.3.1
                 IN A     1.2.3.2
                 IN A     1.2.3.3
*                IN A     1.2.3.4
foo              IN A     1.2.3.5
bar.foo          IN A     1.2.3.6
hip.foo          IN A     1.2.3.7
mup              IN A     1.2.3.8
a.mup            IN A     1.2.3.9
bzt.mup          IN A     1.2.3.10
aaa.bzt.mup      IN A     1.2.3.11
zzz.bzt.mup      IN A     1.2.3.12
nnn.mup          IN A     1.2.3.13
zt.mup           IN A     1.2.3.14
zap              IN A     1.2.3.15
`
|
||||||
|
|
||||||
|
// func formatLine
|
||||||
|
|
||||||
|
// TestFormatLine checks column alignment, including a field that overflows
// its column width.
func TestFormatLine(t *testing.T) {
	tests := []struct {
		lengths  []int
		fields   []string
		expected string
	}{
		{[]int{2, 2, 0}, []string{"a", "b", "c"}, "a  b  c"},
		{[]int{2, 2, 0}, []string{"aaaaa", "b", "c"}, "aaaaa b  c"},
	}
	for _, ts := range tests {
		actual := formatLine(ts.lengths, ts.fields)
		if actual != ts.expected {
			t.Errorf("\"%s\" != \"%s\"", actual, ts.expected)
		}
	}
}
|
||||||
|
|
||||||
|
// func zoneLabelLess
|
||||||
|
|
||||||
|
func TestZoneLabelLess(t *testing.T) {
|
||||||
|
/*
|
||||||
|
The zone should sort in prefix traversal order:
|
||||||
|
|
||||||
|
@
|
||||||
|
*
|
||||||
|
foo
|
||||||
|
bar.foo
|
||||||
|
hip.foo
|
||||||
|
mup
|
||||||
|
a.mup
|
||||||
|
bzt.mup
|
||||||
|
aaa.bzt.mup
|
||||||
|
zzz.bzt.mup
|
||||||
|
nnn.mup
|
||||||
|
zt.mup
|
||||||
|
zap
|
||||||
|
*/
|
||||||
|
|
||||||
|
var tests = []struct {
|
||||||
|
e1, e2 string
|
||||||
|
expected bool
|
||||||
|
}{
|
||||||
|
{"@", "@", false},
|
||||||
|
{"@", "*", true},
|
||||||
|
{"@", "b", true},
|
||||||
|
{"*", "@", false},
|
||||||
|
{"*", "*", false},
|
||||||
|
{"*", "b", true},
|
||||||
|
{"foo", "foo", false},
|
||||||
|
{"foo", "bar", false},
|
||||||
|
{"bar", "foo", true},
|
||||||
|
{"a.mup", "mup", false},
|
||||||
|
{"mup", "a.mup", true},
|
||||||
|
{"a.mup", "a.mup", false},
|
||||||
|
{"a.mup", "bzt.mup", true},
|
||||||
|
{"a.mup", "aa.mup", true},
|
||||||
|
{"zt.mup", "aaa.bzt.mup", false},
|
||||||
|
{"aaa.bzt.mup", "mup", false},
|
||||||
|
{"nnn.mup", "aaa.bzt.mup", false},
|
||||||
|
{`www\.miek.nl`, `www.miek.nl`, false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
actual := zoneLabelLess(test.e1, test.e2)
|
||||||
|
if test.expected != actual {
|
||||||
|
t.Errorf("%v: expected (%v) got (%v)\n", test.e1, test.e2, actual)
|
||||||
|
}
|
||||||
|
actual = zoneLabelLess(test.e2, test.e1)
|
||||||
|
// The reverse should work too:
|
||||||
|
var expected bool
|
||||||
|
if test.e1 == test.e2 {
|
||||||
|
expected = false
|
||||||
|
} else {
|
||||||
|
expected = !test.expected
|
||||||
|
}
|
||||||
|
if expected != actual {
|
||||||
|
t.Errorf("%v: expected (%v) got (%v)\n", test.e1, test.e2, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestZoneRrtypeLess(t *testing.T) {
|
||||||
|
/*
|
||||||
|
In zonefiles we want to list SOAs, then NSs, then all others.
|
||||||
|
*/
|
||||||
|
|
||||||
|
var tests = []struct {
|
||||||
|
e1, e2 uint16
|
||||||
|
expected bool
|
||||||
|
}{
|
||||||
|
{dns.TypeSOA, dns.TypeSOA, false},
|
||||||
|
{dns.TypeSOA, dns.TypeA, true},
|
||||||
|
{dns.TypeSOA, dns.TypeTXT, true},
|
||||||
|
{dns.TypeSOA, dns.TypeNS, true},
|
||||||
|
{dns.TypeNS, dns.TypeSOA, false},
|
||||||
|
{dns.TypeNS, dns.TypeA, true},
|
||||||
|
{dns.TypeNS, dns.TypeTXT, true},
|
||||||
|
{dns.TypeNS, dns.TypeNS, false},
|
||||||
|
{dns.TypeA, dns.TypeSOA, false},
|
||||||
|
{dns.TypeA, dns.TypeA, false},
|
||||||
|
{dns.TypeA, dns.TypeTXT, true},
|
||||||
|
{dns.TypeA, dns.TypeNS, false},
|
||||||
|
{dns.TypeMX, dns.TypeSOA, false},
|
||||||
|
{dns.TypeMX, dns.TypeA, false},
|
||||||
|
{dns.TypeMX, dns.TypeTXT, true},
|
||||||
|
{dns.TypeMX, dns.TypeNS, false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
actual := zoneRrtypeLess(test.e1, test.e2)
|
||||||
|
if test.expected != actual {
|
||||||
|
t.Errorf("%v: expected (%v) got (%v)\n", test.e1, test.e2, actual)
|
||||||
|
}
|
||||||
|
actual = zoneRrtypeLess(test.e2, test.e1)
|
||||||
|
// The reverse should work too:
|
||||||
|
var expected bool
|
||||||
|
if test.e1 == test.e2 {
|
||||||
|
expected = false
|
||||||
|
} else {
|
||||||
|
expected = !test.expected
|
||||||
|
}
|
||||||
|
if expected != actual {
|
||||||
|
t.Errorf("%v: expected (%v) got (%v)\n", test.e1, test.e2, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
71
providers/bind/serial.go
Normal file
71
providers/bind/serial.go
Normal file
|
@ -0,0 +1,71 @@
|
||||||
|
package bind
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// nowFunc returns the current time; a package variable so tests can stub it.
var nowFunc func() time.Time = time.Now
|
||||||
|
|
||||||
|
// generate_serial takes an old SOA serial number and increments it.
|
||||||
|
func generate_serial(old_serial uint32) uint32 {
|
||||||
|
// Serial numbers are in the format yyyymmddvv
|
||||||
|
// where vv is a version count that starts at 01 each day.
|
||||||
|
// Multiple serial numbers generated on the same day increase vv.
|
||||||
|
// If the old serial number is not in this format, it gets replaced
|
||||||
|
// with the new format. However if that would mean a new serial number
|
||||||
|
// that is smaller than the old one, we punt and increment the old number.
|
||||||
|
// At no time will a serial number == 0 be returned.
|
||||||
|
|
||||||
|
original := old_serial
|
||||||
|
old_serialStr := strconv.FormatUint(uint64(old_serial), 10)
|
||||||
|
var new_serial uint32
|
||||||
|
|
||||||
|
// Make draft new serial number:
|
||||||
|
today := nowFunc().UTC()
|
||||||
|
todayStr := today.Format("20060102")
|
||||||
|
version := uint32(1)
|
||||||
|
todayNum, err := strconv.ParseUint(todayStr, 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("new serial won't fit in 32 bits: %v", err)
|
||||||
|
}
|
||||||
|
draft := uint32(todayNum)*100 + version
|
||||||
|
|
||||||
|
method := "none" // Used only in debugging.
|
||||||
|
if old_serial > draft {
|
||||||
|
// If old_serial was really slow, upgrade to new yyyymmddvv standard:
|
||||||
|
method = "o>d"
|
||||||
|
new_serial = old_serial + 1
|
||||||
|
new_serial = old_serial + 1
|
||||||
|
} else if old_serial == draft {
|
||||||
|
// Edge case: increment old serial:
|
||||||
|
method = "o=d"
|
||||||
|
new_serial = draft + 1
|
||||||
|
} else if len(old_serialStr) != 10 {
|
||||||
|
// If old_serial is wrong number of digits, upgrade to yyyymmddvv standard:
|
||||||
|
method = "len!=10"
|
||||||
|
new_serial = draft
|
||||||
|
} else if strings.HasPrefix(old_serialStr, todayStr) {
|
||||||
|
// If old_serial just needs to be incremented:
|
||||||
|
method = "prefix"
|
||||||
|
new_serial = old_serial + 1
|
||||||
|
} else {
|
||||||
|
// First serial number to be requested today:
|
||||||
|
method = "default"
|
||||||
|
new_serial = draft
|
||||||
|
}
|
||||||
|
|
||||||
|
if new_serial == 0 {
|
||||||
|
// We never return 0 as the serial number.
|
||||||
|
new_serial = 1
|
||||||
|
}
|
||||||
|
if old_serial == new_serial {
|
||||||
|
log.Fatalf("%v: old_serial == new_serial (%v == %v) draft=%v method=%v", original, old_serial, new_serial, draft, method)
|
||||||
|
}
|
||||||
|
if old_serial > new_serial {
|
||||||
|
log.Fatalf("%v: old_serial > new_serial (%v > %v) draft=%v method=%v", original, old_serial, new_serial, draft, method)
|
||||||
|
}
|
||||||
|
return new_serial
|
||||||
|
}
|
51
providers/bind/serial_test.go
Normal file
51
providers/bind/serial_test.go
Normal file
|
@ -0,0 +1,51 @@
|
||||||
|
package bind
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Test_generate_serial_1 table-tests generate_serial, stubbing nowFunc so
// "today" is deterministic. Covers upgrades from short serials, same-day
// increments, the Dec-31 rollover, and year-4015 (32-bit headroom).
func Test_generate_serial_1(t *testing.T) {
	d1, _ := time.Parse("20060102", "20150108")
	d4, _ := time.Parse("20060102", "40150108") // far future: 32-bit check
	d12, _ := time.Parse("20060102", "20151231")
	var tests = []struct {
		Given    uint32
		Today    time.Time
		Expected uint32
	}{
		{0, d1, 2015010801},
		{123, d1, 2015010801},
		{2015010800, d1, 2015010801},
		{2015010801, d1, 2015010802},
		{2015010802, d1, 2015010803},
		{2015010898, d1, 2015010899},
		{2015010899, d1, 2015010900},
		{2015090401, d1, 2015090402},
		{201509040, d1, 2015010801},
		{20150904, d1, 2015010801},
		{2015090, d1, 2015010801},
		// Verify 32-bits is enough to carry us 200 years in the future:
		{4015090401, d4, 4015090402},
		// Verify Dec 31 edge-case:
		{2015123099, d12, 2015123101},
		{2015123100, d12, 2015123101},
		{2015123101, d12, 2015123102},
		{2015123102, d12, 2015123103},
		{2015123198, d12, 2015123199},
		{2015123199, d12, 2015123200},
		{2015123200, d12, 2015123201},
		{201512310, d12, 2015123101},
	}

	for i, tst := range tests {
		expected := tst.Expected
		// Stub the clock for this case:
		nowFunc = func() time.Time {
			return tst.Today
		}
		found := generate_serial(tst.Given)
		if expected != found {
			t.Fatalf("Test:%d/%v: Expected (%d) got (%d)\n", i, tst.Given, expected, found)
		}
	}
}
|
306
providers/cloudflare/cloudflareProvider.go
Normal file
306
providers/cloudflare/cloudflareProvider.go
Normal file
|
@ -0,0 +1,306 @@
|
||||||
|
package cloudflare
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/miekg/dns/dnsutil"
|
||||||
|
"github.com/StackExchange/dnscontrol/models"
|
||||||
|
"github.com/StackExchange/dnscontrol/providers"
|
||||||
|
"github.com/StackExchange/dnscontrol/providers/diff"
|
||||||
|
"github.com/StackExchange/dnscontrol/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
|
||||||
|
Cloudflare APi DNS provider:
|
||||||
|
|
||||||
|
Info required in `creds.json`:
|
||||||
|
- apikey
|
||||||
|
- apiuser
|
||||||
|
|
||||||
|
Record level metadata availible:
|
||||||
|
- cloudflare_proxy ("true" or "false")
|
||||||
|
|
||||||
|
Domain level metadata availible:
|
||||||
|
- cloudflare_proxy_default ("true" or "false")
|
||||||
|
|
||||||
|
Provider level metadata availible:
|
||||||
|
- ip_conversions
|
||||||
|
- secret_ips
|
||||||
|
*/
|
||||||
|
|
||||||
|
// CloudflareApi is the Cloudflare DNS provider. ApiKey/ApiUser come from
// creds.json; the unexported fields are populated lazily from the API and
// from provider metadata (ip_conversions, secret_ips — see the file header).
type CloudflareApi struct {
	ApiKey        string `json:"apikey"`
	ApiUser       string `json:"apiuser"`
	domainIndex   map[string]string               // zone name -> Cloudflare zone ID
	nameservers   map[string][]*models.Nameserver // zone name -> nameserver set
	ipConversions []transform.IpConversion        // from ip_conversions metadata
	secretIPs     []net.IP                        // from secret_ips metadata
	ignoredLabels []string                        // labels dnscontrol must not manage
}
|
||||||
|
|
||||||
|
// labelMatches reports whether label is present in matches.
func labelMatches(label string, matches []string) bool {
	//log.Printf("DEBUG: labelMatches(%#v, %#v)\n", label, matches)
	for _, candidate := range matches {
		if candidate == label {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// GetDomainCorrections computes the changes needed to make Cloudflare's
// records for dc match the desired configuration, returning one Correction
// per delete/create/modify.
func (c *CloudflareApi) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {
	// Lazily fetch the account's zone list on first use.
	if c.domainIndex == nil {
		if err := c.fetchDomainList(); err != nil {
			return nil, err
		}
	}
	id, ok := c.domainIndex[dc.Name]
	if !ok {
		return nil, fmt.Errorf("%s not listed in zones for cloudflare account", dc.Name)
	}

	dc.Nameservers = c.nameservers[dc.Name]
	if err := c.preprocessConfig(dc); err != nil {
		return nil, err
	}
	records, err := c.getRecordsForDomain(id)
	if err != nil {
		return nil, err
	}
	//for _, rec := range records {
	// Iterate backwards so removing the current element is index-safe.
	for i := len(records) - 1; i >= 0; i-- {
		rec := records[i]
		// Delete ignore labels
		if labelMatches(dnsutil.TrimDomainName(rec.(*cfRecord).Name, dc.Name), c.ignoredLabels) {
			fmt.Printf("ignored_label: %s\n", rec.(*cfRecord).Name)
			records = append(records[:i], records[i+1:]...)
			// NOTE(review): execution falls through and still normalizes
			// the removed record below; harmless since it is no longer in
			// the slice, but a `continue` would be clearer — confirm.
		}
		//normalize cname,mx,ns records with dots to be consistent with our config format.
		t := rec.(*cfRecord).Type
		if t == "CNAME" || t == "MX" || t == "NS" {
			rec.(*cfRecord).Content = dnsutil.AddOrigin(rec.(*cfRecord).Content+".", dc.Name)
		}
	}

	// Desired records; a config label colliding with ignored_labels is fatal.
	expectedRecords := make([]diff.Record, 0, len(dc.Records))
	for _, rec := range dc.Records {
		if labelMatches(rec.Name, c.ignoredLabels) {
			log.Fatalf("FATAL: dnsconfig contains label that matches ignored_labels: %#v is in %v)\n", rec.Name, c.ignoredLabels)
			// Since we log.Fatalf, we don't need to be clean here.
		}
		expectedRecords = append(expectedRecords, recordWrapper{rec})
	}
	_, create, del, mod := diff.IncrementalDiff(records, expectedRecords)
	corrections := []*models.Correction{}

	for _, d := range del {
		corrections = append(corrections, c.deleteRec(d.Existing.(*cfRecord), id))
	}
	for _, d := range create {
		corrections = append(corrections, c.createRec(d.Desired.(recordWrapper).RecordConfig, id)...)
	}

	for _, d := range mod {
		// e and rec are re-declared each iteration, so the closure below
		// captures this iteration's values, not the loop's last ones.
		e, rec := d.Existing.(*cfRecord), d.Desired.(recordWrapper)
		proxy := e.Proxiable && rec.Metadata[metaProxy] != "off"
		corrections = append(corrections, &models.Correction{
			Msg: fmt.Sprintf("MODIFY record %s %s: (%s %s) => (%s %s)", rec.Name, rec.Type, e.Content, e.GetComparisionData(), rec.Target, rec.GetComparisionData()),
			F:   func() error { return c.modifyRecord(id, e.ID, proxy, rec.RecordConfig) },
		})
	}
	return corrections, nil
}
|
||||||
|
|
||||||
|
// Metadata keys recognized by this provider, at record and domain level.
const (
	metaProxy         = "cloudflare_proxy"     // Per-record proxy setting: "on", "off", or "full".
	metaProxyDefault  = metaProxy + "_default" // Domain-level default for metaProxy.
	metaOriginalIP    = "original_ip"          // TODO(tlim): Unclear what this means.
	metaIPConversions = "ip_conversions"       // TODO(tlim): Rename to obscure_rules.
	metaSecretIPs     = "secret_ips"           // TODO(tlim): Rename to obscured_cidrs.
)
|
||||||
|
|
||||||
|
// checkProxyVal lower-cases a cloudflare_proxy metadata value and rejects
// anything other than "on", "off", or "full".
func checkProxyVal(v string) (string, error) {
	lowered := strings.ToLower(v)
	switch lowered {
	case "on", "off", "full":
		return lowered, nil
	}
	return "", fmt.Errorf("Bad metadata value for cloudflare_proxy: '%s'. Use on/off/full", lowered)
}
|
||||||
|
|
||||||
|
// preprocessConfig mutates dc in place before diffing:
//  1. resolves the domain-level default proxy setting,
//  2. validates/normalizes each record's cloudflare_proxy metadata
//     (non-proxyable types are forced to "off"),
//  3. defaults zero TTLs to 1 ("automatic" in cloudflare terms) and rewrites
//     the target IP of "full"-proxied A records through c.ipConversions,
//     stashing the original IP in record metadata.
// Returns an error on invalid proxy values or unparseable A-record targets.
func (c *CloudflareApi) preprocessConfig(dc *models.DomainConfig) error {

	// Determine the default proxy setting.
	var defProxy string
	var err error
	if defProxy = dc.Metadata[metaProxyDefault]; defProxy == "" {
		defProxy = "off"
	} else {
		defProxy, err = checkProxyVal(defProxy)
		if err != nil {
			return err
		}
	}

	// Normalize the proxy setting for each record.
	// A and CNAMEs: Validate. If null, set to default.
	// else: Make sure it wasn't set. Set to default.
	for _, rec := range dc.Records {
		if rec.Type != "A" && rec.Type != "CNAME" && rec.Type != "AAAA" {
			if rec.Metadata[metaProxy] != "" {
				return fmt.Errorf("cloudflare_proxy set on %v record: %#v cloudflare_proxy=%#v", rec.Type, rec.Name, rec.Metadata[metaProxy])
			}
			// Force it to off.
			rec.Metadata[metaProxy] = "off"
		} else {
			if val := rec.Metadata[metaProxy]; val == "" {
				rec.Metadata[metaProxy] = defProxy
			} else {
				val, err := checkProxyVal(val)
				if err != nil {
					return err
				}
				rec.Metadata[metaProxy] = val
			}
		}
	}

	// look for ip conversions and transform records
	for _, rec := range dc.Records {
		if rec.TTL == 0 {
			rec.TTL = 1
		}
		if rec.Type != "A" {
			continue
		}
		//only transform "full"
		if rec.Metadata[metaProxy] != "full" {
			continue
		}
		ip := net.ParseIP(rec.Target)
		if ip == nil {
			return fmt.Errorf("%s is not a valid ip address", rec.Target)
		}
		newIP, err := transform.TransformIP(ip, c.ipConversions)
		if err != nil {
			return err
		}
		// Remember the pre-transform IP so createRec can publish it.
		rec.Metadata[metaOriginalIP] = rec.Target
		rec.Target = newIP.String()
	}

	return nil
}
|
||||||
|
|
||||||
|
func newCloudflare(m map[string]string, metadata json.RawMessage) (providers.DNSServiceProvider, error) {
|
||||||
|
api := &CloudflareApi{}
|
||||||
|
api.ApiUser, api.ApiKey = m["apiuser"], m["apikey"]
|
||||||
|
// check api keys from creds json file
|
||||||
|
if api.ApiKey == "" || api.ApiUser == "" {
|
||||||
|
return nil, fmt.Errorf("Cloudflare apikey and apiuser must be provided.")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(metadata) > 0 {
|
||||||
|
parsedMeta := &struct {
|
||||||
|
IPConversions string `json:"ip_conversions"`
|
||||||
|
SecretIps []interface{} `json:"secret_ips"`
|
||||||
|
IgnoredLabels []string `json:"ignored_labels"`
|
||||||
|
}{}
|
||||||
|
err := json.Unmarshal([]byte(metadata), parsedMeta)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// ignored_labels:
|
||||||
|
for _, l := range parsedMeta.IgnoredLabels {
|
||||||
|
api.ignoredLabels = append(api.ignoredLabels, l)
|
||||||
|
}
|
||||||
|
// parse provider level metadata
|
||||||
|
api.ipConversions, err = transform.DecodeTransformTable(parsedMeta.IPConversions)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ips := []net.IP{}
|
||||||
|
for _, ipStr := range parsedMeta.SecretIps {
|
||||||
|
var ip net.IP
|
||||||
|
if ip, err = models.InterfaceToIP(ipStr); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ips = append(ips, ip)
|
||||||
|
}
|
||||||
|
api.secretIPs = ips
|
||||||
|
}
|
||||||
|
return api, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// init registers this package as the CLOUDFLAREAPI provider type so that
// dnsconfig files can reference it by name.
func init() {
	providers.RegisterDomainServiceProviderType("CLOUDFLAREAPI", newCloudflare)
}
|
||||||
|
|
||||||
|
// Used on the "existing" records.
|
||||||
|
type cfRecord struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Content string `json:"content"`
|
||||||
|
Proxiable bool `json:"proxiable"`
|
||||||
|
Proxied bool `json:"proxied"`
|
||||||
|
TTL int `json:"ttl"`
|
||||||
|
Locked bool `json:"locked"`
|
||||||
|
ZoneID string `json:"zone_id"`
|
||||||
|
ZoneName string `json:"zone_name"`
|
||||||
|
CreatedOn time.Time `json:"created_on"`
|
||||||
|
ModifiedOn time.Time `json:"modified_on"`
|
||||||
|
Data interface{} `json:"data"`
|
||||||
|
Priority int `json:"priority"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetName returns the record's fully-qualified name (diff.Record interface).
func (c *cfRecord) GetName() string {
	return c.Name
}

// GetType returns the record's DNS type, e.g. "A" (diff.Record interface).
func (c *cfRecord) GetType() string {
	return c.Type
}

// GetContent returns the record's content/target value (diff.Record interface).
func (c *cfRecord) GetContent() string {
	return c.Content
}
|
||||||
|
|
||||||
|
func (c *cfRecord) GetComparisionData() string {
|
||||||
|
mxPrio := ""
|
||||||
|
if c.Type == "MX" {
|
||||||
|
mxPrio = fmt.Sprintf(" %d ", c.Priority)
|
||||||
|
}
|
||||||
|
proxy := ""
|
||||||
|
if c.Type == "A" || c.Type == "CNAME" || c.Type == "AAAA" {
|
||||||
|
proxy = fmt.Sprintf(" proxy=%v ", c.Proxied)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%d%s%s", c.TTL, mxPrio, proxy)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Used on the "expected" records.
|
||||||
|
type recordWrapper struct {
|
||||||
|
*models.RecordConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c recordWrapper) GetComparisionData() string {
|
||||||
|
mxPrio := ""
|
||||||
|
if c.Type == "MX" {
|
||||||
|
mxPrio = fmt.Sprintf(" %d ", c.Priority)
|
||||||
|
}
|
||||||
|
proxy := ""
|
||||||
|
if c.Type == "A" || c.Type == "AAAA" || c.Type == "CNAME" {
|
||||||
|
proxy = fmt.Sprintf(" proxy=%v ", c.Metadata[metaProxy] != "off")
|
||||||
|
}
|
||||||
|
|
||||||
|
ttl := c.TTL
|
||||||
|
if ttl == 0 {
|
||||||
|
ttl = 1
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%d%s%s", ttl, mxPrio, proxy)
|
||||||
|
}
|
116
providers/cloudflare/preprocess_test.go
Normal file
116
providers/cloudflare/preprocess_test.go
Normal file
|
@ -0,0 +1,116 @@
|
||||||
|
package cloudflare
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/StackExchange/dnscontrol/models"
|
||||||
|
"github.com/StackExchange/dnscontrol/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// newDomainConfig returns a minimal empty DomainConfig for "test.com",
// ready to have records and metadata appended by each test.
func newDomainConfig() *models.DomainConfig {
	return &models.DomainConfig{
		Name:     "test.com",
		Records:  []*models.RecordConfig{},
		Metadata: map[string]string{},
	}
}
|
||||||
|
|
||||||
|
// TestPreprocess_BoolValidation checks that preprocessConfig normalizes each
// record's cloudflare_proxy value: case is lowered, valid values pass through,
// and a missing value falls back to the default ("off").
// The expected slice is index-aligned with the records appended above it.
func TestPreprocess_BoolValidation(t *testing.T) {
	cf := &CloudflareApi{}
	domain := newDomainConfig()
	domain.Records = append(domain.Records, &models.RecordConfig{Type: "A", Target: "1.2.3.4", Metadata: map[string]string{metaProxy: "on"}})
	domain.Records = append(domain.Records, &models.RecordConfig{Type: "A", Target: "1.2.3.4", Metadata: map[string]string{metaProxy: "fUll"}})
	domain.Records = append(domain.Records, &models.RecordConfig{Type: "A", Target: "1.2.3.4", Metadata: map[string]string{}})
	domain.Records = append(domain.Records, &models.RecordConfig{Type: "A", Target: "1.2.3.4", Metadata: map[string]string{metaProxy: "Off"}})
	domain.Records = append(domain.Records, &models.RecordConfig{Type: "A", Target: "1.2.3.4", Metadata: map[string]string{metaProxy: "off"}})
	err := cf.preprocessConfig(domain)
	if err != nil {
		t.Fatal(err)
	}
	expected := []string{"on", "full", "off", "off", "off"}
	// make sure only "on" or "off", and "full" are actually set
	for i, rec := range domain.Records {
		if rec.Metadata[metaProxy] != expected[i] {
			t.Fatalf("At index %d: expect '%s' but found '%s'", i, expected[i], rec.Metadata[metaProxy])
		}
	}
}
|
||||||
|
|
||||||
|
// TestPreprocess_BoolValidation_Fails checks that an invalid cloudflare_proxy
// value ("true" instead of on/off/full) is rejected by preprocessConfig.
func TestPreprocess_BoolValidation_Fails(t *testing.T) {
	cf := &CloudflareApi{}
	domain := newDomainConfig()
	domain.Records = append(domain.Records, &models.RecordConfig{Metadata: map[string]string{metaProxy: "true"}})
	err := cf.preprocessConfig(domain)
	if err == nil {
		t.Fatal("Expected validation error, but got none")
	}
}
|
||||||
|
|
||||||
|
// TestPreprocess_DefaultProxy checks that the domain-level
// cloudflare_proxy_default metadata fills in the proxy value only for records
// that did not set one themselves; explicit per-record values win.
func TestPreprocess_DefaultProxy(t *testing.T) {
	cf := &CloudflareApi{}
	domain := newDomainConfig()
	domain.Metadata[metaProxyDefault] = "full"
	domain.Records = append(domain.Records, &models.RecordConfig{Type: "A", Target: "1.2.3.4", Metadata: map[string]string{metaProxy: "on"}})
	domain.Records = append(domain.Records, &models.RecordConfig{Type: "A", Target: "1.2.3.4", Metadata: map[string]string{metaProxy: "off"}})
	domain.Records = append(domain.Records, &models.RecordConfig{Type: "A", Target: "1.2.3.4", Metadata: map[string]string{}})
	err := cf.preprocessConfig(domain)
	if err != nil {
		t.Fatal(err)
	}
	expected := []string{"on", "off", "full"}
	for i, rec := range domain.Records {
		if rec.Metadata[metaProxy] != expected[i] {
			t.Fatalf("At index %d: expect '%s' but found '%s'", i, expected[i], rec.Metadata[metaProxy])
		}
	}
}
|
||||||
|
|
||||||
|
// TestPreprocess_DefaultProxy_Validation checks that an invalid domain-level
// cloudflare_proxy_default value is rejected by preprocessConfig.
func TestPreprocess_DefaultProxy_Validation(t *testing.T) {
	cf := &CloudflareApi{}
	domain := newDomainConfig()
	domain.Metadata[metaProxyDefault] = "true"
	err := cf.preprocessConfig(domain)
	if err == nil {
		t.Fatal("Expected validation error, but got none")
	}
}
|
||||||
|
|
||||||
|
// TestIpRewriting checks preprocessConfig's IP transformation: only A records
// proxied with "full" and falling inside a configured conversion range are
// rewritten; in that case the original IP is stashed in record metadata.
// The tests slice and domain.Records are index-aligned.
func TestIpRewriting(t *testing.T) {
	var tests = []struct {
		Given, Expected string
		Proxy           string
	}{
		//outside of range
		{"5.5.5.5", "5.5.5.5", "full"},
		{"5.5.5.5", "5.5.5.5", "on"},
		// inside range, but not proxied
		{"1.2.3.4", "1.2.3.4", "on"},
		//inside range and proxied
		{"1.2.3.4", "255.255.255.4", "full"},
	}
	cf := &CloudflareApi{}
	domain := newDomainConfig()
	// Rewrite 1.2.3.0-1.2.3.40 into the 255.255.255.x range.
	cf.ipConversions = []transform.IpConversion{{net.ParseIP("1.2.3.0"), net.ParseIP("1.2.3.40"), net.ParseIP("255.255.255.0"), nil}}
	for _, tst := range tests {
		rec := &models.RecordConfig{Type: "A", Target: tst.Given, Metadata: map[string]string{metaProxy: tst.Proxy}}
		domain.Records = append(domain.Records, rec)
	}
	err := cf.preprocessConfig(domain)
	if err != nil {
		t.Fatal(err)
	}
	for i, tst := range tests {
		rec := domain.Records[i]
		if rec.Target != tst.Expected {
			t.Fatalf("At index %d, expected target of %s, but found %s.", i, tst.Expected, rec.Target)
		}
		// Rewritten records must remember their original IP.
		if tst.Proxy == "full" && tst.Given != tst.Expected && rec.Metadata[metaOriginalIP] != tst.Given {
			t.Fatalf("At index %d, expected original_ip to be set", i)
		}
	}
}
|
||||||
|
|
||||||
|
// TestCnameValidation is a placeholder.
// TODO: exercise preprocessConfig's CNAME proxy-metadata handling.
func TestCnameValidation(t *testing.T) {

}
|
251
providers/cloudflare/rest.go
Normal file
251
providers/cloudflare/rest.go
Normal file
|
@ -0,0 +1,251 @@
|
||||||
|
package cloudflare
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/StackExchange/dnscontrol/models"
|
||||||
|
"github.com/StackExchange/dnscontrol/providers/diff"
|
||||||
|
)
|
||||||
|
|
||||||
|
// cloudflare v4 REST endpoints. recordsURL takes a zone id; singleRecordURL
// takes a zone id followed by a record id (both via fmt.Sprintf).
const (
	baseURL         = "https://api.cloudflare.com/client/v4/"
	zonesURL        = baseURL + "zones/"
	recordsURL      = zonesURL + "%s/dns_records/"
	singleRecordURL = recordsURL + "%s"
)
|
||||||
|
|
||||||
|
// get list of domains for account. Cache so the ids can be looked up from domain name
|
||||||
|
func (c *CloudflareApi) fetchDomainList() error {
|
||||||
|
c.domainIndex = map[string]string{}
|
||||||
|
c.nameservers = map[string][]*models.Nameserver{}
|
||||||
|
page := 1
|
||||||
|
for {
|
||||||
|
zr := &zoneResponse{}
|
||||||
|
url := fmt.Sprintf("%s?page=%d&per_page=50", zonesURL, page)
|
||||||
|
if err := c.get(url, zr); err != nil {
|
||||||
|
return fmt.Errorf("Error fetching domain list from cloudflare: %s", err)
|
||||||
|
}
|
||||||
|
if !zr.Success {
|
||||||
|
return fmt.Errorf("Error fetching domain list from cloudflare: %s", stringifyErrors(zr.Errors))
|
||||||
|
}
|
||||||
|
for _, zone := range zr.Result {
|
||||||
|
c.domainIndex[zone.Name] = zone.ID
|
||||||
|
for _, ns := range zone.Nameservers {
|
||||||
|
c.nameservers[zone.Name] = append(c.nameservers[zone.Name], &models.Nameserver{Name: ns})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ri := zr.ResultInfo
|
||||||
|
if len(zr.Result) == 0 || ri.Page*ri.PerPage >= ri.TotalCount {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
page++
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// get all records for a domain
|
||||||
|
func (c *CloudflareApi) getRecordsForDomain(id string) ([]diff.Record, error) {
|
||||||
|
url := fmt.Sprintf(recordsURL, id)
|
||||||
|
page := 1
|
||||||
|
records := []diff.Record{}
|
||||||
|
for {
|
||||||
|
reqURL := fmt.Sprintf("%s?page=%d&per_page=100", url, page)
|
||||||
|
var data recordsResponse
|
||||||
|
if err := c.get(reqURL, &data); err != nil {
|
||||||
|
return nil, fmt.Errorf("Error fetching record list from cloudflare: %s", err)
|
||||||
|
}
|
||||||
|
if !data.Success {
|
||||||
|
return nil, fmt.Errorf("Error fetching record list cloudflare: %s", stringifyErrors(data.Errors))
|
||||||
|
}
|
||||||
|
for _, rec := range data.Result {
|
||||||
|
records = append(records, rec)
|
||||||
|
}
|
||||||
|
ri := data.ResultInfo
|
||||||
|
if len(data.Result) == 0 || ri.Page*ri.PerPage >= ri.TotalCount {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
page++
|
||||||
|
}
|
||||||
|
return records, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// create a correction to delete a record
|
||||||
|
func (c *CloudflareApi) deleteRec(rec *cfRecord, domainID string) *models.Correction {
|
||||||
|
return &models.Correction{
|
||||||
|
Msg: fmt.Sprintf("DELETE record: %s %s %d %s (id=%s)", rec.Name, rec.Type, rec.TTL, rec.Content, rec.ID),
|
||||||
|
F: func() error {
|
||||||
|
endpoint := fmt.Sprintf(singleRecordURL, domainID, rec.ID)
|
||||||
|
req, err := http.NewRequest("DELETE", endpoint, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
c.setHeaders(req)
|
||||||
|
_, err = handleActionResponse(http.DefaultClient.Do(req))
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// createRec builds the corrections needed to create rec in the given zone:
// always a POST to create the record, plus — when the record should be
// proxied — a second correction that turns the proxy on afterwards.
// NOTE: the second correction depends on the first having run: the create
// closure assigns the new record's ID to the shared `id` variable, which the
// proxy closure then reads. The corrections must be executed in order.
func (c *CloudflareApi) createRec(rec *models.RecordConfig, domainID string) []*models.Correction {
	// Payload shape for POST /zones/{id}/dns_records.
	type createRecord struct {
		Name     string `json:"name"`
		Type     string `json:"type"`
		Content  string `json:"content"`
		TTL      uint32 `json:"ttl"`
		Priority uint16 `json:"priority"`
	}
	var id string
	// If preprocessing rewrote the IP, create the record with the original
	// (pre-transform) address.
	content := rec.Target
	if rec.Metadata[metaOriginalIP] != "" {
		content = rec.Metadata[metaOriginalIP]
	}
	prio := ""
	if rec.Type == "MX" {
		prio = fmt.Sprintf(" %d ", rec.Priority)
	}
	arr := []*models.Correction{{
		Msg: fmt.Sprintf("CREATE record: %s %s %d%s %s", rec.Name, rec.Type, rec.TTL, prio, content),
		F: func() error {

			cf := &createRecord{
				Name:     rec.Name,
				Type:     rec.Type,
				TTL:      rec.TTL,
				Content:  content,
				Priority: rec.Priority,
			}
			endpoint := fmt.Sprintf(recordsURL, domainID)
			buf := &bytes.Buffer{}
			encoder := json.NewEncoder(buf)
			if err := encoder.Encode(cf); err != nil {
				return err
			}
			req, err := http.NewRequest("POST", endpoint, buf)
			if err != nil {
				return err
			}
			c.setHeaders(req)
			// Capture the new record's ID for the proxy-activation step.
			id, err = handleActionResponse(http.DefaultClient.Do(req))
			return err
		},
	}}
	if rec.Metadata[metaProxy] != "off" {
		arr = append(arr, &models.Correction{
			Msg: fmt.Sprintf("ACTIVATE PROXY for new record %s %s %d %s", rec.Name, rec.Type, rec.TTL, rec.Target),
			F:   func() error { return c.modifyRecord(domainID, id, true, rec) },
		})
	}
	return arr
}
|
||||||
|
|
||||||
|
// modifyRecord issues a PUT replacing the record recID in zone domainID with
// the values from rec, setting the proxied flag as requested. Both IDs must
// be non-empty (the proxy-activation step relies on createRec having filled
// in the ID first).
func (c *CloudflareApi) modifyRecord(domainID, recID string, proxied bool, rec *models.RecordConfig) error {
	if domainID == "" || recID == "" {
		return fmt.Errorf("Cannot modify record if domain or record id are empty.")
	}
	// Payload shape for PUT /zones/{zone}/dns_records/{record}.
	type record struct {
		ID       string `json:"id"`
		Proxied  bool   `json:"proxied"`
		Name     string `json:"name"`
		Type     string `json:"type"`
		Content  string `json:"content"`
		Priority uint16 `json:"priority"`
		TTL      uint32 `json:"ttl"`
	}
	r := record{recID, proxied, rec.Name, rec.Type, rec.Target, rec.Priority, rec.TTL}
	endpoint := fmt.Sprintf(singleRecordURL, domainID, recID)
	buf := &bytes.Buffer{}
	encoder := json.NewEncoder(buf)
	if err := encoder.Encode(r); err != nil {
		return err
	}
	req, err := http.NewRequest("PUT", endpoint, buf)
	if err != nil {
		return err
	}
	c.setHeaders(req)
	_, err = handleActionResponse(http.DefaultClient.Do(req))
	return err
}
|
||||||
|
|
||||||
|
// common error handling for all action responses
|
||||||
|
func handleActionResponse(resp *http.Response, err error) (id string, e error) {
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
result := &basicResponse{}
|
||||||
|
decoder := json.NewDecoder(resp.Body)
|
||||||
|
if err = decoder.Decode(result); err != nil {
|
||||||
|
return "", fmt.Errorf("Unknown error. Status code: %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
if resp.StatusCode != 200 {
|
||||||
|
return "", fmt.Errorf(stringifyErrors(result.Errors))
|
||||||
|
}
|
||||||
|
return result.Result.ID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// setHeaders attaches the cloudflare authentication headers (key + email)
// to an outgoing API request.
func (c *CloudflareApi) setHeaders(req *http.Request) {
	req.Header.Set("X-Auth-Key", c.ApiKey)
	req.Header.Set("X-Auth-Email", c.ApiUser)
}
|
||||||
|
|
||||||
|
// generic get handler. makes request and unmarshalls response to given interface
|
||||||
|
func (c *CloudflareApi) get(endpoint string, target interface{}) error {
|
||||||
|
req, err := http.NewRequest("GET", endpoint, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
c.setHeaders(req)
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
if resp.StatusCode != 200 {
|
||||||
|
return fmt.Errorf("Bad status code from cloudflare: %d not 200.", resp.StatusCode)
|
||||||
|
}
|
||||||
|
decoder := json.NewDecoder(resp.Body)
|
||||||
|
return decoder.Decode(target)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stringifyErrors renders the API's error list as a JSON string for inclusion
// in error messages, or "???" if the list itself cannot be marshalled.
func stringifyErrors(errors []interface{}) string {
	if dat, err := json.Marshal(errors); err == nil {
		return string(dat)
	}
	return "???"
}
|
||||||
|
|
||||||
|
// recordsResponse is the envelope returned by the dns_records listing
// endpoint: the common success/error fields plus a page of records.
type recordsResponse struct {
	basicResponse
	Result     []*cfRecord `json:"result"`
	ResultInfo pagingInfo  `json:"result_info"`
}
|
||||||
|
// basicResponse holds the fields common to every cloudflare v4 API response:
// a success flag, error/message lists, and (for mutating calls) the ID of
// the affected object.
type basicResponse struct {
	Success  bool          `json:"success"`
	Errors   []interface{} `json:"errors"`
	Messages []interface{} `json:"messages"`
	Result   struct {
		ID string `json:"id"`
	} `json:"result"`
}
|
||||||
|
|
||||||
|
// zoneResponse is the envelope returned by the zones listing endpoint:
// the common fields plus one page of zones (id, name, assigned nameservers).
type zoneResponse struct {
	basicResponse
	Result []struct {
		ID          string   `json:"id"`
		Name        string   `json:"name"`
		Nameservers []string `json:"name_servers"`
	} `json:"result"`
	ResultInfo pagingInfo `json:"result_info"`
}
|
||||||
|
|
||||||
|
// pagingInfo carries cloudflare's pagination state; callers keep fetching
// pages until Page*PerPage >= TotalCount.
type pagingInfo struct {
	Page       int `json:"page"`
	PerPage    int `json:"per_page"`
	Count      int `json:"count"`
	TotalCount int `json:"total_count"`
}
|
50
providers/config/providerConfig.go
Normal file
50
providers/config/providerConfig.go
Normal file
|
@ -0,0 +1,50 @@
|
||||||
|
// Package config provides functions for reading and parsing the provider credentials json file.
|
||||||
|
// It cleans nonstandard json features (comments and trailing commas), as well as replaces environment variable placeholders with
|
||||||
|
// their environment variable equivalents. To reference an environment variable in your json file, simply use values in this format:
|
||||||
|
// "key"="$ENV_VAR_NAME"
|
||||||
|
package config
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/DisposaBoy/JsonConfigReader"
|
||||||
|
"github.com/TomOnTime/utfutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// LoadProviderConfigs will open the specified file name, and parse its contents. It will replace environment variables it finds if any value matches $[A-Za-z_-0-9]+
|
||||||
|
// LoadProviderConfigs will open the specified file name, and parse its contents. It will replace environment variables it finds if any value matches $[A-Za-z_-0-9]+
// The file is read UTF-aware (BOM-tolerant) and cleaned of nonstandard JSON
// (comments, trailing commas) before decoding into provider -> key -> value.
func LoadProviderConfigs(fname string) (map[string]map[string]string, error) {
	var results = map[string]map[string]string{}
	dat, err := utfutil.ReadFile(fname, utfutil.POSIX)
	if err != nil {
		return nil, fmt.Errorf("While reading provider credentials file %v: %v", fname, err)
	}
	s := string(dat)
	// JsonConfigReader strips comments and trailing commas.
	r := JsonConfigReader.New(strings.NewReader(s))
	err = json.NewDecoder(r).Decode(&results)
	if err != nil {
		return nil, fmt.Errorf("While parsing provider credentials file %v: %v", fname, err)
	}
	if err = replaceEnvVars(results); err != nil {
		return nil, err
	}
	return results, nil
}
|
||||||
|
|
||||||
|
func replaceEnvVars(m map[string]map[string]string) error {
|
||||||
|
for provider, keys := range m {
|
||||||
|
for k, v := range keys {
|
||||||
|
if strings.HasPrefix(v, "$") {
|
||||||
|
env := v[1:]
|
||||||
|
newVal := os.Getenv(env)
|
||||||
|
if newVal == "" {
|
||||||
|
return fmt.Errorf("Provider %s references environment variable %s, but has no value.", provider, env)
|
||||||
|
}
|
||||||
|
keys[k] = newVal
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
153
providers/diff/diff.go
Normal file
153
providers/diff/diff.go
Normal file
|
@ -0,0 +1,153 @@
|
||||||
|
package diff
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Record is the minimal view of a DNS record that the diff engine needs.
// Records are grouped by (name, type); content plus comparison data decide
// whether two records are identical, modified, added, or removed.
type Record interface {
	GetName() string
	GetType() string
	GetContent() string

	// Get relevant comparision data. Default implentation uses "ttl [mx priority]", but providers may insert
	// provider specific metadata if needed.
	GetComparisionData() string
}
|
||||||
|
|
||||||
|
// Correlation pairs an existing record with the desired record it maps to.
// Existing is nil for a creation; Desired is nil for a deletion; both are
// set for an unchanged record or a modification.
type Correlation struct {
	Existing Record
	Desired  Record
}

// Changeset is an ordered list of correlations of one kind
// (unchanged, create, delete, or modify).
type Changeset []Correlation
|
||||||
|
|
||||||
|
// IncrementalDiff compares the existing records against the desired records
// and partitions them into four changesets: unchanged, create, toDelete, and
// modify. Records are first grouped by (name, type); within each group,
// records with matching content are paired (unchanged if comparison data also
// matches, otherwise a modification), then leftovers are matched up
// deterministically by sorted "content comparisiondata" strings: pairs become
// modifications, extra desired records become creations, and extra existing
// records become deletions.
func IncrementalDiff(existing []Record, desired []Record) (unchanged, create, toDelete, modify Changeset) {
	unchanged = Changeset{}
	create = Changeset{}
	toDelete = Changeset{}
	modify = Changeset{}

	// log.Printf("ID existing records: (%d)\n", len(existing))
	// for i, d := range existing {
	// log.Printf("\t%d\t%v\n", i, d)
	// }
	// log.Printf("ID desired records: (%d)\n", len(desired))
	// for i, d := range desired {
	// log.Printf("\t%d\t%v\n", i, d)
	// }

	//sort existing and desired by name
	type key struct {
		name, rType string
	}
	existingByNameAndType := map[key][]Record{}
	desiredByNameAndType := map[key][]Record{}
	for _, e := range existing {
		k := key{e.GetName(), e.GetType()}
		existingByNameAndType[k] = append(existingByNameAndType[k], e)
	}
	for _, d := range desired {
		k := key{d.GetName(), d.GetType()}
		desiredByNameAndType[k] = append(desiredByNameAndType[k], d)
	}

	// Look through existing records. This will give us changes and deletions and some additions
	for key, existingRecords := range existingByNameAndType {
		desiredRecords := desiredByNameAndType[key]

		//first look through records that are the same content on both sides. Those are either modifications or unchanged

		// Iterate existing backwards because paired entries are removed from
		// both slices in place.
		for i := len(existingRecords) - 1; i >= 0; i-- {
			ex := existingRecords[i]
			for j, de := range desiredRecords {
				if de.GetContent() == ex.GetContent() {
					//they're either identical or should be a modification of each other
					if de.GetComparisionData() == ex.GetComparisionData() {
						unchanged = append(unchanged, Correlation{ex, de})
					} else {
						modify = append(modify, Correlation{ex, de})
					}
					// remove from both slices by index
					existingRecords = existingRecords[:i+copy(existingRecords[i:], existingRecords[i+1:])]
					desiredRecords = desiredRecords[:j+copy(desiredRecords[j:], desiredRecords[j+1:])]
					break
				}
			}
		}

		desiredLookup := map[string]Record{}
		existingLookup := map[string]Record{}
		// build index based on normalized value/ttl
		for _, ex := range existingRecords {
			normalized := fmt.Sprintf("%s %s", ex.GetContent(), ex.GetComparisionData())
			if existingLookup[normalized] != nil {
				panic(fmt.Sprintf("DUPLICATE E_RECORD FOUND: %s %s", key, normalized))
			}
			existingLookup[normalized] = ex
		}
		for _, de := range desiredRecords {
			normalized := fmt.Sprintf("%s %s", de.GetContent(), de.GetComparisionData())
			if desiredLookup[normalized] != nil {
				panic(fmt.Sprintf("DUPLICATE D_RECORD FOUND: %s %s", key, normalized))
			}
			desiredLookup[normalized] = de
		}
		// if a record is in both, it is unchanged
		for norm, ex := range existingLookup {
			if de, ok := desiredLookup[norm]; ok {
				unchanged = append(unchanged, Correlation{ex, de})
				delete(existingLookup, norm)
				delete(desiredLookup, norm)
			}
		}
		//sort records by normalized text. Keeps behaviour deterministic
		existingStrings, desiredStrings := []string{}, []string{}
		for norm := range existingLookup {
			existingStrings = append(existingStrings, norm)
		}
		for norm := range desiredLookup {
			desiredStrings = append(desiredStrings, norm)
		}
		sort.Strings(existingStrings)
		sort.Strings(desiredStrings)
		// Modifications. Take 1 from each side.
		for len(desiredStrings) > 0 && len(existingStrings) > 0 {
			modify = append(modify, Correlation{existingLookup[existingStrings[0]], desiredLookup[desiredStrings[0]]})
			existingStrings = existingStrings[1:]
			desiredStrings = desiredStrings[1:]
		}
		// If desired still has things they are additions
		for _, norm := range desiredStrings {
			rec := desiredLookup[norm]
			create = append(create, Correlation{nil, rec})
		}
		// if found , but not desired, delete it
		for _, norm := range existingStrings {
			rec := existingLookup[norm]
			toDelete = append(toDelete, Correlation{rec, nil})
		}
		// remove this set from the desired list to indicate we have processed it.
		delete(desiredByNameAndType, key)
	}

	//any name/type sets not already processed are pure additions
	for name := range existingByNameAndType {
		delete(desiredByNameAndType, name)
	}
	for _, desiredList := range desiredByNameAndType {
		for _, rec := range desiredList {
			create = append(create, Correlation{nil, rec})
		}
	}
	return
}
|
||||||
|
|
||||||
|
func (c Correlation) String() string {
|
||||||
|
if c.Existing == nil {
|
||||||
|
return fmt.Sprintf("CREATE %s %s %s %s", c.Desired.GetType(), c.Desired.GetName(), c.Desired.GetContent(), c.Desired.GetComparisionData())
|
||||||
|
}
|
||||||
|
if c.Desired == nil {
|
||||||
|
return fmt.Sprintf("DELETE %s %s %s %s", c.Existing.GetType(), c.Existing.GetName(), c.Existing.GetContent(), c.Existing.GetComparisionData())
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("MODIFY %s %s: (%s %s) -> (%s %s)", c.Existing.GetType(), c.Existing.GetName(), c.Existing.GetContent(), c.Existing.GetComparisionData(), c.Desired.GetContent(), c.Desired.GetComparisionData())
|
||||||
|
}
|
112
providers/diff/diff_test.go
Normal file
112
providers/diff/diff_test.go
Normal file
|
@ -0,0 +1,112 @@
|
||||||
|
package diff
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/miekg/dns/dnsutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// myRecord is a test Record encoded as a single space-separated string:
// "name type ttl value".
type myRecord string //@ A 1 1.2.3.4
|
||||||
|
|
||||||
|
// GetName returns the record's name (first token), qualified under the
// fixed test origin "example.com".
func (m myRecord) GetName() string {
	name := strings.SplitN(string(m), " ", 4)[0]
	return dnsutil.AddOrigin(name, "example.com")
}
|
||||||
|
// GetType returns the record type field (second token).
func (m myRecord) GetType() string {
	return strings.SplitN(string(m), " ", 4)[1]
}
|
||||||
|
// GetContent returns the record value field (fourth token).
func (m myRecord) GetContent() string {
	return strings.SplitN(string(m), " ", 4)[3]
}
|
||||||
|
// GetComparisionData returns the TTL field (third token), which the diff
// engine uses to distinguish modifications from unchanged records.
func (m myRecord) GetComparisionData() string {
	return fmt.Sprint(strings.SplitN(string(m), " ", 4)[2])
}
|
||||||
|
|
||||||
|
// TestAdditionsOnly: a desired record with no existing counterpart yields
// exactly one creation and nothing else.
func TestAdditionsOnly(t *testing.T) {
	desired := []Record{
		myRecord("@ A 1 1.2.3.4"),
	}
	existing := []Record{}
	checkLengths(t, existing, desired, 0, 1, 0, 0)
}
|
||||||
|
|
||||||
|
// TestDeletionsOnly: an existing record with no desired counterpart yields
// exactly one deletion and nothing else.
func TestDeletionsOnly(t *testing.T) {
	existing := []Record{
		myRecord("@ A 1 1.2.3.4"),
	}
	desired := []Record{}
	checkLengths(t, existing, desired, 0, 0, 1, 0)
}
|
||||||
|
|
||||||
|
// TestModification: a TTL change on "@" is reported as one modification,
// while the identical "www" record is reported as unchanged; both
// correlations must pair the right existing/desired records.
func TestModification(t *testing.T) {
	existing := []Record{
		myRecord("www A 1 1.1.1.1"),
		myRecord("@ A 1 1.2.3.4"),
	}
	desired := []Record{
		myRecord("@ A 32 1.2.3.4"),
		myRecord("www A 1 1.1.1.1"),
	}
	un, _, _, mod := checkLengths(t, existing, desired, 1, 0, 0, 1)
	// Bail out early: indexing un/mod below would panic if the counts were wrong.
	if t.Failed() {
		return
	}
	if un[0].Desired != desired[1] || un[0].Existing != existing[0] {
		t.Error("Expected unchanged records to be correlated")
	}
	if mod[0].Desired != desired[0] || mod[0].Existing != existing[1] {
		t.Errorf("Expected modified records to be correlated")
	}
}
|
||||||
|
|
||||||
|
// TestUnchangedWithAddition: when two desired records share a name/type and
// one matches an existing record exactly, the exact match is reported as
// unchanged and the other as a creation.
func TestUnchangedWithAddition(t *testing.T) {
	existing := []Record{
		myRecord("www A 1 1.1.1.1"),
	}
	desired := []Record{
		myRecord("www A 1 1.2.3.4"),
		myRecord("www A 1 1.1.1.1"),
	}
	un, _, _, _ := checkLengths(t, existing, desired, 1, 1, 0, 0)
	if un[0].Desired != desired[1] || un[0].Existing != existing[0] {
		t.Errorf("Expected unchanged records to be correlated")
	}
}
|
||||||
|
|
||||||
|
// TestOutOfOrderRecords: among several records sharing a name/type, the
// TTL-only change (3.3.3.3: 1 -> 10) must correlate as a modification of the
// matching existing record even though a new record (2.2.2.3) appears
// earlier in the desired list.
func TestOutOfOrderRecords(t *testing.T) {
	existing := []Record{
		myRecord("www A 1 1.1.1.1"),
		myRecord("www A 1 2.2.2.2"),
		myRecord("www A 1 3.3.3.3"),
	}
	desired := []Record{
		myRecord("www A 1 1.1.1.1"),
		myRecord("www A 1 2.2.2.2"),
		myRecord("www A 1 2.2.2.3"),
		myRecord("www A 10 3.3.3.3"),
	}
	_, _, _, mods := checkLengths(t, existing, desired, 2, 1, 0, 1)
	if mods[0].Desired != desired[3] || mods[0].Existing != existing[2] {
		t.Fatalf("Expected to match %s and %s, but matched %s and %s", existing[2], desired[3], mods[0].Existing, mods[0].Desired)
	}
}
|
||||||
|
|
||||||
|
// checkLengths runs IncrementalDiff on existing vs desired and asserts the
// expected number of unchanged/create/delete/modify correlations. It returns
// the four changesets so callers can inspect individual correlations.
// Failures are reported via t.Errorf (non-fatal).
func checkLengths(t *testing.T, existing, desired []Record, unCount, createCount, delCount, modCount int) (un, cre, del, mod Changeset) {
	un, cre, del, mod = IncrementalDiff(existing, desired)
	if len(un) != unCount {
		t.Errorf("Got %d unchanged records, but expected %d", len(un), unCount)
	}
	if len(cre) != createCount {
		t.Errorf("Got %d records to create, but expected %d", len(cre), createCount)
	}
	if len(del) != delCount {
		t.Errorf("Got %d records to delete, but expected %d", len(del), delCount)
	}
	if len(mod) != modCount {
		t.Errorf("Got %d records to modify, but expected %d", len(mod), modCount)
	}
	return
}
|
184
providers/gandi/gandiProvider.go
Normal file
184
providers/gandi/gandiProvider.go
Normal file
|
@ -0,0 +1,184 @@
|
||||||
|
package gandi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/StackExchange/dnscontrol/models"
|
||||||
|
"github.com/StackExchange/dnscontrol/providers"
|
||||||
|
"github.com/StackExchange/dnscontrol/providers/diff"
|
||||||
|
|
||||||
|
gandirecord "github.com/prasmussen/gandi-api/domain/zone/record"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
|
||||||
|
Gandi API DNS provider:
|
||||||
|
|
||||||
|
Info required in `creds.json`:
|
||||||
|
- apikey
|
||||||
|
|
||||||
|
*/
|
||||||
|
|
||||||
|
// GandiApi holds the credentials and cached state used to talk to the
// Gandi API.
type GandiApi struct {
	ApiKey      string
	domainIndex map[string]int64 // Map of domainname to index; populated lazily by fetchDomainList.
	nameservers map[string][]*models.Nameserver
	ZoneId      int64
}
|
||||||
|
|
||||||
|
// cfRecord wraps a Gandi RecordInfo so it satisfies the diff.Record
// interface used by the generic diffing code.
type cfRecord struct {
	gandirecord.RecordInfo
}
|
||||||
|
|
||||||
|
// GetName returns the record's name.
func (c *cfRecord) GetName() string {
	return c.Name
}
|
||||||
|
|
||||||
|
// GetType returns the record's type (e.g. "A", "MX").
func (c *cfRecord) GetType() string {
	return c.Type
}
|
||||||
|
|
||||||
|
// GetTtl returns the record's TTL in seconds.
func (c *cfRecord) GetTtl() int64 {
	return c.Ttl
}
|
||||||
|
|
||||||
|
// GetValue returns the record's raw value, including any MX priority prefix.
func (c *cfRecord) GetValue() string {
	return c.Value
}
|
||||||
|
|
||||||
|
func (c *cfRecord) GetContent() string {
|
||||||
|
switch c.Type {
|
||||||
|
case "MX":
|
||||||
|
parts := strings.SplitN(c.Value, " ", 2)
|
||||||
|
// TODO(tlim): This should check for more errors.
|
||||||
|
return strings.Join(parts[1:], " ")
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
return c.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *cfRecord) GetComparisionData() string {
|
||||||
|
if c.Type == "MX" {
|
||||||
|
parts := strings.SplitN(c.Value, " ", 2)
|
||||||
|
priority, err := strconv.Atoi(parts[0])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Sprintf("%s %#v", c.Ttl, parts[0])
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%d %d", c.Ttl, priority)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%d", c.Ttl)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDomainCorrections returns the corrections needed to make the zone for
// dc.Name match dc.Records. It converts both the records found at Gandi and
// the desired records into diff.Record form, diffs them, and — if anything
// differs — returns a single correction that regenerates the whole zone
// (Gandi zones are versioned and replaced wholesale, not patched record by
// record).
func (c *GandiApi) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {

	// Lazily populate the domain-name -> id cache on first use.
	if c.domainIndex == nil {
		if err := c.fetchDomainList(); err != nil {
			return nil, err
		}
	}

	_, ok := c.domainIndex[dc.Name]
	if !ok {
		return nil, fmt.Errorf("%s not listed in zones for gandi account", dc.Name)
	}

	domaininfo, err := c.fetchDomainInfo(dc.Name)
	if err != nil {
		return nil, err
	}
	// Report the nameservers Gandi has on file so registrar-level code can
	// act on them.
	for _, nsname := range domaininfo.Nameservers {
		dc.Nameservers = append(dc.Nameservers, &models.Nameserver{Name: nsname})
	}
	foundRecords, err := c.getZoneRecords(domaininfo.ZoneId)
	if err != nil {
		return nil, err
	}

	// Convert to []diff.Records and compare:
	foundDiffRecords := make([]diff.Record, len(foundRecords))
	for i, rec := range foundRecords {
		n := &cfRecord{}
		n.Id = 0
		n.Name = rec.Name
		n.Ttl = int64(rec.Ttl)
		n.Type = rec.Type
		n.Value = rec.Value
		foundDiffRecords[i] = n
	}
	// Build both the diff view and the RecordSet payload (used if we need to
	// regenerate the zone) in one pass over the desired records.
	expectedDiffRecords := make([]diff.Record, len(dc.Records))
	expectedRecordSets := make([]gandirecord.RecordSet, len(dc.Records))
	for i, rec := range dc.Records {
		n := &cfRecord{}
		n.Id = 0
		n.Name = rec.Name
		n.Ttl = int64(rec.TTL)
		if n.Ttl == 0 {
			// Default TTL when the config does not specify one.
			n.Ttl = 3600
		}
		n.Type = rec.Type
		switch n.Type {
		case "MX":
			// Gandi stores MX priority as a prefix of the value.
			n.Value = fmt.Sprintf("%d %s", rec.Priority, rec.Target)
		case "TXT":
			n.Value = "\"" + rec.Target + "\"" // FIXME(tlim): Should do proper quoting.
		default:
			n.Value = rec.Target
		}
		expectedDiffRecords[i] = n
		expectedRecordSets[i] = gandirecord.RecordSet{}
		expectedRecordSets[i]["type"] = n.Type
		expectedRecordSets[i]["name"] = n.Name
		expectedRecordSets[i]["value"] = n.Value
		if n.Ttl != 0 {
			expectedRecordSets[i]["ttl"] = n.Ttl
		}
	}
	_, create, del, mod := diff.IncrementalDiff(foundDiffRecords, expectedDiffRecords)

	// Print a list of changes. Generate an actual change that is the zone
	changes := false
	for _, i := range create {
		changes = true
		fmt.Println(i)
	}
	for _, i := range del {
		changes = true
		fmt.Println(i)
	}
	for _, i := range mod {
		changes = true
		fmt.Println(i)
	}

	msg := fmt.Sprintf("GENERATE_ZONE: %s (%d records)", dc.Name, len(expectedDiffRecords))
	corrections := []*models.Correction{}
	if changes {
		// Any difference at all triggers a full zone regeneration.
		corrections = append(corrections,
			&models.Correction{
				Msg: msg,
				F: func() error {
					fmt.Printf("CREATING ZONE: %v\n", dc.Name)
					return c.createGandiZone(dc.Name, domaininfo.ZoneId, expectedRecordSets)
				},
			})
	}

	return corrections, nil
}
|
||||||
|
|
||||||
|
func newGandi(m map[string]string, metadata json.RawMessage) (providers.DNSServiceProvider, error) {
|
||||||
|
api := &GandiApi{}
|
||||||
|
api.ApiKey = m["apikey"]
|
||||||
|
if api.ApiKey == "" {
|
||||||
|
return nil, fmt.Errorf("Gandi apikey must be provided.")
|
||||||
|
}
|
||||||
|
|
||||||
|
return api, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// init registers this provider under the "GANDI" type so dnsconfig files can
// reference it by that name.
func init() {
	providers.RegisterDomainServiceProviderType("GANDI", newGandi)
}
|
168
providers/gandi/protocol.go
Normal file
168
providers/gandi/protocol.go
Normal file
|
@ -0,0 +1,168 @@
|
||||||
|
package gandi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/StackExchange/dnscontrol/providers/diff"
|
||||||
|
)
|
||||||
|
|
||||||
|
import (
|
||||||
|
gandiclient "github.com/prasmussen/gandi-api/client"
|
||||||
|
gandidomain "github.com/prasmussen/gandi-api/domain"
|
||||||
|
gandizone "github.com/prasmussen/gandi-api/domain/zone"
|
||||||
|
gandirecord "github.com/prasmussen/gandi-api/domain/zone/record"
|
||||||
|
gandiversion "github.com/prasmussen/gandi-api/domain/zone/version"
|
||||||
|
)
|
||||||
|
|
||||||
|
// fetchDomainList gets list of domains for account. Cache ids for easy lookup.
|
||||||
|
func (c *GandiApi) fetchDomainList() error {
|
||||||
|
c.domainIndex = map[string]int64{}
|
||||||
|
gc := gandiclient.New(c.ApiKey, gandiclient.Production)
|
||||||
|
domain := gandidomain.New(gc)
|
||||||
|
domains, err := domain.List()
|
||||||
|
if err != nil {
|
||||||
|
// fmt.Println(err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, d := range domains {
|
||||||
|
c.domainIndex[d.Fqdn] = d.Id
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// fetchDomainInfo gets information about a domain by its fully-qualified name.
func (c *GandiApi) fetchDomainInfo(fqdn string) (*gandidomain.DomainInfo, error) {
	gc := gandiclient.New(c.ApiKey, gandiclient.Production)
	domain := gandidomain.New(gc)
	return domain.Info(fqdn)
}
|
||||||
|
|
||||||
|
// getZoneRecords returns the list of records for a zone (version 0, the
// active version).
func (c *GandiApi) getZoneRecords(zoneid int64) ([]*gandirecord.RecordInfo, error) {
	gc := gandiclient.New(c.ApiKey, gandiclient.Production)
	record := gandirecord.New(gc)
	return record.List(zoneid, 0)
}
|
||||||
|
|
||||||
|
// listZones retrieves the list of zones belonging to this account.
func (c *GandiApi) listZones() ([]*gandizone.ZoneInfoBase, error) {
	gc := gandiclient.New(c.ApiKey, gandiclient.Production)
	zone := gandizone.New(gc)
	return zone.List()
}
|
||||||
|
|
||||||
|
// setZones assigns a particular zone to a domain, returning the updated
// domain info.
func (c *GandiApi) setZones(domainname string, zone_id int64) (*gandidomain.DomainInfo, error) {
	gc := gandiclient.New(c.ApiKey, gandiclient.Production)
	zone := gandizone.New(gc)
	return zone.Set(domainname, zone_id)
}
|
||||||
|
|
||||||
|
// getZoneInfo gets ZoneInfo about a zone by its id.
func (c *GandiApi) getZoneInfo(zoneid int64) (*gandizone.ZoneInfo, error) {
	gc := gandiclient.New(c.ApiKey, gandiclient.Production)
	zone := gandizone.New(gc)
	return zone.Info(zoneid)
}
|
||||||
|
|
||||||
|
// createZone creates an entirely new, empty zone with the given name.
func (c *GandiApi) createZone(name string) (*gandizone.ZoneInfo, error) {
	gc := gandiclient.New(c.ApiKey, gandiclient.Production)
	zone := gandizone.New(gc)
	return zone.Create(name)
}
|
||||||
|
|
||||||
|
// replaceZoneContents would replace all records in an existing zone version.
// It is an unimplemented stub and always returns an error.
func (c *GandiApi) replaceZoneContents(zone_id int64, version_id int64, records []diff.Record) error {
	return fmt.Errorf("replaceZoneContents unimplemented")
}
|
||||||
|
|
||||||
|
// getEditableZone returns the id of a zone that is safe to edit for this
// domain. If the current zone is linked to at most one domain it is reused;
// otherwise a dedicated "<domain> dnscontrol" zone is found or created, so
// edits cannot affect other domains sharing the original zone.
func (c *GandiApi) getEditableZone(domainname string, zoneinfo *gandizone.ZoneInfo) (int64, error) {
	var zone_id int64
	if zoneinfo.Domains < 2 {
		// If there is only one domain linked to this zone, use it.
		zone_id = zoneinfo.Id
		fmt.Printf("Using zone id=%d named %#v\n", zone_id, zoneinfo.Name)
		return zone_id, nil
	}

	// We can't use the zone_id given to us. Let's make/find a new one.
	zones, err := c.listZones()
	if err != nil {
		return 0, err
	}
	zonename := fmt.Sprintf("%s dnscontrol", domainname)
	// Reuse a previously-created dnscontrol zone if one exists.
	for _, z := range zones {
		if z.Name == zonename {
			zone_id = z.Id
			fmt.Printf("Recycling zone id=%d named %#v\n", zone_id, z.Name)
			return zone_id, nil
		}
	}
	// No dedicated zone yet; create one.
	zoneinfo, err = c.createZone(zonename)
	if err != nil {
		return 0, err
	}
	zone_id = zoneinfo.Id
	fmt.Printf("Created zone id=%d named %#v\n", zone_id, zoneinfo.Name)
	return zone_id, nil
}
|
||||||
|
|
||||||
|
// makeEditableZone creates a new editable version of the zone (copied from
// version 0) and returns the new version id.
func (c *GandiApi) makeEditableZone(zone_id int64) (int64, error) {
	gc := gandiclient.New(c.ApiKey, gandiclient.Production)
	version := gandiversion.New(gc)
	return version.New(zone_id, 0)
}
|
||||||
|
|
||||||
|
// setZoneRecords replaces the records in the given zone version with the
// supplied record sets.
func (c *GandiApi) setZoneRecords(zone_id, version_id int64, records []gandirecord.RecordSet) ([]*gandirecord.RecordInfo, error) {
	gc := gandiclient.New(c.ApiKey, gandiclient.Production)
	record := gandirecord.New(gc)
	return record.SetRecords(zone_id, version_id, records)
}
|
||||||
|
|
||||||
|
// activateVersion makes the given zone version the active one.
func (c *GandiApi) activateVersion(zone_id, version_id int64) (bool, error) {
	gc := gandiclient.New(c.ApiKey, gandiclient.Production)
	version := gandiversion.New(gc)
	return version.Set(zone_id, version_id)
}
|
||||||
|
|
||||||
|
// createGandiZone regenerates the zone for domainname: it picks (or creates)
// an editable zone, creates a new version, writes the records into it,
// activates that version, and finally links the zone to the domain. The
// steps are sequential because each depends on the id returned by the
// previous one.
func (c *GandiApi) createGandiZone(domainname string, zone_id int64, records []gandirecord.RecordSet) error {

	// Get the zone_id of the zone we'll be updating.
	zoneinfo, err := c.getZoneInfo(zone_id)
	if err != nil {
		return err
	}
	//fmt.Println("ZONEINFO:", zoneinfo)
	zone_id, err = c.getEditableZone(domainname, zoneinfo)
	if err != nil {
		return err
	}

	// Get the version_id of the zone we're updating.
	version_id, err := c.makeEditableZone(zone_id)
	if err != nil {
		return err
	}

	// Update the new version.
	_, err = c.setZoneRecords(zone_id, version_id, records)
	if err != nil {
		return err
	}

	// Activate zone version
	_, err = c.activateVersion(zone_id, version_id)
	if err != nil {
		return err
	}
	// Point the domain at the (possibly new) zone.
	_, err = c.setZones(domainname, zone_id)
	if err != nil {
		return err
	}

	return nil
}
|
61
providers/namecheap/namecheap.go
Normal file
61
providers/namecheap/namecheap.go
Normal file
|
@ -0,0 +1,61 @@
|
||||||
|
package namecheap
|
||||||
|
|
||||||
|
import (
	"fmt"
	"sort"
	"strings"

	"github.com/StackExchange/dnscontrol/models"
	"github.com/StackExchange/dnscontrol/providers"
	nc "github.com/billputer/go-namecheap"
)
|
||||||
|
|
||||||
|
// Namecheap holds the credentials and API client used to manage a Namecheap
// registrar account.
type Namecheap struct {
	ApiKey  string
	ApiUser string
	client  *nc.Client
}
|
||||||
|
|
||||||
|
// init registers this registrar under the "NAMECHEAP" type.
func init() {
	providers.RegisterRegistrarType("NAMECHEAP", newReg)
}
|
||||||
|
|
||||||
|
func newReg(m map[string]string) (providers.Registrar, error) {
|
||||||
|
api := &Namecheap{}
|
||||||
|
api.ApiUser, api.ApiKey = m["apiuser"], m["apikey"]
|
||||||
|
if api.ApiKey == "" || api.ApiUser == "" {
|
||||||
|
return nil, fmt.Errorf("Namecheap apikey and apiuser must be provided.")
|
||||||
|
}
|
||||||
|
api.client = nc.NewClient(api.ApiUser, api.ApiKey, api.ApiUser)
|
||||||
|
return api, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *Namecheap) GetRegistrarCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {
|
||||||
|
info, err := n.client.DomainGetInfo(dc.Name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
//todo: sort both
|
||||||
|
found := strings.Join(info.DNSDetails.Nameservers, ",")
|
||||||
|
desired := ""
|
||||||
|
for _, d := range dc.Nameservers {
|
||||||
|
if desired != "" {
|
||||||
|
desired += ","
|
||||||
|
}
|
||||||
|
desired += d.Name
|
||||||
|
}
|
||||||
|
if found != desired {
|
||||||
|
parts := strings.SplitN(dc.Name, ".", 2)
|
||||||
|
sld, tld := parts[0], parts[1]
|
||||||
|
return []*models.Correction{
|
||||||
|
{Msg: fmt.Sprintf("Change Nameservers from '%s' to '%s'", found, desired),
|
||||||
|
F: func() error {
|
||||||
|
_, err := n.client.DomainDNSSetCustom(sld, tld, desired)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
return nil, nil
|
||||||
|
}
|
48
providers/namedotcom/namedotcom.md
Normal file
48
providers/namedotcom/namedotcom.md
Normal file
|
@ -0,0 +1,48 @@
|
||||||
|
## name.com Provider
|
||||||
|
|
||||||
|
### required config
|
||||||
|
|
||||||
|
In your providers config json file you must provide your name.com api username and access token:
|
||||||
|
|
||||||
|
```
|
||||||
|
"yourNameDotComProviderName":{
|
||||||
|
"apikey": "yourApiKeyFromName.com-klasjdkljasdlk235235235235",
|
||||||
|
"apiuser": "yourUsername"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
In order to get api access you need to [apply for access](https://www.name.com/reseller/apply)
|
||||||
|
|
||||||
|
### example dns config js (registrar only):
|
||||||
|
|
||||||
|
```
|
||||||
|
var NAMECOM = NewRegistrar("myNameCom","NAMEDOTCOM");
|
||||||
|
|
||||||
|
var myNameServers = [
|
||||||
|
NAMESERVER("bill.ns.cloudflare.com"),
|
||||||
|
NAMESERVER("fred.ns.cloudflare.com")
|
||||||
|
];
|
||||||
|
|
||||||
|
D("example.tld",NAMECOM,myNameServers
|
||||||
|
//records handled by another provider...
|
||||||
|
);
|
||||||
|
```
|
||||||
|
|
||||||
|
### example config (registrar and records managed by namedotcom)
|
||||||
|
|
||||||
|
```
|
||||||
|
var NAMECOM = NewRegistrar("myNameCom","NAMEDOTCOM");
|
||||||
|
var NAMECOMDSP = NewDSP("myNameCom","NAMEDOTCOM")
|
||||||
|
|
||||||
|
D("example.tld", NAMECOM, NAMECOMDSP,
|
||||||
|
//ns[1-4].name.com used by default as nameservers
|
||||||
|
|
||||||
|
//override default ttl of 300s
|
||||||
|
DefaultTTL(3600),
|
||||||
|
|
||||||
|
A("test","1.2.3.4"),
|
||||||
|
|
||||||
|
//override ttl for one record only
|
||||||
|
CNAME("foo","some.otherdomain.tld.",TTL(100))
|
||||||
|
)
|
||||||
|
```
|
117
providers/namedotcom/namedotcomProvider.go
Normal file
117
providers/namedotcom/namedotcomProvider.go
Normal file
|
@ -0,0 +1,117 @@
|
||||||
|
//Package namedotcom implements a registrar that uses the name.com api to set name servers. It will self-register its providers when imported.
|
||||||
|
package namedotcom
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/StackExchange/dnscontrol/providers"
|
||||||
|
)
|
||||||
|
|
||||||
|
// nameDotCom holds the name.com API credentials, sent with every request as
// HTTP headers (see addAuth).
type nameDotCom struct {
	APIUser string `json:"apiuser"`
	APIKey  string `json:"apikey"`
}
|
||||||
|
|
||||||
|
// newReg adapts newProvider to the providers.Registrar factory signature.
func newReg(conf map[string]string) (providers.Registrar, error) {
	return newProvider(conf)
}
|
||||||
|
|
||||||
|
// newDsp adapts newProvider to the DNS service provider factory signature;
// meta is currently unused.
func newDsp(conf map[string]string, meta json.RawMessage) (providers.DNSServiceProvider, error) {
	return newProvider(conf)
}
|
||||||
|
|
||||||
|
func newProvider(conf map[string]string) (*nameDotCom, error) {
|
||||||
|
api := &nameDotCom{}
|
||||||
|
api.APIUser, api.APIKey = conf["apiuser"], conf["apikey"]
|
||||||
|
if api.APIKey == "" || api.APIUser == "" {
|
||||||
|
return nil, fmt.Errorf("Name.com apikey and apiuser must be provided.")
|
||||||
|
}
|
||||||
|
return api, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// init registers name.com as both a registrar and a DNS service provider
// under the "NAMEDOTCOM" type.
func init() {
	providers.RegisterRegistrarType("NAMEDOTCOM", newReg)
	providers.RegisterDomainServiceProviderType("NAMEDOTCOM", newDsp)
}
|
||||||
|
|
||||||
|
///
|
||||||
|
//various http helpers for interacting with api
|
||||||
|
///
|
||||||
|
|
||||||
|
// addAuth attaches the name.com authentication headers to an outgoing request.
func (n *nameDotCom) addAuth(r *http.Request) {
	r.Header.Add("Api-Username", n.APIUser)
	r.Header.Add("Api-Token", n.APIKey)
}
|
||||||
|
|
||||||
|
// apiResult is the status envelope present in every name.com API response.
// Code 100 indicates success (see getErr).
type apiResult struct {
	Result struct {
		Code    int    `json:"code"`
		Message string `json:"message"`
	} `json:"result"`
}
|
||||||
|
|
||||||
|
func (r *apiResult) getErr() error {
|
||||||
|
if r == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if r.Result.Code != 100 {
|
||||||
|
if r.Result.Message == "" {
|
||||||
|
return fmt.Errorf("Unknown error from name.com")
|
||||||
|
}
|
||||||
|
return fmt.Errorf(r.Result.Message)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// apiBase is the name.com API root URL; tests reassign it to point at a
// local httptest server.
var apiBase = "https://api.name.com/api"
|
||||||
|
|
||||||
|
// get performs an authenticated HTTP GET against url and unmarshals the JSON
// response body into target.
// NOTE(review): the HTTP status code is not checked; a non-200 response is
// only caught indirectly when its body fails to unmarshal — confirm this is
// intentional.
func (n *nameDotCom) get(url string, target interface{}) error {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}
	n.addAuth(req)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	return json.Unmarshal(data, target)
}
|
||||||
|
|
||||||
|
// post performs an authenticated HTTP POST against url, JSON-marshalling
// data into the request body, and returns the parsed apiResult envelope.
// Callers should check the result with getErr().
// NOTE(review): as with get(), the HTTP status code is not inspected.
func (n *nameDotCom) post(url string, data interface{}) (*apiResult, error) {
	buf := &bytes.Buffer{}
	enc := json.NewEncoder(buf)
	if err := enc.Encode(data); err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", url, buf)
	if err != nil {
		return nil, err
	}
	n.addAuth(req)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	text, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	result := &apiResult{}
	if err = json.Unmarshal(text, result); err != nil {
		return nil, err
	}
	return result, nil
}
|
82
providers/namedotcom/nameservers.go
Normal file
82
providers/namedotcom/nameservers.go
Normal file
|
@ -0,0 +1,82 @@
|
||||||
|
package namedotcom
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/StackExchange/dnscontrol/models"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetRegistrarCorrections returns a correction that updates the domain's
// nameserver delegation at name.com when it differs from dc.Nameservers.
// name.com's randomized default nameservers are normalized to the canonical
// ns1-ns4 set, and both sides are compared as sorted, comma-joined strings.
func (n *nameDotCom) GetRegistrarCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {
	foundNameservers, err := n.getNameservers(dc.Name)
	if err != nil {
		return nil, err
	}
	// Normalize name.com's randomized defaults (e.g. ns1qrt.name.com) to the
	// canonical set so they compare equal to a config that asks for defaults.
	if defaultNsRegexp.MatchString(foundNameservers) {
		foundNameservers = "ns1.name.com,ns2.name.com,ns3.name.com,ns4.name.com"
	}
	expected := []string{}
	for _, ns := range dc.Nameservers {
		// Strip any trailing dot so both sides use the same form.
		name := strings.TrimRight(ns.Name, ".")
		expected = append(expected, name)
	}
	sort.Strings(expected)
	expectedNameservers := strings.Join(expected, ",")

	if foundNameservers != expectedNameservers {
		return []*models.Correction{
			{
				Msg: fmt.Sprintf("Update nameservers %s -> %s", foundNameservers, expectedNameservers),
				F:   n.updateNameservers(expected, dc.Name),
			},
		}, nil
	}
	return nil, nil
}
|
||||||
|
|
||||||
|
// defaultNsRegexp matches name.com's default nameserver set. Even if you
// provide them "ns1.name.com", they will set it to something like
// "ns1qrt.name.com"; this pattern detects that the defaults are in use.
var defaultNsRegexp = regexp.MustCompile(`ns1[a-z]{0,3}\.name\.com,ns2[a-z]{0,3}\.name\.com,ns3[a-z]{0,3}\.name\.com,ns4[a-z]{0,3}\.name\.com`)
|
||||||
|
|
||||||
|
// apiGetDomain builds the URL of the "domain/get" endpoint for domain.
func apiGetDomain(domain string) string {
	return fmt.Sprintf("%s/domain/get/%s", apiBase, domain)
}
|
||||||
|
// apiUpdateNS builds the URL of the "domain/update_nameservers" endpoint
// for domain.
func apiUpdateNS(domain string) string {
	return fmt.Sprintf("%s/domain/update_nameservers/%s", apiBase, domain)
}
|
||||||
|
|
||||||
|
// getDomainResult is the JSON shape of name.com's "domain/get" response;
// it embeds apiResult for the status code and message.
type getDomainResult struct {
	*apiResult
	DomainName  string   `json:"domain_name"`
	Nameservers []string `json:"nameservers"`
}
|
||||||
|
|
||||||
|
// getNameservers fetches the domain's delegation from name.com and returns
// it as a comma-joined list of nameservers in alphabetical order.
func (n *nameDotCom) getNameservers(domain string) (string, error) {
	result := &getDomainResult{}
	if err := n.get(apiGetDomain(domain), result); err != nil {
		return "", err
	}
	// The HTTP call can succeed while the API reports an error in its envelope.
	if err := result.getErr(); err != nil {
		return "", err
	}
	sort.Strings(result.Nameservers)
	return strings.Join(result.Nameservers, ","), nil
}
|
||||||
|
|
||||||
|
// updateNameservers returns a deferred action (suitable for a Correction.F)
// that POSTs the given nameserver list for domain when invoked.
func (n *nameDotCom) updateNameservers(ns []string, domain string) func() error {
	return func() error {
		// Anonymous struct matching the endpoint's expected request body.
		dat := struct {
			Nameservers []string `json:"nameservers"`
		}{ns}
		resp, err := n.post(apiUpdateNS(domain), dat)
		if err != nil {
			return err
		}
		// Check the API-level status envelope as well as the transport error.
		if err = resp.getErr(); err != nil {
			return err
		}
		return nil
	}
}
|
150
providers/namedotcom/nameservers_test.go
Normal file
150
providers/namedotcom/nameservers_test.go
Normal file
|
@ -0,0 +1,150 @@
|
||||||
|
package namedotcom
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/StackExchange/dnscontrol/models"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
mux *http.ServeMux
|
||||||
|
client *nameDotCom
|
||||||
|
server *httptest.Server
|
||||||
|
)
|
||||||
|
|
||||||
|
// setup spins up an httptest server with a fresh mux, builds a client with
// dummy credentials, and points the package-level apiBase at the server.
// Callers must invoke teardown() when done.
func setup() {
	mux = http.NewServeMux()
	server = httptest.NewServer(mux)

	client = &nameDotCom{
		APIUser: "bob",
		APIKey:  "123",
	}
	apiBase = server.URL
}
|
||||||
|
|
||||||
|
// teardown stops the httptest server started by setup.
func teardown() {
	server.Close()
}
|
||||||
|
|
||||||
|
// TestGetNameservers table-drives getNameservers against a fake name.com
// server. The sentinel inputs "ERR" (HTTP 500) and "MSGERR" (API-level error
// envelope) expect an error; other inputs are the raw JSON nameserver list
// and expect the sorted, comma-joined result.
// NOTE(review): the deferred teardown() inside the loop only runs when the
// test function returns, so one server per case stays open until the end.
func TestGetNameservers(t *testing.T) {
	for i, test := range []struct {
		givenNs, expected string
	}{
		{"", ""},
		{`"foo.ns.tld","bar.ns.tld"`, "bar.ns.tld,foo.ns.tld"},
		{"ERR", "ERR"},
		{"MSGERR", "ERR"},
	} {
		setup()
		defer teardown()

		mux.HandleFunc("/domain/get/example.tld", func(w http.ResponseWriter, r *http.Request) {
			if test.givenNs == "ERR" {
				http.Error(w, "UH OH", 500)
				return
			}
			if test.givenNs == "MSGERR" {
				w.Write(nameComError)
				return
			}
			w.Write(domainResponse(test.givenNs))
		})

		found, err := client.getNameservers("example.tld")
		if err != nil {
			if test.expected == "ERR" {
				continue
			}
			t.Errorf("Error on test %d: %s", i, err)
			continue
		}
		if test.expected == "ERR" {
			t.Errorf("Expected error on test %d, but was none", i)
			continue
		}
		if found != test.expected {
			t.Errorf("Test %d: Expected '%s', but found '%s'", i, test.expected, found)
		}
	}
}
|
||||||
|
|
||||||
|
// TestGetCorrections table-drives GetRegistrarCorrections against a fake
// name.com server. Each case specifies the nameservers the server reports
// and the number of corrections expected when the desired set is
// {foo.ns.tld, bar.ns.tld}; expected == -1 means an error is expected
// ("ERR" = HTTP 500, "MSGERR" = API-level error envelope).
func TestGetCorrections(t *testing.T) {
	for i, test := range []struct {
		givenNs  string
		expected int
	}{
		{"", 1},
		{`"foo.ns.tld","bar.ns.tld"`, 0},
		{`"bar.ns.tld","foo.ns.tld"`, 0},
		{`"foo.ns.tld"`, 1},
		{`"1.ns.aaa","2.ns.www"`, 1},
		{"ERR", -1}, //-1 means we expect an error
		{"MSGERR", -1},
	} {
		setup()
		defer teardown()

		mux.HandleFunc("/domain/get/example.tld", func(w http.ResponseWriter, r *http.Request) {
			if test.givenNs == "ERR" {
				http.Error(w, "UH OH", 500)
				return
			}
			if test.givenNs == "MSGERR" {
				w.Write(nameComError)
				return
			}
			w.Write(domainResponse(test.givenNs))
		})
		dc := &models.DomainConfig{
			Name: "example.tld",
			Nameservers: []*models.Nameserver{
				{Name: "foo.ns.tld"},
				{Name: "bar.ns.tld"},
			},
		}
		corrections, err := client.GetRegistrarCorrections(dc)
		if err != nil {
			if test.expected == -1 {
				continue
			}
			t.Errorf("Error on test %d: %s", i, err)
			continue
		}
		if test.expected == -1 {
			t.Errorf("Expected error on test %d, but was none", i)
			continue
		}
		if len(corrections) != test.expected {
			t.Errorf("Test %d: Expected '%d', but found '%d'", i, test.expected, len(corrections))
		}
	}
}
|
||||||
|
|
||||||
|
func domainResponse(ns string) []byte {
|
||||||
|
return []byte(fmt.Sprintf(`{
|
||||||
|
"result": {
|
||||||
|
"code": 100,
|
||||||
|
"message": "Command Successful"
|
||||||
|
},
|
||||||
|
"domain_name": "example.tld",
|
||||||
|
"create_date": "2015-12-28 18:08:05",
|
||||||
|
"expire_date": "2016-12-28 23:59:59",
|
||||||
|
"locked": true,
|
||||||
|
"nameservers": [%s],
|
||||||
|
"contacts": [],
|
||||||
|
"addons": {
|
||||||
|
"whois_privacy": {
|
||||||
|
"price": "3.99"
|
||||||
|
},
|
||||||
|
"domain\/renew": {
|
||||||
|
"price": "10.99"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}`, ns))
|
||||||
|
}
|
||||||
|
|
||||||
|
// nameComError is a canned name.com API error payload (result code 251,
// authentication failure) used to exercise the message-level error path.
var nameComError = []byte(`{"result":{"code":251,"message":"Authentication Error - Invalid Username Or Api Token"}}`)
|
168
providers/namedotcom/records.go
Normal file
168
providers/namedotcom/records.go
Normal file
|
@ -0,0 +1,168 @@
|
||||||
|
package namedotcom
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/miekg/dns/dnsutil"
|
||||||
|
|
||||||
|
"github.com/StackExchange/dnscontrol/models"
|
||||||
|
"github.com/StackExchange/dnscontrol/providers/diff"
|
||||||
|
)
|
||||||
|
|
||||||
|
var defaultNameservers = []*models.Nameserver{
|
||||||
|
{Name: "ns1.name.com"},
|
||||||
|
{Name: "ns2.name.com"},
|
||||||
|
{Name: "ns3.name.com"},
|
||||||
|
{Name: "ns4.name.com"},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *nameDotCom) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {
|
||||||
|
dc.Nameservers = defaultNameservers
|
||||||
|
records, err := n.getRecords(dc.Name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
actual := make([]diff.Record, len(records))
|
||||||
|
for i := range records {
|
||||||
|
actual[i] = records[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
desired := make([]diff.Record, len(dc.Records))
|
||||||
|
for i, rec := range dc.Records {
|
||||||
|
if rec.TTL == 0 {
|
||||||
|
rec.TTL = 300
|
||||||
|
}
|
||||||
|
desired[i] = rec
|
||||||
|
}
|
||||||
|
|
||||||
|
_, create, del, mod := diff.IncrementalDiff(actual, desired)
|
||||||
|
corrections := []*models.Correction{}
|
||||||
|
|
||||||
|
for _, d := range del {
|
||||||
|
rec := d.Existing.(*nameComRecord)
|
||||||
|
c := &models.Correction{Msg: d.String(), F: func() error { return n.deleteRecord(rec.RecordID, dc.Name) }}
|
||||||
|
corrections = append(corrections, c)
|
||||||
|
}
|
||||||
|
for _, cre := range create {
|
||||||
|
rec := cre.Desired.(*models.RecordConfig)
|
||||||
|
c := &models.Correction{Msg: cre.String(), F: func() error { return n.createRecord(rec, dc.Name) }}
|
||||||
|
corrections = append(corrections, c)
|
||||||
|
}
|
||||||
|
for _, chng := range mod {
|
||||||
|
old := chng.Existing.(*nameComRecord)
|
||||||
|
new := chng.Desired.(*models.RecordConfig)
|
||||||
|
c := &models.Correction{Msg: chng.String(), F: func() error {
|
||||||
|
err := n.deleteRecord(old.RecordID, dc.Name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return n.createRecord(new, dc.Name)
|
||||||
|
}}
|
||||||
|
corrections = append(corrections, c)
|
||||||
|
}
|
||||||
|
return corrections, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func apiGetRecords(domain string) string {
|
||||||
|
return fmt.Sprintf("%s/dns/list/%s", apiBase, domain)
|
||||||
|
}
|
||||||
|
func apiCreateRecord(domain string) string {
|
||||||
|
return fmt.Sprintf("%s/dns/create/%s", apiBase, domain)
|
||||||
|
}
|
||||||
|
func apiDeleteRecord(domain string) string {
|
||||||
|
return fmt.Sprintf("%s/dns/delete/%s", apiBase, domain)
|
||||||
|
}
|
||||||
|
|
||||||
|
// nameComRecord is the JSON shape of a single DNS record as returned by the
// name.com API. Note that TTL and Priority arrive as strings.
type nameComRecord struct {
	RecordID string `json:"record_id"`
	Name     string `json:"name"`
	Type     string `json:"type"`
	Content  string `json:"content"`
	TTL      string `json:"ttl"`
	Priority string `json:"priority"`
}

// GetName implements the diff record interface.
func (r *nameComRecord) GetName() string { return r.Name }

// GetType implements the diff record interface.
func (r *nameComRecord) GetType() string { return r.Type }

// GetContent implements the diff record interface.
func (r *nameComRecord) GetContent() string { return r.Content }

// GetComparisionData returns the change-detection payload: the TTL, plus the
// priority for MX records. (The "Comparision" spelling matches the interface
// this satisfies and cannot be renamed here.)
func (r *nameComRecord) GetComparisionData() string {
	if r.Type != "MX" {
		return r.TTL
	}
	return fmt.Sprintf("%s %s ", r.TTL, r.Priority)
}
|
||||||
|
|
||||||
|
type listRecordsResponse struct {
|
||||||
|
*apiResult
|
||||||
|
Records []*nameComRecord `json:"records"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *nameDotCom) getRecords(domain string) ([]*nameComRecord, error) {
|
||||||
|
result := &listRecordsResponse{}
|
||||||
|
err := n.get(apiGetRecords(domain), result)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = result.getErr(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, rc := range result.Records {
|
||||||
|
if rc.Type == "CNAME" || rc.Type == "MX" || rc.Type == "NS" {
|
||||||
|
rc.Content = rc.Content + "."
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result.Records, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *nameDotCom) createRecord(rc *models.RecordConfig, domain string) error {
|
||||||
|
target := rc.Target
|
||||||
|
if rc.Type == "CNAME" || rc.Type == "MX" || rc.Type == "NS" {
|
||||||
|
if target[len(target)-1] == '.' {
|
||||||
|
target = target[:len(target)-1]
|
||||||
|
} else {
|
||||||
|
return fmt.Errorf("Unexpected. CNAME/MX/NS target did not end with dot.\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
dat := struct {
|
||||||
|
Hostname string `json:"hostname"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Content string `json:"content"`
|
||||||
|
TTL uint32 `json:"ttl,omitempty"`
|
||||||
|
Priority uint16 `json:"priority,omitempty"`
|
||||||
|
}{
|
||||||
|
Hostname: dnsutil.TrimDomainName(rc.NameFQDN, domain),
|
||||||
|
Type: rc.Type,
|
||||||
|
Content: target,
|
||||||
|
TTL: rc.TTL,
|
||||||
|
Priority: rc.Priority,
|
||||||
|
}
|
||||||
|
if dat.Hostname == "@" {
|
||||||
|
dat.Hostname = ""
|
||||||
|
}
|
||||||
|
resp, err := n.post(apiCreateRecord(domain), dat)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return resp.getErr()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *nameDotCom) deleteRecord(id, domain string) error {
|
||||||
|
dat := struct {
|
||||||
|
ID string `json:"record_id"`
|
||||||
|
}{id}
|
||||||
|
resp, err := n.post(apiDeleteRecord(domain), dat)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return resp.getErr()
|
||||||
|
}
|
119
providers/providers.go
Normal file
119
providers/providers.go
Normal file
|
@ -0,0 +1,119 @@
|
||||||
|
package providers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/StackExchange/dnscontrol/models"
|
||||||
|
)
|
||||||
|
|
||||||
|
//Registrar is an interface for a domain registrar. It can return a list of needed corrections to be applied in the future.
|
||||||
|
type Registrar interface {
|
||||||
|
GetRegistrarCorrections(dc *models.DomainConfig) ([]*models.Correction, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
//DNSServiceProvider is able to generate a set of corrections that need to be made to correct records for a domain
|
||||||
|
type DNSServiceProvider interface {
|
||||||
|
GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
//RegistrarInitializer is a function to create a registrar. Function will be passed the unprocessed json payload from the configuration file for the given provider.
|
||||||
|
type RegistrarInitializer func(map[string]string) (Registrar, error)
|
||||||
|
|
||||||
|
var registrarTypes = map[string]RegistrarInitializer{}
|
||||||
|
|
||||||
|
//DspInitializer is a function to create a registrar. Function will be passed the unprocessed json payload from the configuration file for the given provider.
|
||||||
|
type DspInitializer func(map[string]string, json.RawMessage) (DNSServiceProvider, error)
|
||||||
|
|
||||||
|
var dspTypes = map[string]DspInitializer{}
|
||||||
|
|
||||||
|
//RegisterRegistrarType adds a registrar type to the registry by providing a suitable initialization function.
|
||||||
|
func RegisterRegistrarType(name string, init RegistrarInitializer) {
|
||||||
|
if _, ok := registrarTypes[name]; ok {
|
||||||
|
log.Fatalf("Cannot register registrar type %s multiple times", name)
|
||||||
|
}
|
||||||
|
registrarTypes[name] = init
|
||||||
|
}
|
||||||
|
|
||||||
|
//RegisterDomainServiceProviderType adds a dsp to the registry with the given initialization function.
|
||||||
|
func RegisterDomainServiceProviderType(name string, init DspInitializer) {
|
||||||
|
if _, ok := dspTypes[name]; ok {
|
||||||
|
log.Fatalf("Cannot register registrar type %s multiple times", name)
|
||||||
|
}
|
||||||
|
dspTypes[name] = init
|
||||||
|
}
|
||||||
|
|
||||||
|
func createRegistrar(rType string, config map[string]string) (Registrar, error) {
|
||||||
|
initer, ok := registrarTypes[rType]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Registrar type %s not declared.", rType)
|
||||||
|
}
|
||||||
|
return initer(config)
|
||||||
|
}
|
||||||
|
|
||||||
|
func createDNSProvider(dType string, config map[string]string, meta json.RawMessage) (DNSServiceProvider, error) {
|
||||||
|
initer, ok := dspTypes[dType]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("DSP type %s not declared", dType)
|
||||||
|
}
|
||||||
|
return initer(config, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
//CreateRegistrars will load all registrars from the dns config, and create instances of the correct type using data from
|
||||||
|
//the provider config to load relevant keys and options.
|
||||||
|
func CreateRegistrars(d *models.DNSConfig, providerConfigs map[string]map[string]string) (map[string]Registrar, error) {
|
||||||
|
regs := map[string]Registrar{}
|
||||||
|
for _, reg := range d.Registrars {
|
||||||
|
rawMsg, ok := providerConfigs[reg.Name]
|
||||||
|
if !ok && reg.Type != "NONE" {
|
||||||
|
return nil, fmt.Errorf("Registrar %s not listed in -providers file.", reg.Name)
|
||||||
|
}
|
||||||
|
registrar, err := createRegistrar(reg.Type, rawMsg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
regs[reg.Name] = registrar
|
||||||
|
}
|
||||||
|
return regs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func CreateDsps(d *models.DNSConfig, providerConfigs map[string]map[string]string) (map[string]DNSServiceProvider, error) {
|
||||||
|
dsps := map[string]DNSServiceProvider{}
|
||||||
|
for _, dsp := range d.DNSProviders {
|
||||||
|
//log.Printf("dsp.Name=%#v\n", dsp.Name)
|
||||||
|
rawMsg, ok := providerConfigs[dsp.Name]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("DNSServiceProvider %s not listed in -providers file.", dsp.Name)
|
||||||
|
}
|
||||||
|
provider, err := createDNSProvider(dsp.Type, rawMsg, dsp.Metadata)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("createDNSProvider provider=%#v\n", provider)
|
||||||
|
log.Printf("createDNSProvider err=%#v\n", err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
dsps[dsp.Name] = provider
|
||||||
|
}
|
||||||
|
return dsps, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// None is a basivc provider type that does absolutely nothing. Can be useful as a placeholder for third parties or unimplemented providers.
|
||||||
|
type None struct{}
|
||||||
|
|
||||||
|
func (n None) GetRegistrarCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n None) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
RegisterRegistrarType("NONE", func(map[string]string) (Registrar, error) {
|
||||||
|
return None{}, nil
|
||||||
|
})
|
||||||
|
RegisterDomainServiceProviderType("NONE", func(map[string]string, json.RawMessage) (DNSServiceProvider, error) {
|
||||||
|
return None{}, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
}
|
269
providers/route53/route53Provider.go
Normal file
269
providers/route53/route53Provider.go
Normal file
|
@ -0,0 +1,269 @@
|
||||||
|
package route53
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
|
r53 "github.com/aws/aws-sdk-go/service/route53"
|
||||||
|
"github.com/StackExchange/dnscontrol/models"
|
||||||
|
"github.com/StackExchange/dnscontrol/providers"
|
||||||
|
"github.com/StackExchange/dnscontrol/providers/diff"
|
||||||
|
)
|
||||||
|
|
||||||
|
type route53Provider struct {
|
||||||
|
client *r53.Route53
|
||||||
|
zones map[string]*r53.HostedZone
|
||||||
|
}
|
||||||
|
|
||||||
|
func newRoute53(m map[string]string, metadata json.RawMessage) (providers.DNSServiceProvider, error) {
|
||||||
|
keyId, secretKey := m["KeyId"], m["SecretKey"]
|
||||||
|
if keyId == "" || secretKey == "" {
|
||||||
|
return nil, fmt.Errorf("Route53 KeyId and SecretKey must be provided.")
|
||||||
|
}
|
||||||
|
sess := session.New(&aws.Config{
|
||||||
|
Region: aws.String("us-west-2"),
|
||||||
|
Credentials: credentials.NewStaticCredentials(keyId, secretKey, ""),
|
||||||
|
})
|
||||||
|
|
||||||
|
api := &route53Provider{client: r53.New(sess)}
|
||||||
|
return api, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
providers.RegisterDomainServiceProviderType("ROUTE53", newRoute53)
|
||||||
|
}
|
||||||
|
func sPtr(s string) *string {
|
||||||
|
return &s
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *route53Provider) getZones() error {
|
||||||
|
var nextMarker *string
|
||||||
|
r.zones = make(map[string]*r53.HostedZone)
|
||||||
|
for {
|
||||||
|
inp := &r53.ListHostedZonesInput{MaxItems: sPtr("1"), Marker: nextMarker}
|
||||||
|
out, err := r.client.ListHostedZones(inp)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, z := range out.HostedZones {
|
||||||
|
domain := strings.TrimSuffix(*z.Name, ".")
|
||||||
|
r.zones[domain] = z
|
||||||
|
}
|
||||||
|
if out.NextMarker != nil {
|
||||||
|
nextMarker = out.NextMarker
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
//map key for grouping records
|
||||||
|
type key struct {
|
||||||
|
Name, Type string
|
||||||
|
}
|
||||||
|
|
||||||
|
func getKey(r diff.Record) key {
|
||||||
|
return key{r.GetName(), r.GetType()}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *route53Provider) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {
|
||||||
|
if r.zones == nil {
|
||||||
|
if err := r.getZones(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var corrections = []*models.Correction{}
|
||||||
|
zone, ok := r.zones[dc.Name]
|
||||||
|
// add zone if it doesn't exist
|
||||||
|
if !ok {
|
||||||
|
//add correction to add zone
|
||||||
|
corrections = append(corrections,
|
||||||
|
&models.Correction{
|
||||||
|
Msg: "Add zone to aws",
|
||||||
|
F: func() error {
|
||||||
|
in := &r53.CreateHostedZoneInput{
|
||||||
|
Name: &dc.Name,
|
||||||
|
CallerReference: sPtr(fmt.Sprint(time.Now().UnixNano())),
|
||||||
|
}
|
||||||
|
out, err := r.client.CreateHostedZone(in)
|
||||||
|
zone = out.HostedZone
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
})
|
||||||
|
//fake zone
|
||||||
|
zone = &r53.HostedZone{
|
||||||
|
Id: sPtr(""),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
records, err := r.fetchRecordSets(zone.Id)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
//convert to dnscontrol RecordConfig format
|
||||||
|
dc.Nameservers = nil
|
||||||
|
var existingRecords = []*models.RecordConfig{}
|
||||||
|
for _, set := range records {
|
||||||
|
for _, rec := range set.ResourceRecords {
|
||||||
|
if *set.Type == "SOA" {
|
||||||
|
continue
|
||||||
|
} else if *set.Type == "NS" && strings.TrimSuffix(*set.Name, ".") == dc.Name {
|
||||||
|
dc.Nameservers = append(dc.Nameservers, &models.Nameserver{Name: strings.TrimSuffix(*rec.Value, ".")})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
r := &models.RecordConfig{
|
||||||
|
NameFQDN: unescape(set.Name),
|
||||||
|
Type: *set.Type,
|
||||||
|
Target: *rec.Value,
|
||||||
|
TTL: uint32(*set.TTL),
|
||||||
|
}
|
||||||
|
existingRecords = append(existingRecords, r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
e, w := []diff.Record{}, []diff.Record{}
|
||||||
|
for _, ex := range existingRecords {
|
||||||
|
e = append(e, ex)
|
||||||
|
}
|
||||||
|
for _, want := range dc.Records {
|
||||||
|
if want.TTL == 0 {
|
||||||
|
want.TTL = 300
|
||||||
|
}
|
||||||
|
if want.Type == "MX" {
|
||||||
|
want.Target = fmt.Sprintf("%d %s", want.Priority, want.Target)
|
||||||
|
want.Priority = 0
|
||||||
|
} else if want.Type == "TXT" {
|
||||||
|
want.Target = fmt.Sprintf(`"%s"`, want.Target) //FIXME: better escaping/quoting
|
||||||
|
}
|
||||||
|
w = append(w, want)
|
||||||
|
}
|
||||||
|
|
||||||
|
//diff
|
||||||
|
changeDesc := ""
|
||||||
|
_, create, delete, modify := diff.IncrementalDiff(e, w)
|
||||||
|
|
||||||
|
namesToUpdate := map[key]bool{}
|
||||||
|
for _, c := range create {
|
||||||
|
namesToUpdate[getKey(c.Desired)] = true
|
||||||
|
changeDesc += fmt.Sprintln(c)
|
||||||
|
}
|
||||||
|
for _, d := range delete {
|
||||||
|
namesToUpdate[getKey(d.Existing)] = true
|
||||||
|
changeDesc += fmt.Sprintln(d)
|
||||||
|
}
|
||||||
|
for _, m := range modify {
|
||||||
|
namesToUpdate[getKey(m.Desired)] = true
|
||||||
|
changeDesc += fmt.Sprintln(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(namesToUpdate) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
updates := map[key][]*models.RecordConfig{}
|
||||||
|
//for each name we need to update, collect relevant records from dc
|
||||||
|
for k := range namesToUpdate {
|
||||||
|
updates[k] = nil
|
||||||
|
for _, rc := range dc.Records {
|
||||||
|
if getKey(rc) == k {
|
||||||
|
updates[k] = append(updates[k], rc)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
changes := []*r53.Change{}
|
||||||
|
for k, recs := range updates {
|
||||||
|
chg := &r53.Change{}
|
||||||
|
changes = append(changes, chg)
|
||||||
|
var rrset *r53.ResourceRecordSet
|
||||||
|
if len(recs) == 0 {
|
||||||
|
chg.Action = sPtr("DELETE")
|
||||||
|
// on delete just submit the original resource set we got from r53.
|
||||||
|
for _, r := range records {
|
||||||
|
if *r.Name == k.Name+"." && *r.Type == k.Type {
|
||||||
|
rrset = r
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
//on change or create, just build a new record set from our desired state
|
||||||
|
chg.Action = sPtr("UPSERT")
|
||||||
|
rrset = &r53.ResourceRecordSet{
|
||||||
|
Name: sPtr(k.Name),
|
||||||
|
Type: sPtr(k.Type),
|
||||||
|
ResourceRecords: []*r53.ResourceRecord{},
|
||||||
|
}
|
||||||
|
for _, r := range recs {
|
||||||
|
val := r.Target
|
||||||
|
rr := &r53.ResourceRecord{
|
||||||
|
Value: &val,
|
||||||
|
}
|
||||||
|
rrset.ResourceRecords = append(rrset.ResourceRecords, rr)
|
||||||
|
i := int64(r.TTL)
|
||||||
|
rrset.TTL = &i //TODO: make sure that ttls are consistent within a set
|
||||||
|
}
|
||||||
|
}
|
||||||
|
chg.ResourceRecordSet = rrset
|
||||||
|
}
|
||||||
|
|
||||||
|
changeReq := &r53.ChangeResourceRecordSetsInput{
|
||||||
|
ChangeBatch: &r53.ChangeBatch{Changes: changes},
|
||||||
|
}
|
||||||
|
|
||||||
|
corrections = append(corrections,
|
||||||
|
&models.Correction{
|
||||||
|
Msg: changeDesc,
|
||||||
|
F: func() error {
|
||||||
|
changeReq.HostedZoneId = zone.Id
|
||||||
|
_, err := r.client.ChangeResourceRecordSets(changeReq)
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
})
|
||||||
|
return corrections, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *route53Provider) fetchRecordSets(zoneID *string) ([]*r53.ResourceRecordSet, error) {
|
||||||
|
if zoneID == nil || *zoneID == "" {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
var next *string
|
||||||
|
var nextType *string
|
||||||
|
var records []*r53.ResourceRecordSet
|
||||||
|
for {
|
||||||
|
listInput := &r53.ListResourceRecordSetsInput{
|
||||||
|
HostedZoneId: zoneID,
|
||||||
|
StartRecordName: next,
|
||||||
|
StartRecordType: nextType,
|
||||||
|
MaxItems: sPtr("100"),
|
||||||
|
}
|
||||||
|
list, err := r.client.ListResourceRecordSets(listInput)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
records = append(records, list.ResourceRecordSets...)
|
||||||
|
if list.NextRecordName != nil {
|
||||||
|
next = list.NextRecordName
|
||||||
|
nextType = list.NextRecordType
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return records, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
//we have to process names from route53 to match what we expect and to remove their odd octal encoding
// Currently only \052 (*) is decoded; nil input yields "".
func unescape(s *string) string {
	if s == nil {
		return ""
	}
	name := strings.TrimSuffix(*s, ".")
	return strings.Replace(name, `\052`, "*", -1) //TODO: escape all octal sequences
}
|
24
providers/route53/route53Provider_test.go
Normal file
24
providers/route53/route53Provider_test.go
Normal file
|
@ -0,0 +1,24 @@
|
||||||
|
package route53
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestUnescape(t *testing.T) {
|
||||||
|
var tests = []struct {
|
||||||
|
experiment, expected string
|
||||||
|
}{
|
||||||
|
{"foo", "foo"},
|
||||||
|
{"foo.", "foo"},
|
||||||
|
{"foo..", "foo."},
|
||||||
|
{"foo...", "foo.."},
|
||||||
|
{`\052`, "*"},
|
||||||
|
{`\052.foo..`, "*.foo."},
|
||||||
|
// {`\053.foo`, "+.foo"}, // Not implemented yet.
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, test := range tests {
|
||||||
|
actual := unescape(&test.experiment)
|
||||||
|
if test.expected != actual {
|
||||||
|
t.Errorf("%d: Expected %s, got %s", i, test.expected, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
113
transform/transform.go
Normal file
113
transform/transform.go
Normal file
|
@ -0,0 +1,113 @@
|
||||||
|
package transform
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type IpConversion struct {
|
||||||
|
Low, High, NewBase net.IP
|
||||||
|
NewIPs []net.IP
|
||||||
|
}
|
||||||
|
|
||||||
|
func ipToUint(i net.IP) (uint32, error) {
|
||||||
|
parts := i.To4()
|
||||||
|
if parts == nil || len(parts) != 4 {
|
||||||
|
return 0, fmt.Errorf("%s is not an ipv4 address", parts.String())
|
||||||
|
}
|
||||||
|
r := uint32(parts[0])<<24 | uint32(parts[1])<<16 | uint32(parts[2])<<8 | uint32(parts[3])
|
||||||
|
return r, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func UintToIP(u uint32) net.IP {
|
||||||
|
return net.IPv4(
|
||||||
|
byte((u>>24)&255),
|
||||||
|
byte((u>>16)&255),
|
||||||
|
byte((u>>8)&255),
|
||||||
|
byte((u)&255))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeTransformTable turns a string-encoded table into a list of conversions.
|
||||||
|
func DecodeTransformTable(transforms string) ([]IpConversion, error) {
|
||||||
|
result := []IpConversion{}
|
||||||
|
rows := strings.Split(transforms, ";")
|
||||||
|
for ri, row := range rows {
|
||||||
|
items := strings.Split(row, "~")
|
||||||
|
if len(items) != 4 {
|
||||||
|
return nil, fmt.Errorf("transform_table rows should have 4 elements. (%v) found in row (%v) of %#v\n", len(items), ri, transforms)
|
||||||
|
}
|
||||||
|
for i, item := range items {
|
||||||
|
items[i] = strings.TrimSpace(item)
|
||||||
|
}
|
||||||
|
|
||||||
|
con := IpConversion{
|
||||||
|
Low: net.ParseIP(items[0]),
|
||||||
|
High: net.ParseIP(items[1]),
|
||||||
|
NewBase: net.ParseIP(items[2]),
|
||||||
|
}
|
||||||
|
for _, ip := range strings.Split(items[3], ",") {
|
||||||
|
if ip == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
addr := net.ParseIP(ip)
|
||||||
|
if addr == nil {
|
||||||
|
return nil, fmt.Errorf("%s is not a valid ip address", ip)
|
||||||
|
}
|
||||||
|
con.NewIPs = append(con.NewIPs, addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
low, _ := ipToUint(con.Low)
|
||||||
|
high, _ := ipToUint(con.High)
|
||||||
|
if low > high {
|
||||||
|
return nil, fmt.Errorf("transform_table Low should be less than High. row (%v) %v>%v (%v)\n", ri, con.Low, con.High, transforms)
|
||||||
|
}
|
||||||
|
if con.NewBase != nil && con.NewIPs != nil {
|
||||||
|
return nil, fmt.Errorf("transform_table_rows should only specify one of NewBase or NewIP. Not both.")
|
||||||
|
}
|
||||||
|
result = append(result, con)
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TransformIP transforms a single ip address. If the transform results in multiple new targets, an error will be returned.
|
||||||
|
func TransformIP(address net.IP, transforms []IpConversion) (net.IP, error) {
|
||||||
|
ips, err := TransformIPToList(address, transforms)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(ips) != 1 {
|
||||||
|
return nil, fmt.Errorf("Expect exactly one ip for TransformIP result. Got: %s", ips)
|
||||||
|
}
|
||||||
|
return ips[0], err
|
||||||
|
}
|
||||||
|
|
||||||
|
// TransformIPToList manipulates an net.IP based on a list of IpConversions. It can potentially expand one ip address into multiple addresses.
|
||||||
|
func TransformIPToList(address net.IP, transforms []IpConversion) ([]net.IP, error) {
|
||||||
|
thisIP, err := ipToUint(address)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, conv := range transforms {
|
||||||
|
min, err := ipToUint(conv.Low)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
max, err := ipToUint(conv.High)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if (thisIP >= min) && (thisIP <= max) {
|
||||||
|
if conv.NewIPs != nil {
|
||||||
|
return conv.NewIPs, nil
|
||||||
|
}
|
||||||
|
newbase, err := ipToUint(conv.NewBase)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return []net.IP{UintToIP(newbase + (thisIP - min))}, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return []net.IP{address}, nil
|
||||||
|
}
|
213
transform/transform_test.go
Normal file
213
transform/transform_test.go
Normal file
|
@ -0,0 +1,213 @@
|
||||||
|
package transform
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestIPToUint(t *testing.T) {
|
||||||
|
ip := net.ParseIP("1.2.3.4")
|
||||||
|
u, err := ipToUint(ip)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if u != 16909060 {
|
||||||
|
t.Fatalf("I to uint conversion failed. Should be 16909060. Got %d", u)
|
||||||
|
}
|
||||||
|
ip2 := UintToIP(u)
|
||||||
|
if !ip.Equal(ip2) {
|
||||||
|
t.Fatalf("IPs should be equal. %s is not %s", ip2, ip)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_DecodeTransformTable_failures(t *testing.T) {
|
||||||
|
result, err := DecodeTransformTable("1.2.3.4 ~ 3.4.5.6")
|
||||||
|
if result != nil {
|
||||||
|
t.Errorf("expected nil, got (%v)\n", result)
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
t.Error("expect error, got none")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func test_ip(t *testing.T, test string, expected string, actual net.IP) {
|
||||||
|
if !net.ParseIP(expected).Equal(actual) {
|
||||||
|
t.Errorf("Test %v: expected Low (%v), got (%v)\n", test, actual, expected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_DecodeTransformTable_0(t *testing.T) {
|
||||||
|
result, err := DecodeTransformTable("1.2.3.4 ~ 2.3.4.5 ~ 3.4.5.6 ~ ")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if len(result) != 1 {
|
||||||
|
t.Errorf("Test %v: expected col length (%v), got (%v)\n", 1, 1, len(result))
|
||||||
|
}
|
||||||
|
test_ip(t, "low", "1.2.3.4", result[0].Low)
|
||||||
|
test_ip(t, "high", "2.3.4.5", result[0].High)
|
||||||
|
test_ip(t, "newBase", "3.4.5.6", result[0].NewBase)
|
||||||
|
//test_ip(t, "newIP", "", result[0].NewIPs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_DecodeTransformTable_1(t *testing.T) {
|
||||||
|
result, err := DecodeTransformTable("1.2.3.4~2.3.4.5~3.4.5.6 ~;8.7.6.5 ~ 9.8.7.6 ~ 7.6.5.4 ~ ")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if len(result) != 2 {
|
||||||
|
t.Errorf("Test %v: expected col length (%v), got (%v)\n", 1, 2, len(result))
|
||||||
|
}
|
||||||
|
test_ip(t, "Low[0]", "1.2.3.4", result[0].Low)
|
||||||
|
test_ip(t, "High[0]", "2.3.4.5", result[0].High)
|
||||||
|
test_ip(t, "NewBase[0]", "3.4.5.6", result[0].NewBase)
|
||||||
|
//test_ip(t, "newIP[0]", "", result[0].NewIP)
|
||||||
|
test_ip(t, "Low[1]", "8.7.6.5", result[1].Low)
|
||||||
|
test_ip(t, "High[1]", "9.8.7.6", result[1].High)
|
||||||
|
test_ip(t, "NewBase[1]", "7.6.5.4", result[1].NewBase)
|
||||||
|
//test_ip(t, "newIP[1]", "", result[0].NewIP)
|
||||||
|
}
|
||||||
|
func Test_DecodeTransformTable_NewIP(t *testing.T) {
|
||||||
|
result, err := DecodeTransformTable("1.2.3.4 ~ 2.3.4.5 ~ ~ 3.4.5.6 ")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if len(result) != 1 {
|
||||||
|
t.Errorf("Test %v: expected col length (%v), got (%v)\n", 1, 1, len(result))
|
||||||
|
}
|
||||||
|
test_ip(t, "low", "1.2.3.4", result[0].Low)
|
||||||
|
test_ip(t, "high", "2.3.4.5", result[0].High)
|
||||||
|
//test_ip(t, "newIP", "3.4.5.6", result[0].NewIP)
|
||||||
|
test_ip(t, "newBase", "", result[0].NewBase)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_DecodeTransformTable_order(t *testing.T) {
|
||||||
|
raw := "9.8.7.6 ~ 8.7.6.5 ~ 7.6.5.4 ~"
|
||||||
|
result, err := DecodeTransformTable(raw)
|
||||||
|
if result != nil {
|
||||||
|
t.Errorf("Invalid range not detected: (%v)\n", raw)
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
t.Error("expect error, got none")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_DecodeTransformTable_Base_and_IP(t *testing.T) {
|
||||||
|
raw := "1.1.1.1~ 8.7.6.5 ~ 7.6.5.4 ~ 4.4.4.4"
|
||||||
|
result, err := DecodeTransformTable(raw)
|
||||||
|
if result != nil {
|
||||||
|
t.Errorf("NewBase and NewIP should not both be specified: (%v)\n", raw)
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
t.Error("expect error, got none")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_TransformIP(t *testing.T) {
|
||||||
|
|
||||||
|
var transforms1 = []IpConversion{{
|
||||||
|
Low: net.ParseIP("11.11.11.0"),
|
||||||
|
High: net.ParseIP("11.11.11.20"),
|
||||||
|
NewBase: net.ParseIP("99.99.99.0"),
|
||||||
|
}, {
|
||||||
|
Low: net.ParseIP("22.22.22.0"),
|
||||||
|
High: net.ParseIP("22.22.22.40"),
|
||||||
|
NewBase: net.ParseIP("99.99.99.100"),
|
||||||
|
}, {
|
||||||
|
Low: net.ParseIP("33.33.33.20"),
|
||||||
|
High: net.ParseIP("33.33.35.40"),
|
||||||
|
NewBase: net.ParseIP("100.100.100.0"),
|
||||||
|
}, {
|
||||||
|
Low: net.ParseIP("44.44.44.20"),
|
||||||
|
High: net.ParseIP("44.44.44.40"),
|
||||||
|
NewBase: net.ParseIP("100.100.100.40"),
|
||||||
|
}}
|
||||||
|
|
||||||
|
var tests = []struct {
|
||||||
|
experiment string
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
{"11.11.11.0", "99.99.99.0"},
|
||||||
|
{"11.11.11.1", "99.99.99.1"},
|
||||||
|
{"11.11.11.11", "99.99.99.11"},
|
||||||
|
{"11.11.11.19", "99.99.99.19"},
|
||||||
|
{"11.11.11.20", "99.99.99.20"},
|
||||||
|
{"11.11.11.21", "11.11.11.21"},
|
||||||
|
{"22.22.22.22", "99.99.99.122"},
|
||||||
|
{"22.22.22.255", "22.22.22.255"},
|
||||||
|
{"33.33.33.0", "33.33.33.0"},
|
||||||
|
{"33.33.33.19", "33.33.33.19"},
|
||||||
|
{"33.33.33.20", "100.100.100.0"},
|
||||||
|
{"33.33.33.21", "100.100.100.1"},
|
||||||
|
{"33.33.33.33", "100.100.100.13"},
|
||||||
|
{"33.33.35.39", "100.100.102.19"},
|
||||||
|
{"33.33.35.40", "100.100.102.20"},
|
||||||
|
{"33.33.35.41", "33.33.35.41"},
|
||||||
|
{"44.44.44.24", "100.100.100.44"},
|
||||||
|
{"44.44.44.44", "44.44.44.44"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
experiment := net.ParseIP(test.experiment)
|
||||||
|
expected := net.ParseIP(test.expected)
|
||||||
|
actual, err := TransformIP(experiment, transforms1)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%v: got an err: %v\n", experiment, err)
|
||||||
|
}
|
||||||
|
if !expected.Equal(actual) {
|
||||||
|
t.Errorf("%v: expected (%v) got (%v)\n", experiment, expected, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_TransformIP_NewIP(t *testing.T) {
|
||||||
|
|
||||||
|
var transforms1 = []IpConversion{{
|
||||||
|
Low: net.ParseIP("11.11.11.0"),
|
||||||
|
High: net.ParseIP("11.11.11.20"),
|
||||||
|
NewIPs: []net.IP{net.ParseIP("1.1.1.1")},
|
||||||
|
}, {
|
||||||
|
Low: net.ParseIP("22.22.22.0"),
|
||||||
|
High: net.ParseIP("22.22.22.40"),
|
||||||
|
NewIPs: []net.IP{net.ParseIP("2.2.2.2")},
|
||||||
|
}, {
|
||||||
|
Low: net.ParseIP("33.33.33.20"),
|
||||||
|
High: net.ParseIP("33.33.35.40"),
|
||||||
|
NewIPs: []net.IP{net.ParseIP("3.3.3.3")},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var tests = []struct {
|
||||||
|
experiment string
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
{"11.11.11.0", "1.1.1.1"},
|
||||||
|
{"11.11.11.1", "1.1.1.1"},
|
||||||
|
{"11.11.11.11", "1.1.1.1"},
|
||||||
|
{"11.11.11.19", "1.1.1.1"},
|
||||||
|
{"11.11.11.20", "1.1.1.1"},
|
||||||
|
{"11.11.11.21", "11.11.11.21"},
|
||||||
|
{"22.22.22.22", "2.2.2.2"},
|
||||||
|
{"22.22.22.255", "22.22.22.255"},
|
||||||
|
{"33.33.33.0", "33.33.33.0"},
|
||||||
|
{"33.33.33.19", "33.33.33.19"},
|
||||||
|
{"33.33.33.20", "3.3.3.3"},
|
||||||
|
{"33.33.33.21", "3.3.3.3"},
|
||||||
|
{"33.33.33.33", "3.3.3.3"},
|
||||||
|
{"33.33.35.39", "3.3.3.3"},
|
||||||
|
{"33.33.35.40", "3.3.3.3"},
|
||||||
|
{"33.33.35.41", "33.33.35.41"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
experiment := net.ParseIP(test.experiment)
|
||||||
|
expected := net.ParseIP(test.expected)
|
||||||
|
actual, err := TransformIP(experiment, transforms1)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%v: got an err: %v\n", experiment, err)
|
||||||
|
}
|
||||||
|
if !expected.Equal(actual) {
|
||||||
|
t.Errorf("%v: expected (%v) got (%v)\n", experiment, expected, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
4
vendor/github.com/DisposaBoy/JsonConfigReader/AUTHORS.md
generated
vendored
Normal file
4
vendor/github.com/DisposaBoy/JsonConfigReader/AUTHORS.md
generated
vendored
Normal file
|
@ -0,0 +1,4 @@
|
||||||
|
This is the official list of JsonConfigReader authors for copyright purposes.
|
||||||
|
|
||||||
|
* DisposaBoy `https://github.com/DisposaBoy`
|
||||||
|
* Steven Osborn `https://github.com/steve918`
|
7
vendor/github.com/DisposaBoy/JsonConfigReader/LICENSE.md
generated
vendored
Normal file
7
vendor/github.com/DisposaBoy/JsonConfigReader/LICENSE.md
generated
vendored
Normal file
|
@ -0,0 +1,7 @@
|
||||||
|
Copyright (c) 2012 The JsonConfigReader Authors
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
36
vendor/github.com/DisposaBoy/JsonConfigReader/README.md
generated
vendored
Normal file
36
vendor/github.com/DisposaBoy/JsonConfigReader/README.md
generated
vendored
Normal file
|
@ -0,0 +1,36 @@
|
||||||
|
JsonConfigReader is a proxy for [golang's io.Reader](http://golang.org/pkg/io/#Reader) that strips line comments and trailing commas, allowing you to use json as a *reasonable* config format.
|
||||||
|
|
||||||
|
Comments start with `//` and continue to the end of the line.
|
||||||
|
|
||||||
|
If a trailing comma is in front of `]` or `}` it will be stripped as well.
|
||||||
|
|
||||||
|
|
||||||
|
Given `settings.json`
|
||||||
|
|
||||||
|
{
|
||||||
|
"key": "value", // k:v
|
||||||
|
|
||||||
|
// a list of numbers
|
||||||
|
"list": [1, 2, 3],
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
You can read it in as a *normal* json file:
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"github.com/DisposaBoy/JsonConfigReader"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
var v interface{}
|
||||||
|
f, _ := os.Open("settings.json")
|
||||||
|
// wrap our reader before passing it to the json decoder
|
||||||
|
r := JsonConfigReader.New(f)
|
||||||
|
json.NewDecoder(r).Decode(&v)
|
||||||
|
fmt.Println(v)
|
||||||
|
}
|
94
vendor/github.com/DisposaBoy/JsonConfigReader/reader.go
generated
vendored
Normal file
94
vendor/github.com/DisposaBoy/JsonConfigReader/reader.go
generated
vendored
Normal file
|
@ -0,0 +1,94 @@
|
||||||
|
package JsonConfigReader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
type state struct {
|
||||||
|
r io.Reader
|
||||||
|
br *bytes.Reader
|
||||||
|
}
|
||||||
|
|
||||||
|
func isNL(c byte) bool {
|
||||||
|
return c == '\n' || c == '\r'
|
||||||
|
}
|
||||||
|
|
||||||
|
func isWS(c byte) bool {
|
||||||
|
return c == ' ' || c == '\t' || isNL(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
func consumeComment(s []byte, i int) int {
|
||||||
|
if i < len(s) && s[i] == '/' {
|
||||||
|
s[i-1] = ' '
|
||||||
|
for ; i < len(s) && !isNL(s[i]); i += 1 {
|
||||||
|
s[i] = ' '
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
|
||||||
|
func prep(r io.Reader) (s []byte, err error) {
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
_, err = io.Copy(buf, r)
|
||||||
|
s = buf.Bytes()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
i := 0
|
||||||
|
for i < len(s) {
|
||||||
|
switch s[i] {
|
||||||
|
case '"':
|
||||||
|
i += 1
|
||||||
|
for i < len(s) {
|
||||||
|
if s[i] == '"' {
|
||||||
|
i += 1
|
||||||
|
break
|
||||||
|
} else if s[i] == '\\' {
|
||||||
|
i += 1
|
||||||
|
}
|
||||||
|
i += 1
|
||||||
|
}
|
||||||
|
case '/':
|
||||||
|
i = consumeComment(s, i+1)
|
||||||
|
case ',':
|
||||||
|
j := i
|
||||||
|
for {
|
||||||
|
i += 1
|
||||||
|
if i >= len(s) {
|
||||||
|
break
|
||||||
|
} else if s[i] == '}' || s[i] == ']' {
|
||||||
|
s[j] = ' '
|
||||||
|
break
|
||||||
|
} else if s[i] == '/' {
|
||||||
|
i = consumeComment(s, i+1)
|
||||||
|
} else if !isWS(s[i]) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
i += 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read acts as a proxy for the underlying reader and cleans p
|
||||||
|
// of comments and trailing commas preceeding ] and }
|
||||||
|
// comments are delimitted by // up until the end the line
|
||||||
|
func (st *state) Read(p []byte) (n int, err error) {
|
||||||
|
if st.br == nil {
|
||||||
|
var s []byte
|
||||||
|
if s, err = prep(st.r); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
st.br = bytes.NewReader(s)
|
||||||
|
}
|
||||||
|
return st.br.Read(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns an io.Reader acting as proxy to r
|
||||||
|
func New(r io.Reader) io.Reader {
|
||||||
|
return &state{r: r}
|
||||||
|
}
|
13
vendor/github.com/NYTimes/gziphandler/LICENSE.md
generated
vendored
Normal file
13
vendor/github.com/NYTimes/gziphandler/LICENSE.md
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
||||||
|
Copyright (c) 2015 The New York Times Company
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this library except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
52
vendor/github.com/NYTimes/gziphandler/README.md
generated
vendored
Normal file
52
vendor/github.com/NYTimes/gziphandler/README.md
generated
vendored
Normal file
|
@ -0,0 +1,52 @@
|
||||||
|
Gzip Handler
|
||||||
|
============
|
||||||
|
|
||||||
|
This is a tiny Go package which wraps HTTP handlers to transparently gzip the
|
||||||
|
response body, for clients which support it. Although it's usually simpler to
|
||||||
|
leave that to a reverse proxy (like nginx or Varnish), this package is useful
|
||||||
|
when that's undesirable.
|
||||||
|
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
Call `GzipHandler` with any handler (an object which implements the
|
||||||
|
`http.Handler` interface), and it'll return a new handler which gzips the
|
||||||
|
response. For example:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"github.com/NYTimes/gziphandler"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
withoutGz := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.Header().Set("Content-Type", "text/plain")
|
||||||
|
io.WriteString(w, "Hello, World")
|
||||||
|
})
|
||||||
|
|
||||||
|
withGz := gziphandler.GzipHandler(withoutGz)
|
||||||
|
|
||||||
|
http.Handle("/", withGz)
|
||||||
|
http.ListenAndServe("0.0.0.0:8000", nil)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Documentation
|
||||||
|
|
||||||
|
The docs can be found at [godoc.org] [docs], as usual.
|
||||||
|
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
[Apache 2.0] [license].
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
[docs]: https://godoc.org/github.com/nytimes/gziphandler
|
||||||
|
[license]: https://github.com/nytimes/gziphandler/blob/master/LICENSE.md
|
200
vendor/github.com/NYTimes/gziphandler/gzip.go
generated
vendored
Normal file
200
vendor/github.com/NYTimes/gziphandler/gzip.go
generated
vendored
Normal file
|
@ -0,0 +1,200 @@
|
||||||
|
package gziphandler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"compress/gzip"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
vary = "Vary"
|
||||||
|
acceptEncoding = "Accept-Encoding"
|
||||||
|
contentEncoding = "Content-Encoding"
|
||||||
|
)
|
||||||
|
|
||||||
|
type codings map[string]float64
|
||||||
|
|
||||||
|
// The default qvalue to assign to an encoding if no explicit qvalue is set.
|
||||||
|
// This is actually kind of ambiguous in RFC 2616, so hopefully it's correct.
|
||||||
|
// The examples seem to indicate that it is.
|
||||||
|
const DEFAULT_QVALUE = 1.0
|
||||||
|
|
||||||
|
// gzipWriterPools stores a sync.Pool for each compression level for re-uze of gzip.Writers.
|
||||||
|
// Use poolIndex to covert a compression level to an index into gzipWriterPools.
|
||||||
|
var gzipWriterPools [gzip.BestCompression - gzip.BestSpeed + 2]*sync.Pool
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
for i := gzip.BestSpeed; i <= gzip.BestCompression; i++ {
|
||||||
|
addLevelPool(i)
|
||||||
|
}
|
||||||
|
addLevelPool(gzip.DefaultCompression)
|
||||||
|
}
|
||||||
|
|
||||||
|
// poolIndex maps a compression level to its index into gzipWriterPools. It assumes that
|
||||||
|
// level is a valid gzip compression level.
|
||||||
|
func poolIndex(level int) int {
|
||||||
|
// gzip.DefaultCompression == -1, so we need to treat it special.
|
||||||
|
if level == gzip.DefaultCompression {
|
||||||
|
return gzip.BestCompression - gzip.BestSpeed + 1
|
||||||
|
}
|
||||||
|
return level - gzip.BestSpeed
|
||||||
|
}
|
||||||
|
|
||||||
|
func addLevelPool(level int) {
|
||||||
|
gzipWriterPools[poolIndex(level)] = &sync.Pool{
|
||||||
|
New: func() interface{} {
|
||||||
|
// NewWriterLevel only returns error on a bad level, we are guaranteeing
|
||||||
|
// that this will be a valid level so it is okay to ignore the returned
|
||||||
|
// error.
|
||||||
|
w, _ := gzip.NewWriterLevel(nil, level)
|
||||||
|
return w
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GzipResponseWriter provides an http.ResponseWriter interface, which gzips
|
||||||
|
// bytes before writing them to the underlying response. This doesn't set the
|
||||||
|
// Content-Encoding header, nor close the writers, so don't forget to do that.
|
||||||
|
type GzipResponseWriter struct {
|
||||||
|
gw *gzip.Writer
|
||||||
|
http.ResponseWriter
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write appends data to the gzip writer.
|
||||||
|
func (w GzipResponseWriter) Write(b []byte) (int, error) {
|
||||||
|
if _, ok := w.Header()["Content-Type"]; !ok {
|
||||||
|
// If content type is not set, infer it from the uncompressed body.
|
||||||
|
w.Header().Set("Content-Type", http.DetectContentType(b))
|
||||||
|
}
|
||||||
|
return w.gw.Write(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flush flushes the underlying *gzip.Writer and then the underlying
|
||||||
|
// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter
|
||||||
|
// an http.Flusher.
|
||||||
|
func (w GzipResponseWriter) Flush() {
|
||||||
|
w.gw.Flush()
|
||||||
|
if fw, ok := w.ResponseWriter.(http.Flusher); ok {
|
||||||
|
fw.Flush()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustNewGzipLevelHandler behaves just like NewGzipLevelHandler except that in an error case
|
||||||
|
// it panics rather than returning an error.
|
||||||
|
func MustNewGzipLevelHandler(level int) func(http.Handler) http.Handler {
|
||||||
|
wrap, err := NewGzipLevelHandler(level)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return wrap
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGzipLevelHandler returns a wrapper function (often known as middleware)
|
||||||
|
// which can be used to wrap an HTTP handler to transparently gzip the response
|
||||||
|
// body if the client supports it (via the Accept-Encoding header). Responses will
|
||||||
|
// be encoded at the given gzip compression level. An error will be returned only
|
||||||
|
// if an invalid gzip compression level is given, so if one can ensure the level
|
||||||
|
// is valid, the returned error can be safely ignored.
|
||||||
|
func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) {
|
||||||
|
if level != gzip.DefaultCompression && (level < gzip.BestSpeed || level > gzip.BestCompression) {
|
||||||
|
return nil, fmt.Errorf("invalid compression level requested: %d", level)
|
||||||
|
}
|
||||||
|
return func(h http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.Header().Add(vary, acceptEncoding)
|
||||||
|
|
||||||
|
if acceptsGzip(r) {
|
||||||
|
// Bytes written during ServeHTTP are redirected to this gzip writer
|
||||||
|
// before being written to the underlying response.
|
||||||
|
gzw := gzipWriterPools[poolIndex(level)].Get().(*gzip.Writer)
|
||||||
|
defer gzipWriterPools[poolIndex(level)].Put(gzw)
|
||||||
|
gzw.Reset(w)
|
||||||
|
defer gzw.Close()
|
||||||
|
|
||||||
|
w.Header().Set(contentEncoding, "gzip")
|
||||||
|
h.ServeHTTP(GzipResponseWriter{gzw, w}, r)
|
||||||
|
} else {
|
||||||
|
h.ServeHTTP(w, r)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GzipHandler wraps an HTTP handler, to transparently gzip the response body if
|
||||||
|
// the client supports it (via the Accept-Encoding header). This will compress at
|
||||||
|
// the default compression level.
|
||||||
|
func GzipHandler(h http.Handler) http.Handler {
|
||||||
|
wrapper, _ := NewGzipLevelHandler(gzip.DefaultCompression)
|
||||||
|
return wrapper(h)
|
||||||
|
}
|
||||||
|
|
||||||
|
// acceptsGzip returns true if the given HTTP request indicates that it will
|
||||||
|
// accept a gzippped response.
|
||||||
|
func acceptsGzip(r *http.Request) bool {
|
||||||
|
acceptedEncodings, _ := parseEncodings(r.Header.Get(acceptEncoding))
|
||||||
|
return acceptedEncodings["gzip"] > 0.0
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseEncodings attempts to parse a list of codings, per RFC 2616, as might
|
||||||
|
// appear in an Accept-Encoding header. It returns a map of content-codings to
|
||||||
|
// quality values, and an error containing the errors encounted. It's probably
|
||||||
|
// safe to ignore those, because silently ignoring errors is how the internet
|
||||||
|
// works.
|
||||||
|
//
|
||||||
|
// See: http://tools.ietf.org/html/rfc2616#section-14.3
|
||||||
|
func parseEncodings(s string) (codings, error) {
|
||||||
|
c := make(codings)
|
||||||
|
e := make([]string, 0)
|
||||||
|
|
||||||
|
for _, ss := range strings.Split(s, ",") {
|
||||||
|
coding, qvalue, err := parseCoding(ss)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
e = append(e, err.Error())
|
||||||
|
|
||||||
|
} else {
|
||||||
|
c[coding] = qvalue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO (adammck): Use a proper multi-error struct, so the individual errors
|
||||||
|
// can be extracted if anyone cares.
|
||||||
|
if len(e) > 0 {
|
||||||
|
return c, fmt.Errorf("errors while parsing encodings: %s", strings.Join(e, ", "))
|
||||||
|
}
|
||||||
|
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseCoding parses a single conding (content-coding with an optional qvalue),
|
||||||
|
// as might appear in an Accept-Encoding header. It attempts to forgive minor
|
||||||
|
// formatting errors.
|
||||||
|
func parseCoding(s string) (coding string, qvalue float64, err error) {
|
||||||
|
for n, part := range strings.Split(s, ";") {
|
||||||
|
part = strings.TrimSpace(part)
|
||||||
|
qvalue = DEFAULT_QVALUE
|
||||||
|
|
||||||
|
if n == 0 {
|
||||||
|
coding = strings.ToLower(part)
|
||||||
|
|
||||||
|
} else if strings.HasPrefix(part, "q=") {
|
||||||
|
qvalue, err = strconv.ParseFloat(strings.TrimPrefix(part, "q="), 64)
|
||||||
|
|
||||||
|
if qvalue < 0.0 {
|
||||||
|
qvalue = 0.0
|
||||||
|
|
||||||
|
} else if qvalue > 1.0 {
|
||||||
|
qvalue = 1.0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if coding == "" {
|
||||||
|
err = fmt.Errorf("empty content-coding")
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
27
vendor/github.com/TomOnTime/utfutil/LICENSE
generated
vendored
Normal file
27
vendor/github.com/TomOnTime/utfutil/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
||||||
|
Copyright (c) 2016, Tom Limoncelli
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright notice, this
|
||||||
|
list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
* Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer in the documentation
|
||||||
|
and/or other materials provided with the distribution.
|
||||||
|
|
||||||
|
* Neither the name of utfutil nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||||
|
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
70
vendor/github.com/TomOnTime/utfutil/README.md
generated
vendored
Normal file
70
vendor/github.com/TomOnTime/utfutil/README.md
generated
vendored
Normal file
|
@ -0,0 +1,70 @@
|
||||||
|
# utfutil
|
||||||
|
|
||||||
|
Utilities to make it easier to read text encoded as UTF-16.
|
||||||
|
|
||||||
|
## Dealing with UTF-16 files from Windows.
|
||||||
|
|
||||||
|
Ever have code that worked for years until you received a file from a MS-Windows system that just didn't work at all? Looking at a hex dump you realize every other byte is \0. WTF? No, UTF. More specifically UTF-16LE with an optional BOM.
|
||||||
|
|
||||||
|
What does all that mean? Well, first you should read ["The Absolute Minimum Every Software Developer Absolutely, Positively Must Know About Unicode and Character Sets (No Excuses!)"](http://www.joelonsoftware.com/articles/Unicode.html) by Joel Spolsky.
|
||||||
|
|
||||||
|
Now you are an expert. You can spend an afternoon trying to figure out how the heck to put all that together and use `golang.org/x/text/encoding/unicode` to decode UTF-16LE. However I've already done that for you. Now you can take the easy way out change ioutil.ReadFile() to utfutil.ReadFile(). Everything will just work.
|
||||||
|
|
||||||
|
### utfutil.ReadFile() is the equivalent of ioutil.ReadFile()
|
||||||
|
|
||||||
|
OLD: Works with UTF8 and ASCII files:
|
||||||
|
|
||||||
|
```
|
||||||
|
data, err := ioutil.ReadFile(filename)
|
||||||
|
```
|
||||||
|
|
||||||
|
NEW: Works if someone gives you a Windows UTF-16LE file occasionally but normally you are processing UTF8 files:
|
||||||
|
|
||||||
|
```
|
||||||
|
data, err := utfutil.ReadFile(filename, utfutil.UTF8)
|
||||||
|
```
|
||||||
|
|
||||||
|
### utfutil.OpenFile() is the equivalent of os.Open().
|
||||||
|
|
||||||
|
OLD: Works with UTF8 and ASCII files:
|
||||||
|
|
||||||
|
```
|
||||||
|
data, err := os.Open(filename)
|
||||||
|
```
|
||||||
|
|
||||||
|
NEW: Works if someone gives you a file with a BOM:
|
||||||
|
|
||||||
|
```
|
||||||
|
data, err := utfutil.OpenFile(filename, utfutil.HTML5)
|
||||||
|
```
|
||||||
|
|
||||||
|
### utfutil.NewScanner() is for reading files line-by-line
|
||||||
|
|
||||||
|
It works like os.Open():
|
||||||
|
|
||||||
|
```
|
||||||
|
s, err := utfutil.NewScanner(filename, utfutil.HTML5)
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Encoding hints:
|
||||||
|
|
||||||
|
What's that second argument all about?
|
||||||
|
|
||||||
|
Since it is impossible to guess 100% correctly if there is no BOM,
|
||||||
|
the functions take a 2nd parameter of type "EncodingHint" where you
|
||||||
|
specify the default encoding for BOM-less files.
|
||||||
|
|
||||||
|
```
|
||||||
|
UTF8 No BOM? Assume UTF-8
|
||||||
|
UTF16LE No BOM? Assume UTF 16 Little Endian
|
||||||
|
UTF16BE No BOM? Assume UTF 16 Big Endian
|
||||||
|
WINDOWS = UTF16LE (i.e. a reasonable guess if file is from MS-Windows)
|
||||||
|
POSIX = UTF8 (i.e. a reasonable guess if file is from Unix or Unix-like systems)
|
||||||
|
HTML5 = UTF8 (i.e. a reasonable guess if file is from the web)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Future Directions
|
||||||
|
|
||||||
|
If someone writes a golang equivalent of uchatdet, I'll add a hint
|
||||||
|
called "AUTO" which uses it. That would be awesome. Volunteers?
|
110
vendor/github.com/TomOnTime/utfutil/utfutil.go
generated
vendored
Normal file
110
vendor/github.com/TomOnTime/utfutil/utfutil.go
generated
vendored
Normal file
|
@ -0,0 +1,110 @@
|
||||||
|
// Package utfutil provides methods that make it easy to read data in an UTF-encoding agnostic.
|
||||||
|
package utfutil
|
||||||
|
|
||||||
|
// These functions autodetect UTF BOM and return UTF-8. If no
|
||||||
|
// BOM is found, a hint is provided as to which encoding to assume.
|
||||||
|
// You can use them as replacements for os.Open() and ioutil.ReadFile()
|
||||||
|
// when the encoding of the file is unknown.
|
||||||
|
|
||||||
|
// utfutil.OpenFile() is a replacement for os.Open().
|
||||||
|
// utfutil.ReadFile() is a replacement for ioutil.ReadFile().
|
||||||
|
// utfutil.NewScanner() takes a filename and returns a Scanner.
|
||||||
|
// utfutil.NewReader() rewraps an existing scanner to make it UTF-encoding agnostic.
|
||||||
|
// utfutil.BytesReader() takes a []byte and decodes it to UTF-8.
|
||||||
|
|
||||||
|
// Since it is impossible to guess 100% correctly if there is no BOM,
|
||||||
|
// the functions take a 2nd parameter of type "EncodingHint" where you
|
||||||
|
// specify the default encoding for BOM-less data.
|
||||||
|
|
||||||
|
// If someone writes a golang equivalent of uchatdet, I'll add
|
||||||
|
// a hint called "AUTO" which uses it.
|
||||||
|
|
||||||
|
// Inspiration: I wrote this after spending half a day trying
|
||||||
|
// to figure out how to use unicode.BOMOverride.
|
||||||
|
// Hopefully this will save other golang newbies from the same.
|
||||||
|
// (golang.org/x/text/encoding/unicode)
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/text/encoding"
|
||||||
|
"golang.org/x/text/encoding/unicode"
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EncodingHint indicates the file's encoding if there is no BOM.
|
||||||
|
type EncodingHint int
|
||||||
|
|
||||||
|
const (
|
||||||
|
UTF8 EncodingHint = iota // UTF-8
|
||||||
|
UTF16LE // UTF 16 Little Endian
|
||||||
|
UTF16BE // UTF 16 Big Endian
|
||||||
|
WINDOWS = UTF16LE // File came from a MS-Windows system
|
||||||
|
POSIX = UTF8 // File came from Unix or Unix-like systems
|
||||||
|
HTML5 = UTF8 // File came from the web
|
||||||
|
)
|
||||||
|
|
||||||
|
// About utfutil.HTML5:
|
||||||
|
// This technique is recommended by the W3C for use in HTML 5:
|
||||||
|
// "For compatibility with deployed content, the byte order
|
||||||
|
// mark (also known as BOM) is considered more authoritative
|
||||||
|
// than anything else." http://www.w3.org/TR/encoding/#specification-hooks
|
||||||
|
|
||||||
|
// OpenFile is the equivalent of os.Open().
|
||||||
|
func OpenFile(name string, d EncodingHint) (io.Reader, error) {
|
||||||
|
f, err := os.Open(name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return NewReader(f, d), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadFile is the equivalent of ioutil.ReadFile()
|
||||||
|
func ReadFile(name string, d EncodingHint) ([]byte, error) {
|
||||||
|
file, err := OpenFile(name, d)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ioutil.ReadAll(file)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewScanner is a convenience function that takes a filename and returns a scanner.
|
||||||
|
func NewScanner(name string, d EncodingHint) (*bufio.Scanner, error) {
|
||||||
|
f, err := OpenFile(name, d)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return bufio.NewScanner(f), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewReader wraps a Reader to decode Unicode to UTF-8 as it reads.
|
||||||
|
func NewReader(r io.Reader, d EncodingHint) io.Reader {
|
||||||
|
var decoder *encoding.Decoder
|
||||||
|
switch d {
|
||||||
|
case UTF8:
|
||||||
|
// Make a transformer that assumes UTF-8 but abides by the BOM.
|
||||||
|
decoder = unicode.UTF8.NewDecoder()
|
||||||
|
case UTF16LE:
|
||||||
|
// Make an tranformer that decodes MS-Windows (16LE) UTF files:
|
||||||
|
winutf := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
|
||||||
|
// Make a transformer that is like winutf, but abides by BOM if found:
|
||||||
|
decoder = winutf.NewDecoder()
|
||||||
|
case UTF16BE:
|
||||||
|
// Make an tranformer that decodes UTF-16BE files:
|
||||||
|
utf16be := unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM)
|
||||||
|
// Make a transformer that is like utf16be, but abides by BOM if found:
|
||||||
|
decoder = utf16be.NewDecoder()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make a Reader that uses utf16bom:
|
||||||
|
return transform.NewReader(r, unicode.BOMOverride(decoder))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BytesReader is a convenience function that takes a []byte and decodes them to UTF-8.
|
||||||
|
func BytesReader(b []byte, d EncodingHint) io.Reader {
|
||||||
|
return NewReader(bytes.NewReader(b), d)
|
||||||
|
}
|
202
vendor/github.com/aws/aws-sdk-go/LICENSE.txt
generated
vendored
Normal file
202
vendor/github.com/aws/aws-sdk-go/LICENSE.txt
generated
vendored
Normal file
|
@ -0,0 +1,202 @@
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
3
vendor/github.com/aws/aws-sdk-go/NOTICE.txt
generated
vendored
Normal file
3
vendor/github.com/aws/aws-sdk-go/NOTICE.txt
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
AWS SDK for Go
|
||||||
|
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
Copyright 2014-2015 Stripe, Inc.
|
145
vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
generated
vendored
Normal file
145
vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
generated
vendored
Normal file
|
@ -0,0 +1,145 @@
|
||||||
|
// Package awserr represents API error interface accessors for the SDK.
package awserr

// An Error wraps lower level errors with code, message and an original error.
// The underlying concrete error type may also satisfy other interfaces which
// can be used to obtain more specific information about the error.
//
// Calling Error() or String() will always include the full information about
// an error based on its underlying type.
//
// Example:
//
//     output, err := s3manage.Upload(svc, input, opts)
//     if err != nil {
//         if awsErr, ok := err.(awserr.Error); ok {
//             // Get error details
//             log.Println("Error:", awsErr.Code(), awsErr.Message())
//
//             // Prints out full error message, including original error if there was one.
//             log.Println("Error:", awsErr.Error())
//
//             // Get original error
//             if origErr := awsErr.OrigErr(); origErr != nil {
//                 // operate on original error.
//             }
//         } else {
//             fmt.Println(err.Error())
//         }
//     }
//
type Error interface {
	// Satisfy the generic error interface.
	error

	// Returns the short phrase depicting the classification of the error.
	Code() string

	// Returns the error details message.
	Message() string

	// Returns the original error if one was set. Nil is returned if not set.
	OrigErr() error
}
|
||||||
|
|
||||||
|
// BatchError is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occurred in the batch.
//
// Deprecated: Replaced with BatchedErrors. Only defined for backwards
// compatibility.
type BatchError interface {
	// Satisfy the generic error interface.
	error

	// Returns the short phrase depicting the classification of the error.
	Code() string

	// Returns the error details message.
	Message() string

	// Returns the original errors if any were set. An empty slice is
	// returned if none were set.
	OrigErrs() []error
}

// BatchedErrors is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occurred in the batch.
//
// Replaces BatchError
type BatchedErrors interface {
	// Satisfy the base Error interface.
	Error

	// Returns the original errors if any were set. An empty slice is
	// returned if none were set.
	OrigErrs() []error
}
|
||||||
|
|
||||||
|
// New returns an Error object described by the code, message, and origErr.
//
// A nil origErr produces an Error with no wrapped errors; otherwise origErr
// becomes the single nested error.
//
// NOTE(review): origErr is always wrapped in a new baseError here, even when
// it already satisfies the Error interface — an older doc comment claimed it
// would be returned unwrapped, which does not match this code.
func New(code, message string, origErr error) Error {
	var errs []error
	if origErr != nil {
		errs = append(errs, origErr)
	}
	return newBaseError(code, message, errs)
}

// NewBatchError returns an BatchedErrors with a collection of errors as an
// array of errors.
func NewBatchError(code, message string, errs []error) BatchedErrors {
	return newBaseError(code, message, errs)
}
|
||||||
|
|
||||||
|
// A RequestFailure is an interface to extract request failure information from
// an Error such as the request ID of the failed request returned by a service.
// RequestFailures may not always have a requestID value if the request failed
// prior to reaching the service such as a connection error.
//
// Example:
//
//     output, err := s3manage.Upload(svc, input, opts)
//     if err != nil {
//         if reqerr, ok := err.(RequestFailure); ok {
//             log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
//         } else {
//             log.Println("Error:", err.Error())
//         }
//     }
//
// Combined with awserr.Error:
//
//     output, err := s3manage.Upload(svc, input, opts)
//     if err != nil {
//         if awsErr, ok := err.(awserr.Error); ok {
//             // Generic AWS Error with Code, Message, and original error (if any)
//             fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
//
//             if reqErr, ok := err.(awserr.RequestFailure); ok {
//                 // A service error occurred
//                 fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
//             }
//         } else {
//             fmt.Println(err.Error())
//         }
//     }
//
type RequestFailure interface {
	Error

	// The status code of the HTTP response.
	StatusCode() int

	// The request ID returned by the service for a request failure. This will
	// be empty if no request ID is available such as the request failed due
	// to a connection error.
	RequestID() string
}

// NewRequestFailure returns a new request error wrapper for the given Error
// provided.
func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
	return newRequestError(err, statusCode, reqID)
}
|
194
vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
generated
vendored
Normal file
194
vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
generated
vendored
Normal file
|
@ -0,0 +1,194 @@
|
||||||
|
package awserr
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// SprintError returns a string of the formatted error code.
|
||||||
|
//
|
||||||
|
// Both extra and origErr are optional. If they are included their lines
|
||||||
|
// will be added, but if they are not included their lines will be ignored.
|
||||||
|
func SprintError(code, message, extra string, origErr error) string {
|
||||||
|
msg := fmt.Sprintf("%s: %s", code, message)
|
||||||
|
if extra != "" {
|
||||||
|
msg = fmt.Sprintf("%s\n\t%s", msg, extra)
|
||||||
|
}
|
||||||
|
if origErr != nil {
|
||||||
|
msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
|
||||||
|
}
|
||||||
|
return msg
|
||||||
|
}
|
||||||
|
|
||||||
|
// A baseError wraps the code and message which defines an error. It also
// can be used to wrap an original error object.
//
// Should be used as the root for errors satisfying the awserr.Error. Also
// for any error which does not fit into a specific error wrapper type.
type baseError struct {
	// Classification of error
	code string

	// Detailed information about error
	message string

	// Optional original error this error is based off of. Allows building
	// chained errors.
	errs []error
}

// newBaseError returns an error object for the code, message, and errors.
//
// code is a short no whitespace phrase depicting the classification of
// the error that is being created.
//
// message is the free flow string containing detailed information about the
// error.
//
// origErrs is the error objects which will be nested under the new errors to
// be returned. The slice is stored as-is (not copied), so callers should not
// mutate it afterwards.
func newBaseError(code, message string, origErrs []error) *baseError {
	b := &baseError{
		code:    code,
		message: message,
		errs:    origErrs,
	}

	return b
}
|
||||||
|
|
||||||
|
// Error returns the string representation of the error.
//
// NOTE(review): an older comment referenced ErrorWithExtra for formatting;
// that function is not visible here — see SprintError for the actual format.
//
// Satisfies the error interface.
func (b baseError) Error() string {
	size := len(b.errs)
	if size > 0 {
		// Pass the nested errors as the "caused by" error, joined by errorList.
		return SprintError(b.code, b.message, "", errorList(b.errs))
	}

	return SprintError(b.code, b.message, "", nil)
}

// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (b baseError) String() string {
	return b.Error()
}

// Code returns the short phrase depicting the classification of the error.
func (b baseError) Code() string {
	return b.code
}

// Message returns the error details message.
func (b baseError) Message() string {
	return b.message
}

// OrigErr returns the original error if one was set. Nil is returned if no
// error was set. This only returns the first element in the list. If the full
// list is needed, use BatchedErrors.
func (b baseError) OrigErr() error {
	switch len(b.errs) {
	case 0:
		return nil
	case 1:
		return b.errs[0]
	default:
		// Multiple nested errors: fold them into a batch error, reusing the
		// first error's code/message when it satisfies the Error interface.
		if err, ok := b.errs[0].(Error); ok {
			return NewBatchError(err.Code(), err.Message(), b.errs[1:])
		}
		return NewBatchError("BatchedErrors",
			"multiple errors occurred", b.errs)
	}
}

// OrigErrs returns the original errors if any were set. An empty slice is
// returned if no error was set. The internal slice is returned directly
// (not copied).
func (b baseError) OrigErrs() []error {
	return b.errs
}
|
||||||
|
|
||||||
|
// So that the Error interface type can be included as an anonymous field
// in the requestError struct and not conflict with the error.Error() method.
type awsError Error

// A requestError wraps a request or service error.
//
// Composed of baseError for code, message, and original error.
type requestError struct {
	awsError
	statusCode int
	requestID  string
}

// newRequestError returns a wrapped error with additional information for
// request status code, and service requestID.
//
// Should be used to wrap all request which involve service requests. Even if
// the request failed without a service response, but had an HTTP status code
// that may be meaningful.
//
// Also wraps original errors via the baseError.
func newRequestError(err Error, statusCode int, requestID string) *requestError {
	return &requestError{
		awsError:   err,
		statusCode: statusCode,
		requestID:  requestID,
	}
}

// Error returns the string representation of the error, appending the HTTP
// status code and request ID to the wrapped error's code and message.
// Satisfies the error interface.
func (r requestError) Error() string {
	extra := fmt.Sprintf("status code: %d, request id: %s",
		r.statusCode, r.requestID)
	return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
}

// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (r requestError) String() string {
	return r.Error()
}

// StatusCode returns the wrapped status code for the error
func (r requestError) StatusCode() int {
	return r.statusCode
}

// RequestID returns the wrapped requestID
func (r requestError) RequestID() string {
	return r.requestID
}

// OrigErrs returns the original errors if any were set. An empty slice is
// returned if no error was set. Delegates to the embedded error when it is
// itself a BatchedErrors.
func (r requestError) OrigErrs() []error {
	if b, ok := r.awsError.(BatchedErrors); ok {
		return b.OrigErrs()
	}
	return []error{r.OrigErr()}
}
|
||||||
|
|
||||||
|
// An error list that satisfies the golang interface
|
||||||
|
type errorList []error
|
||||||
|
|
||||||
|
// Error returns the string representation of the error.
|
||||||
|
//
|
||||||
|
// Satisfies the error interface.
|
||||||
|
func (e errorList) Error() string {
|
||||||
|
msg := ""
|
||||||
|
// How do we want to handle the array size being zero
|
||||||
|
if size := len(e); size > 0 {
|
||||||
|
for i := 0; i < size; i++ {
|
||||||
|
msg += fmt.Sprintf("%s", e[i].Error())
|
||||||
|
// We check the next index to see if it is within the slice.
|
||||||
|
// If it is, then we append a newline. We do this, because unit tests
|
||||||
|
// could be broken with the additional '\n'
|
||||||
|
if i+1 < size {
|
||||||
|
msg += "\n"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return msg
|
||||||
|
}
|
100
vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
generated
vendored
Normal file
100
vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
generated
vendored
Normal file
|
@ -0,0 +1,100 @@
|
||||||
|
package awsutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Copy deeply copies a src structure to dst. Useful for copying request and
// response structures.
//
// Can copy between structs of different type, but will only copy fields which
// are assignable, and exist in both structs. Fields which are not assignable,
// or do not exist in both structs are ignored.
//
// Panics if dst is nil/invalid. dst should be a pointer so the copied data
// is visible to the caller.
func Copy(dst, src interface{}) {
	dstval := reflect.ValueOf(dst)
	if !dstval.IsValid() {
		panic("Copy dst cannot be nil")
	}

	rcopy(dstval, reflect.ValueOf(src), true)
}

// CopyOf returns a copy of src while also allocating the memory for dst.
// src must be a pointer type or this operation will fail.
func CopyOf(src interface{}) (dst interface{}) {
	// Allocate a fresh value of src's element type, deep-copy into it, and
	// return the resulting pointer as an interface{}.
	dsti := reflect.New(reflect.TypeOf(src).Elem())
	dst = dsti.Interface()
	rcopy(dsti, reflect.ValueOf(src), true)
	return
}
|
||||||
|
|
||||||
|
// rcopy performs a recursive copy of values from the source to destination.
//
// root is used to skip certain aspects of the copy which are not valid
// for the root node of a object.
func rcopy(dst, src reflect.Value, root bool) {
	if !src.IsValid() {
		// Nothing to copy from (e.g. nil interface); leave dst untouched.
		return
	}

	switch src.Kind() {
	case reflect.Ptr:
		if _, ok := src.Interface().(io.Reader); ok {
			// io.Reader values are shared, not deep-copied: the same reader
			// pointer is assigned into dst.
			if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
				dst.Elem().Set(src)
			} else if dst.CanSet() {
				dst.Set(src)
			}
		} else {
			// Allocate a new pointee for non-nil pointers, then recurse into
			// the pointed-to value.
			e := src.Type().Elem()
			if dst.CanSet() && !src.IsNil() {
				dst.Set(reflect.New(e))
			}
			if src.Elem().IsValid() {
				// Keep the current root state since the depth hasn't changed
				rcopy(dst.Elem(), src.Elem(), root)
			}
		}
	case reflect.Struct:
		// Iterate dst's fields by name so only fields present in both structs
		// (and settable on dst) are copied.
		t := dst.Type()
		for i := 0; i < t.NumField(); i++ {
			name := t.Field(i).Name
			srcVal := src.FieldByName(name)
			dstVal := dst.FieldByName(name)
			if srcVal.IsValid() && dstVal.CanSet() {
				rcopy(dstVal, srcVal, false)
			}
		}
	case reflect.Slice:
		if src.IsNil() {
			// Preserve nil-ness: a nil src slice leaves dst as-is.
			break
		}

		s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
		dst.Set(s)
		for i := 0; i < src.Len(); i++ {
			rcopy(dst.Index(i), src.Index(i), false)
		}
	case reflect.Map:
		if src.IsNil() {
			// Preserve nil-ness: a nil src map leaves dst as-is.
			break
		}

		s := reflect.MakeMap(src.Type())
		dst.Set(s)
		for _, k := range src.MapKeys() {
			v := src.MapIndex(k)
			v2 := reflect.New(v.Type()).Elem()
			rcopy(v2, v, false)
			dst.SetMapIndex(k, v2)
		}
	default:
		// Assign the value if possible. If its not assignable, the value would
		// need to be converted and the impact of that may be unexpected, or is
		// not compatible with the dst type.
		if src.Type().AssignableTo(dst.Type()) {
			dst.Set(src)
		}
	}
}
|
27
vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
generated
vendored
Normal file
27
vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
||||||
|
package awsutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
// In addition to this, this method will also dereference the input values if
// possible so the DeepEqual performed will not fail if one parameter is a
// pointer and the other is not.
//
// DeepEqual will not perform indirection of nested values of the input parameters.
func DeepEqual(a, b interface{}) bool {
	va := reflect.Indirect(reflect.ValueOf(a))
	vb := reflect.Indirect(reflect.ValueOf(b))

	switch {
	case !va.IsValid() && !vb.IsValid():
		// Both sides are nil; they are equal only when their static types match.
		return reflect.TypeOf(a) == reflect.TypeOf(b)
	case va.IsValid() != vb.IsValid():
		// Exactly one side is nil: never equal.
		return false
	}

	return reflect.DeepEqual(va.Interface(), vb.Interface())
}
|
222
vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
generated
vendored
Normal file
222
vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
generated
vendored
Normal file
|
@ -0,0 +1,222 @@
|
||||||
|
package awsutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/jmespath/go-jmespath"
|
||||||
|
)
|
||||||
|
|
||||||
|
var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
|
||||||
|
|
||||||
|
// rValuesAtPath returns a slice of values found in value v. The values
|
||||||
|
// in v are explored recursively so all nested values are collected.
|
||||||
|
func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
|
||||||
|
pathparts := strings.Split(path, "||")
|
||||||
|
if len(pathparts) > 1 {
|
||||||
|
for _, pathpart := range pathparts {
|
||||||
|
vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
|
||||||
|
if len(vals) > 0 {
|
||||||
|
return vals
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
|
||||||
|
components := strings.Split(path, ".")
|
||||||
|
for len(values) > 0 && len(components) > 0 {
|
||||||
|
var index *int64
|
||||||
|
var indexStar bool
|
||||||
|
c := strings.TrimSpace(components[0])
|
||||||
|
if c == "" { // no actual component, illegal syntax
|
||||||
|
return nil
|
||||||
|
} else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
|
||||||
|
// TODO normalize case for user
|
||||||
|
return nil // don't support unexported fields
|
||||||
|
}
|
||||||
|
|
||||||
|
// parse this component
|
||||||
|
if m := indexRe.FindStringSubmatch(c); m != nil {
|
||||||
|
c = m[1]
|
||||||
|
if m[2] == "" {
|
||||||
|
index = nil
|
||||||
|
indexStar = true
|
||||||
|
} else {
|
||||||
|
i, _ := strconv.ParseInt(m[2], 10, 32)
|
||||||
|
index = &i
|
||||||
|
indexStar = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
nextvals := []reflect.Value{}
|
||||||
|
for _, value := range values {
|
||||||
|
// pull component name out of struct member
|
||||||
|
if value.Kind() != reflect.Struct {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if c == "*" { // pull all members
|
||||||
|
for i := 0; i < value.NumField(); i++ {
|
||||||
|
if f := reflect.Indirect(value.Field(i)); f.IsValid() {
|
||||||
|
nextvals = append(nextvals, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
value = value.FieldByNameFunc(func(name string) bool {
|
||||||
|
if c == name {
|
||||||
|
return true
|
||||||
|
} else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
})
|
||||||
|
|
||||||
|
if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
|
||||||
|
if !value.IsNil() {
|
||||||
|
value.Set(reflect.Zero(value.Type()))
|
||||||
|
}
|
||||||
|
return []reflect.Value{value}
|
||||||
|
}
|
||||||
|
|
||||||
|
if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
|
||||||
|
// TODO if the value is the terminus it should not be created
|
||||||
|
// if the value to be set to its position is nil.
|
||||||
|
value.Set(reflect.New(value.Type().Elem()))
|
||||||
|
value = value.Elem()
|
||||||
|
} else {
|
||||||
|
value = reflect.Indirect(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
|
||||||
|
if !createPath && value.IsNil() {
|
||||||
|
value = reflect.ValueOf(nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if value.IsValid() {
|
||||||
|
nextvals = append(nextvals, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
values = nextvals
|
||||||
|
|
||||||
|
if indexStar || index != nil {
|
||||||
|
nextvals = []reflect.Value{}
|
||||||
|
for _, value := range values {
|
||||||
|
value := reflect.Indirect(value)
|
||||||
|
if value.Kind() != reflect.Slice {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if indexStar { // grab all indices
|
||||||
|
for i := 0; i < value.Len(); i++ {
|
||||||
|
idx := reflect.Indirect(value.Index(i))
|
||||||
|
if idx.IsValid() {
|
||||||
|
nextvals = append(nextvals, idx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// pull out index
|
||||||
|
i := int(*index)
|
||||||
|
if i >= value.Len() { // check out of bounds
|
||||||
|
if createPath {
|
||||||
|
// TODO resize slice
|
||||||
|
} else {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else if i < 0 { // support negative indexing
|
||||||
|
i = value.Len() + i
|
||||||
|
}
|
||||||
|
value = reflect.Indirect(value.Index(i))
|
||||||
|
|
||||||
|
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
|
||||||
|
if !createPath && value.IsNil() {
|
||||||
|
value = reflect.ValueOf(nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if value.IsValid() {
|
||||||
|
nextvals = append(nextvals, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
values = nextvals
|
||||||
|
}
|
||||||
|
|
||||||
|
components = components[1:]
|
||||||
|
}
|
||||||
|
return values
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValuesAtPath returns a list of values at the case insensitive lexical
|
||||||
|
// path inside of a structure.
|
||||||
|
func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
|
||||||
|
result, err := jmespath.Search(path, i)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
v := reflect.ValueOf(result)
|
||||||
|
if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
if s, ok := result.([]interface{}); ok {
|
||||||
|
return s, err
|
||||||
|
}
|
||||||
|
if v.Kind() == reflect.Map && v.Len() == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
if v.Kind() == reflect.Slice {
|
||||||
|
out := make([]interface{}, v.Len())
|
||||||
|
for i := 0; i < v.Len(); i++ {
|
||||||
|
out[i] = v.Index(i).Interface()
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return []interface{}{result}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetValueAtPath sets a value at the case insensitive lexical path inside
|
||||||
|
// of a structure.
|
||||||
|
func SetValueAtPath(i interface{}, path string, v interface{}) {
|
||||||
|
if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
|
||||||
|
for _, rval := range rvals {
|
||||||
|
if rval.Kind() == reflect.Ptr && rval.IsNil() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
setValue(rval, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setValue(dstVal reflect.Value, src interface{}) {
|
||||||
|
if dstVal.Kind() == reflect.Ptr {
|
||||||
|
dstVal = reflect.Indirect(dstVal)
|
||||||
|
}
|
||||||
|
srcVal := reflect.ValueOf(src)
|
||||||
|
|
||||||
|
if !srcVal.IsValid() { // src is literal nil
|
||||||
|
if dstVal.CanAddr() {
|
||||||
|
// Convert to pointer so that pointer's value can be nil'ed
|
||||||
|
// dstVal = dstVal.Addr()
|
||||||
|
}
|
||||||
|
dstVal.Set(reflect.Zero(dstVal.Type()))
|
||||||
|
|
||||||
|
} else if srcVal.Kind() == reflect.Ptr {
|
||||||
|
if srcVal.IsNil() {
|
||||||
|
srcVal = reflect.Zero(dstVal.Type())
|
||||||
|
} else {
|
||||||
|
srcVal = reflect.ValueOf(src).Elem()
|
||||||
|
}
|
||||||
|
dstVal.Set(srcVal)
|
||||||
|
} else {
|
||||||
|
dstVal.Set(srcVal)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
107
vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
generated
vendored
Normal file
107
vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
generated
vendored
Normal file
|
@ -0,0 +1,107 @@
|
||||||
|
package awsutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Prettify returns the string representation of a value.
|
||||||
|
func Prettify(i interface{}) string {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
prettify(reflect.ValueOf(i), 0, &buf)
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// prettify will recursively walk value v to build a textual
|
||||||
|
// representation of the value.
|
||||||
|
func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
|
||||||
|
for v.Kind() == reflect.Ptr {
|
||||||
|
v = v.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
switch v.Kind() {
|
||||||
|
case reflect.Struct:
|
||||||
|
strtype := v.Type().String()
|
||||||
|
if strtype == "time.Time" {
|
||||||
|
fmt.Fprintf(buf, "%s", v.Interface())
|
||||||
|
break
|
||||||
|
} else if strings.HasPrefix(strtype, "io.") {
|
||||||
|
buf.WriteString("<buffer>")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteString("{\n")
|
||||||
|
|
||||||
|
names := []string{}
|
||||||
|
for i := 0; i < v.Type().NumField(); i++ {
|
||||||
|
name := v.Type().Field(i).Name
|
||||||
|
f := v.Field(i)
|
||||||
|
if name[0:1] == strings.ToLower(name[0:1]) {
|
||||||
|
continue // ignore unexported fields
|
||||||
|
}
|
||||||
|
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
|
||||||
|
continue // ignore unset fields
|
||||||
|
}
|
||||||
|
names = append(names, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, n := range names {
|
||||||
|
val := v.FieldByName(n)
|
||||||
|
buf.WriteString(strings.Repeat(" ", indent+2))
|
||||||
|
buf.WriteString(n + ": ")
|
||||||
|
prettify(val, indent+2, buf)
|
||||||
|
|
||||||
|
if i < len(names)-1 {
|
||||||
|
buf.WriteString(",\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
||||||
|
case reflect.Slice:
|
||||||
|
nl, id, id2 := "", "", ""
|
||||||
|
if v.Len() > 3 {
|
||||||
|
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
|
||||||
|
}
|
||||||
|
buf.WriteString("[" + nl)
|
||||||
|
for i := 0; i < v.Len(); i++ {
|
||||||
|
buf.WriteString(id2)
|
||||||
|
prettify(v.Index(i), indent+2, buf)
|
||||||
|
|
||||||
|
if i < v.Len()-1 {
|
||||||
|
buf.WriteString("," + nl)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteString(nl + id + "]")
|
||||||
|
case reflect.Map:
|
||||||
|
buf.WriteString("{\n")
|
||||||
|
|
||||||
|
for i, k := range v.MapKeys() {
|
||||||
|
buf.WriteString(strings.Repeat(" ", indent+2))
|
||||||
|
buf.WriteString(k.String() + ": ")
|
||||||
|
prettify(v.MapIndex(k), indent+2, buf)
|
||||||
|
|
||||||
|
if i < v.Len()-1 {
|
||||||
|
buf.WriteString(",\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
||||||
|
default:
|
||||||
|
if !v.IsValid() {
|
||||||
|
fmt.Fprint(buf, "<invalid value>")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
format := "%v"
|
||||||
|
switch v.Interface().(type) {
|
||||||
|
case string:
|
||||||
|
format = "%q"
|
||||||
|
case io.ReadSeeker, io.Reader:
|
||||||
|
format = "buffer(%p)"
|
||||||
|
}
|
||||||
|
fmt.Fprintf(buf, format, v.Interface())
|
||||||
|
}
|
||||||
|
}
|
89
vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
generated
vendored
Normal file
89
vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
generated
vendored
Normal file
|
@ -0,0 +1,89 @@
|
||||||
|
package awsutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StringValue returns the string representation of a value.
|
||||||
|
func StringValue(i interface{}) string {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
stringValue(reflect.ValueOf(i), 0, &buf)
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
|
||||||
|
for v.Kind() == reflect.Ptr {
|
||||||
|
v = v.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
switch v.Kind() {
|
||||||
|
case reflect.Struct:
|
||||||
|
buf.WriteString("{\n")
|
||||||
|
|
||||||
|
names := []string{}
|
||||||
|
for i := 0; i < v.Type().NumField(); i++ {
|
||||||
|
name := v.Type().Field(i).Name
|
||||||
|
f := v.Field(i)
|
||||||
|
if name[0:1] == strings.ToLower(name[0:1]) {
|
||||||
|
continue // ignore unexported fields
|
||||||
|
}
|
||||||
|
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
|
||||||
|
continue // ignore unset fields
|
||||||
|
}
|
||||||
|
names = append(names, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, n := range names {
|
||||||
|
val := v.FieldByName(n)
|
||||||
|
buf.WriteString(strings.Repeat(" ", indent+2))
|
||||||
|
buf.WriteString(n + ": ")
|
||||||
|
stringValue(val, indent+2, buf)
|
||||||
|
|
||||||
|
if i < len(names)-1 {
|
||||||
|
buf.WriteString(",\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
||||||
|
case reflect.Slice:
|
||||||
|
nl, id, id2 := "", "", ""
|
||||||
|
if v.Len() > 3 {
|
||||||
|
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
|
||||||
|
}
|
||||||
|
buf.WriteString("[" + nl)
|
||||||
|
for i := 0; i < v.Len(); i++ {
|
||||||
|
buf.WriteString(id2)
|
||||||
|
stringValue(v.Index(i), indent+2, buf)
|
||||||
|
|
||||||
|
if i < v.Len()-1 {
|
||||||
|
buf.WriteString("," + nl)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteString(nl + id + "]")
|
||||||
|
case reflect.Map:
|
||||||
|
buf.WriteString("{\n")
|
||||||
|
|
||||||
|
for i, k := range v.MapKeys() {
|
||||||
|
buf.WriteString(strings.Repeat(" ", indent+2))
|
||||||
|
buf.WriteString(k.String() + ": ")
|
||||||
|
stringValue(v.MapIndex(k), indent+2, buf)
|
||||||
|
|
||||||
|
if i < v.Len()-1 {
|
||||||
|
buf.WriteString(",\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
||||||
|
default:
|
||||||
|
format := "%v"
|
||||||
|
switch v.Interface().(type) {
|
||||||
|
case string:
|
||||||
|
format = "%q"
|
||||||
|
}
|
||||||
|
fmt.Fprintf(buf, format, v.Interface())
|
||||||
|
}
|
||||||
|
}
|
120
vendor/github.com/aws/aws-sdk-go/aws/client/client.go
generated
vendored
Normal file
120
vendor/github.com/aws/aws-sdk-go/aws/client/client.go
generated
vendored
Normal file
|
@ -0,0 +1,120 @@
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http/httputil"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Config provides configuration to a service client instance.
|
||||||
|
type Config struct {
|
||||||
|
Config *aws.Config
|
||||||
|
Handlers request.Handlers
|
||||||
|
Endpoint, SigningRegion string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConfigProvider provides a generic way for a service client to receive
|
||||||
|
// the ClientConfig without circular dependencies.
|
||||||
|
type ConfigProvider interface {
|
||||||
|
ClientConfig(serviceName string, cfgs ...*aws.Config) Config
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Client implements the base client request and response handling
|
||||||
|
// used by all service clients.
|
||||||
|
type Client struct {
|
||||||
|
request.Retryer
|
||||||
|
metadata.ClientInfo
|
||||||
|
|
||||||
|
Config aws.Config
|
||||||
|
Handlers request.Handlers
|
||||||
|
}
|
||||||
|
|
||||||
|
// New will return a pointer to a new initialized service client.
|
||||||
|
func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
|
||||||
|
svc := &Client{
|
||||||
|
Config: cfg,
|
||||||
|
ClientInfo: info,
|
||||||
|
Handlers: handlers,
|
||||||
|
}
|
||||||
|
|
||||||
|
switch retryer, ok := cfg.Retryer.(request.Retryer); {
|
||||||
|
case ok:
|
||||||
|
svc.Retryer = retryer
|
||||||
|
case cfg.Retryer != nil && cfg.Logger != nil:
|
||||||
|
s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
|
||||||
|
cfg.Logger.Log(s)
|
||||||
|
fallthrough
|
||||||
|
default:
|
||||||
|
maxRetries := aws.IntValue(cfg.MaxRetries)
|
||||||
|
if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
|
||||||
|
maxRetries = 3
|
||||||
|
}
|
||||||
|
svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
|
||||||
|
}
|
||||||
|
|
||||||
|
svc.AddDebugHandlers()
|
||||||
|
|
||||||
|
for _, option := range options {
|
||||||
|
option(svc)
|
||||||
|
}
|
||||||
|
|
||||||
|
return svc
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRequest returns a new Request pointer for the service API
|
||||||
|
// operation and parameters.
|
||||||
|
func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
|
||||||
|
return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddDebugHandlers injects debug logging handlers into the service to log request
|
||||||
|
// debug information.
|
||||||
|
func (c *Client) AddDebugHandlers() {
|
||||||
|
if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Handlers.Send.PushFront(logRequest)
|
||||||
|
c.Handlers.Send.PushBack(logResponse)
|
||||||
|
}
|
||||||
|
|
||||||
|
const logReqMsg = `DEBUG: Request %s/%s Details:
|
||||||
|
---[ REQUEST POST-SIGN ]-----------------------------
|
||||||
|
%s
|
||||||
|
-----------------------------------------------------`
|
||||||
|
|
||||||
|
func logRequest(r *request.Request) {
|
||||||
|
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
|
||||||
|
dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody)
|
||||||
|
|
||||||
|
if logBody {
|
||||||
|
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
|
||||||
|
// Body as a NoOpCloser and will not be reset after read by the HTTP
|
||||||
|
// client reader.
|
||||||
|
r.Body.Seek(r.BodyStart, 0)
|
||||||
|
r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
|
||||||
|
}
|
||||||
|
|
||||||
|
const logRespMsg = `DEBUG: Response %s/%s Details:
|
||||||
|
---[ RESPONSE ]--------------------------------------
|
||||||
|
%s
|
||||||
|
-----------------------------------------------------`
|
||||||
|
|
||||||
|
func logResponse(r *request.Request) {
|
||||||
|
var msg = "no response data"
|
||||||
|
if r.HTTPResponse != nil {
|
||||||
|
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
|
||||||
|
dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody)
|
||||||
|
msg = string(dumpedBody)
|
||||||
|
} else if r.Error != nil {
|
||||||
|
msg = r.Error.Error()
|
||||||
|
}
|
||||||
|
r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg))
|
||||||
|
}
|
90
vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
generated
vendored
Normal file
90
vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
generated
vendored
Normal file
|
@ -0,0 +1,90 @@
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultRetryer implements basic retry logic using exponential backoff for
|
||||||
|
// most services. If you want to implement custom retry logic, implement the
|
||||||
|
// request.Retryer interface or create a structure type that composes this
|
||||||
|
// struct and override the specific methods. For example, to override only
|
||||||
|
// the MaxRetries method:
|
||||||
|
//
|
||||||
|
// type retryer struct {
|
||||||
|
// service.DefaultRetryer
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// // This implementation always has 100 max retries
|
||||||
|
// func (d retryer) MaxRetries() uint { return 100 }
|
||||||
|
type DefaultRetryer struct {
|
||||||
|
NumMaxRetries int
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxRetries returns the number of maximum returns the service will use to make
|
||||||
|
// an individual API request.
|
||||||
|
func (d DefaultRetryer) MaxRetries() int {
|
||||||
|
return d.NumMaxRetries
|
||||||
|
}
|
||||||
|
|
||||||
|
var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
|
||||||
|
|
||||||
|
// RetryRules returns the delay duration before retrying this request again
|
||||||
|
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
|
||||||
|
// Set the upper limit of delay in retrying at ~five minutes
|
||||||
|
minTime := 30
|
||||||
|
throttle := d.shouldThrottle(r)
|
||||||
|
if throttle {
|
||||||
|
minTime = 500
|
||||||
|
}
|
||||||
|
|
||||||
|
retryCount := r.RetryCount
|
||||||
|
if retryCount > 13 {
|
||||||
|
retryCount = 13
|
||||||
|
} else if throttle && retryCount > 8 {
|
||||||
|
retryCount = 8
|
||||||
|
}
|
||||||
|
|
||||||
|
delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
|
||||||
|
return time.Duration(delay) * time.Millisecond
|
||||||
|
}
|
||||||
|
|
||||||
|
// ShouldRetry returns true if the request should be retried.
|
||||||
|
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
|
||||||
|
if r.HTTPResponse.StatusCode >= 500 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return r.IsErrorRetryable() || d.shouldThrottle(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ShouldThrottle returns true if the request should be throttled.
|
||||||
|
func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
|
||||||
|
if r.HTTPResponse.StatusCode == 502 ||
|
||||||
|
r.HTTPResponse.StatusCode == 503 ||
|
||||||
|
r.HTTPResponse.StatusCode == 504 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return r.IsErrorThrottle()
|
||||||
|
}
|
||||||
|
|
||||||
|
// lockedSource is a thread-safe implementation of rand.Source
|
||||||
|
type lockedSource struct {
|
||||||
|
lk sync.Mutex
|
||||||
|
src rand.Source
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *lockedSource) Int63() (n int64) {
|
||||||
|
r.lk.Lock()
|
||||||
|
n = r.src.Int63()
|
||||||
|
r.lk.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *lockedSource) Seed(seed int64) {
|
||||||
|
r.lk.Lock()
|
||||||
|
r.src.Seed(seed)
|
||||||
|
r.lk.Unlock()
|
||||||
|
}
|
12
vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
generated
vendored
Normal file
12
vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
package metadata
|
||||||
|
|
||||||
|
// ClientInfo wraps immutable data from the client.Client structure.
|
||||||
|
type ClientInfo struct {
|
||||||
|
ServiceName string
|
||||||
|
APIVersion string
|
||||||
|
Endpoint string
|
||||||
|
SigningName string
|
||||||
|
SigningRegion string
|
||||||
|
JSONVersion string
|
||||||
|
TargetPrefix string
|
||||||
|
}
|
422
vendor/github.com/aws/aws-sdk-go/aws/config.go
generated
vendored
Normal file
422
vendor/github.com/aws/aws-sdk-go/aws/config.go
generated
vendored
Normal file
|
@ -0,0 +1,422 @@
|
||||||
|
package aws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UseServiceDefaultRetries instructs the config to use the service's own
|
||||||
|
// default number of retries. This will be the default action if
|
||||||
|
// Config.MaxRetries is nil also.
|
||||||
|
const UseServiceDefaultRetries = -1
|
||||||
|
|
||||||
|
// RequestRetryer is an alias for a type that implements the request.Retryer
|
||||||
|
// interface.
|
||||||
|
type RequestRetryer interface{}
|
||||||
|
|
||||||
|
// A Config provides service configuration for service clients. By default,
|
||||||
|
// all clients will use the defaults.DefaultConfig tructure.
|
||||||
|
//
|
||||||
|
// // Create Session with MaxRetry configuration to be shared by multiple
|
||||||
|
// // service clients.
|
||||||
|
// sess, err := session.NewSession(&aws.Config{
|
||||||
|
// MaxRetries: aws.Int(3),
|
||||||
|
// })
|
||||||
|
//
|
||||||
|
// // Create S3 service client with a specific Region.
|
||||||
|
// svc := s3.New(sess, &aws.Config{
|
||||||
|
// Region: aws.String("us-west-2"),
|
||||||
|
// })
|
||||||
|
type Config struct {
|
||||||
|
// Enables verbose error printing of all credential chain errors.
|
||||||
|
// Should be used when wanting to see all errors while attempting to
|
||||||
|
// retrieve credentials.
|
||||||
|
CredentialsChainVerboseErrors *bool
|
||||||
|
|
||||||
|
// The credentials object to use when signing requests. Defaults to a
|
||||||
|
// chain of credential providers to search for credentials in environment
|
||||||
|
// variables, shared credential file, and EC2 Instance Roles.
|
||||||
|
Credentials *credentials.Credentials
|
||||||
|
|
||||||
|
// An optional endpoint URL (hostname only or fully qualified URI)
|
||||||
|
// that overrides the default generated endpoint for a client. Set this
|
||||||
|
// to `""` to use the default generated endpoint.
|
||||||
|
//
|
||||||
|
// @note You must still provide a `Region` value when specifying an
|
||||||
|
// endpoint for a client.
|
||||||
|
Endpoint *string
|
||||||
|
|
||||||
|
// The region to send requests to. This parameter is required and must
|
||||||
|
// be configured globally or on a per-client basis unless otherwise
|
||||||
|
// noted. A full list of regions is found in the "Regions and Endpoints"
|
||||||
|
// document.
|
||||||
|
//
|
||||||
|
// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
|
||||||
|
// AWS Regions and Endpoints
|
||||||
|
Region *string
|
||||||
|
|
||||||
|
// Set this to `true` to disable SSL when sending requests. Defaults
|
||||||
|
// to `false`.
|
||||||
|
DisableSSL *bool
|
||||||
|
|
||||||
|
// The HTTP client to use when sending requests. Defaults to
|
||||||
|
// `http.DefaultClient`.
|
||||||
|
HTTPClient *http.Client
|
||||||
|
|
||||||
|
// An integer value representing the logging level. The default log level
|
||||||
|
// is zero (LogOff), which represents no logging. To enable logging set
|
||||||
|
// to a LogLevel Value.
|
||||||
|
LogLevel *LogLevelType
|
||||||
|
|
||||||
|
// The logger writer interface to write logging messages to. Defaults to
|
||||||
|
// standard out.
|
||||||
|
Logger Logger
|
||||||
|
|
||||||
|
// The maximum number of times that a request will be retried for failures.
|
||||||
|
// Defaults to -1, which defers the max retry setting to the service
|
||||||
|
// specific configuration.
|
||||||
|
MaxRetries *int
|
||||||
|
|
||||||
|
// Retryer guides how HTTP requests should be retried in case of
|
||||||
|
// recoverable failures.
|
||||||
|
//
|
||||||
|
// When nil or the value does not implement the request.Retryer interface,
|
||||||
|
// the request.DefaultRetryer will be used.
|
||||||
|
//
|
||||||
|
// When both Retryer and MaxRetries are non-nil, the former is used and
|
||||||
|
// the latter ignored.
|
||||||
|
//
|
||||||
|
// To set the Retryer field in a type-safe manner and with chaining, use
|
||||||
|
// the request.WithRetryer helper function:
|
||||||
|
//
|
||||||
|
// cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
|
||||||
|
//
|
||||||
|
Retryer RequestRetryer
|
||||||
|
|
||||||
|
// Disables semantic parameter validation, which validates input for
|
||||||
|
// missing required fields and/or other semantic request input errors.
|
||||||
|
DisableParamValidation *bool
|
||||||
|
|
||||||
|
// Disables the computation of request and response checksums, e.g.,
|
||||||
|
// CRC32 checksums in Amazon DynamoDB.
|
||||||
|
DisableComputeChecksums *bool
|
||||||
|
|
||||||
|
// Set this to `true` to force the request to use path-style addressing,
|
||||||
|
// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
|
||||||
|
// will use virtual hosted bucket addressing when possible
|
||||||
|
// (`http://BUCKET.s3.amazonaws.com/KEY`).
|
||||||
|
//
|
||||||
|
// @note This configuration option is specific to the Amazon S3 service.
|
||||||
|
// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
|
||||||
|
// Amazon S3: Virtual Hosting of Buckets
|
||||||
|
S3ForcePathStyle *bool
|
||||||
|
|
||||||
|
// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
|
||||||
|
// header to PUT requests over 2MB of content. 100-Continue instructs the
|
||||||
|
// HTTP client not to send the body until the service responds with a
|
||||||
|
// `continue` status. This is useful to prevent sending the request body
|
||||||
|
// until after the request is authenticated, and validated.
|
||||||
|
//
|
||||||
|
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
|
||||||
|
//
|
||||||
|
// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
|
||||||
|
// `ExpectContinueTimeout` for information on adjusting the continue wait
|
||||||
|
// timeout. https://golang.org/pkg/net/http/#Transport
|
||||||
|
//
|
||||||
|
// You should use this flag to disble 100-Continue if you experience issues
|
||||||
|
// with proxies or third party S3 compatible services.
|
||||||
|
S3Disable100Continue *bool
|
||||||
|
|
||||||
|
// Set this to `true` to enable S3 Accelerate feature. For all operations
|
||||||
|
// compatible with S3 Accelerate will use the accelerate endpoint for
|
||||||
|
// requests. Requests not compatible will fall back to normal S3 requests.
|
||||||
|
//
|
||||||
|
// The bucket must be enable for accelerate to be used with S3 client with
|
||||||
|
// accelerate enabled. If the bucket is not enabled for accelerate an error
|
||||||
|
// will be returned. The bucket name must be DNS compatible to also work
|
||||||
|
// with accelerate.
|
||||||
|
//
|
||||||
|
// Not compatible with UseDualStack requests will fail if both flags are
|
||||||
|
// specified.
|
||||||
|
S3UseAccelerate *bool
|
||||||
|
|
||||||
|
// Set this to `true` to disable the EC2Metadata client from overriding the
|
||||||
|
// default http.Client's Timeout. This is helpful if you do not want the
|
||||||
|
// EC2Metadata client to create a new http.Client. This options is only
|
||||||
|
// meaningful if you're not already using a custom HTTP client with the
|
||||||
|
// SDK. Enabled by default.
|
||||||
|
//
|
||||||
|
// Must be set and provided to the session.NewSession() in order to disable
|
||||||
|
// the EC2Metadata overriding the timeout for default credentials chain.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
// sess, err := session.NewSession(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true))
|
||||||
|
//
|
||||||
|
// svc := s3.New(sess)
|
||||||
|
//
|
||||||
|
EC2MetadataDisableTimeoutOverride *bool
|
||||||
|
|
||||||
|
// Instructs the endpiont to be generated for a service client to
|
||||||
|
// be the dual stack endpoint. The dual stack endpoint will support
|
||||||
|
// both IPv4 and IPv6 addressing.
|
||||||
|
//
|
||||||
|
// Setting this for a service which does not support dual stack will fail
|
||||||
|
// to make requets. It is not recommended to set this value on the session
|
||||||
|
// as it will apply to all service clients created with the session. Even
|
||||||
|
// services which don't support dual stack endpoints.
|
||||||
|
//
|
||||||
|
// If the Endpoint config value is also provided the UseDualStack flag
|
||||||
|
// will be ignored.
|
||||||
|
//
|
||||||
|
// Only supported with.
|
||||||
|
//
|
||||||
|
// sess, err := session.NewSession()
|
||||||
|
//
|
||||||
|
// svc := s3.New(sess, &aws.Config{
|
||||||
|
// UseDualStack: aws.Bool(true),
|
||||||
|
// })
|
||||||
|
UseDualStack *bool
|
||||||
|
|
||||||
|
// SleepDelay is an override for the func the SDK will call when sleeping
|
||||||
|
// during the lifecycle of a request. Specifically this will be used for
|
||||||
|
// request delays. This value should only be used for testing. To adjust
|
||||||
|
// the delay of a request see the aws/client.DefaultRetryer and
|
||||||
|
// aws/request.Retryer.
|
||||||
|
SleepDelay func(time.Duration)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewConfig returns a new Config pointer that can be chained with builder
|
||||||
|
// methods to set multiple configuration values inline without using pointers.
|
||||||
|
//
|
||||||
|
// // Create Session with MaxRetry configuration to be shared by multiple
|
||||||
|
// // service clients.
|
||||||
|
// sess, err := session.NewSession(aws.NewConfig().
|
||||||
|
// WithMaxRetries(3),
|
||||||
|
// )
|
||||||
|
//
|
||||||
|
// // Create S3 service client with a specific Region.
|
||||||
|
// svc := s3.New(sess, aws.NewConfig().
|
||||||
|
// WithRegion("us-west-2"),
|
||||||
|
// )
|
||||||
|
func NewConfig() *Config {
|
||||||
|
return &Config{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning
|
||||||
|
// a Config pointer.
|
||||||
|
func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
|
||||||
|
c.CredentialsChainVerboseErrors = &verboseErrs
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCredentials sets a config Credentials value returning a Config pointer
|
||||||
|
// for chaining.
|
||||||
|
func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
|
||||||
|
c.Credentials = creds
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithEndpoint sets a config Endpoint value returning a Config pointer for
|
||||||
|
// chaining.
|
||||||
|
func (c *Config) WithEndpoint(endpoint string) *Config {
|
||||||
|
c.Endpoint = &endpoint
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRegion sets a config Region value returning a Config pointer for
|
||||||
|
// chaining.
|
||||||
|
func (c *Config) WithRegion(region string) *Config {
|
||||||
|
c.Region = ®ion
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDisableSSL sets a config DisableSSL value returning a Config pointer
|
||||||
|
// for chaining.
|
||||||
|
func (c *Config) WithDisableSSL(disable bool) *Config {
|
||||||
|
c.DisableSSL = &disable
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithHTTPClient sets a config HTTPClient value returning a Config pointer
|
||||||
|
// for chaining.
|
||||||
|
func (c *Config) WithHTTPClient(client *http.Client) *Config {
|
||||||
|
c.HTTPClient = client
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithMaxRetries sets a config MaxRetries value returning a Config pointer
|
||||||
|
// for chaining.
|
||||||
|
func (c *Config) WithMaxRetries(max int) *Config {
|
||||||
|
c.MaxRetries = &max
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDisableParamValidation sets a config DisableParamValidation value
|
||||||
|
// returning a Config pointer for chaining.
|
||||||
|
func (c *Config) WithDisableParamValidation(disable bool) *Config {
|
||||||
|
c.DisableParamValidation = &disable
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDisableComputeChecksums sets a config DisableComputeChecksums value
|
||||||
|
// returning a Config pointer for chaining.
|
||||||
|
func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
|
||||||
|
c.DisableComputeChecksums = &disable
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithLogLevel sets a config LogLevel value returning a Config pointer for
|
||||||
|
// chaining.
|
||||||
|
func (c *Config) WithLogLevel(level LogLevelType) *Config {
|
||||||
|
c.LogLevel = &level
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithLogger sets a config Logger value returning a Config pointer for
|
||||||
|
// chaining.
|
||||||
|
func (c *Config) WithLogger(logger Logger) *Config {
|
||||||
|
c.Logger = logger
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
|
||||||
|
// pointer for chaining.
|
||||||
|
func (c *Config) WithS3ForcePathStyle(force bool) *Config {
|
||||||
|
c.S3ForcePathStyle = &force
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithS3Disable100Continue sets a config S3Disable100Continue value returning
|
||||||
|
// a Config pointer for chaining.
|
||||||
|
func (c *Config) WithS3Disable100Continue(disable bool) *Config {
|
||||||
|
c.S3Disable100Continue = &disable
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
|
||||||
|
// pointer for chaining.
|
||||||
|
func (c *Config) WithS3UseAccelerate(enable bool) *Config {
|
||||||
|
c.S3UseAccelerate = &enable
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUseDualStack sets a config UseDualStack value returning a Config
|
||||||
|
// pointer for chaining.
|
||||||
|
func (c *Config) WithUseDualStack(enable bool) *Config {
|
||||||
|
c.UseDualStack = &enable
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
|
||||||
|
// returning a Config pointer for chaining.
|
||||||
|
func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
|
||||||
|
c.EC2MetadataDisableTimeoutOverride = &enable
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithSleepDelay overrides the function used to sleep while waiting for the
|
||||||
|
// next retry. Defaults to time.Sleep.
|
||||||
|
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
|
||||||
|
c.SleepDelay = fn
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// MergeIn merges the passed in configs into the existing config object.
|
||||||
|
func (c *Config) MergeIn(cfgs ...*Config) {
|
||||||
|
for _, other := range cfgs {
|
||||||
|
mergeInConfig(c, other)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func mergeInConfig(dst *Config, other *Config) {
|
||||||
|
if other == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.CredentialsChainVerboseErrors != nil {
|
||||||
|
dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.Credentials != nil {
|
||||||
|
dst.Credentials = other.Credentials
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.Endpoint != nil {
|
||||||
|
dst.Endpoint = other.Endpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.Region != nil {
|
||||||
|
dst.Region = other.Region
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.DisableSSL != nil {
|
||||||
|
dst.DisableSSL = other.DisableSSL
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.HTTPClient != nil {
|
||||||
|
dst.HTTPClient = other.HTTPClient
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.LogLevel != nil {
|
||||||
|
dst.LogLevel = other.LogLevel
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.Logger != nil {
|
||||||
|
dst.Logger = other.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.MaxRetries != nil {
|
||||||
|
dst.MaxRetries = other.MaxRetries
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.Retryer != nil {
|
||||||
|
dst.Retryer = other.Retryer
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.DisableParamValidation != nil {
|
||||||
|
dst.DisableParamValidation = other.DisableParamValidation
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.DisableComputeChecksums != nil {
|
||||||
|
dst.DisableComputeChecksums = other.DisableComputeChecksums
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.S3ForcePathStyle != nil {
|
||||||
|
dst.S3ForcePathStyle = other.S3ForcePathStyle
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.S3Disable100Continue != nil {
|
||||||
|
dst.S3Disable100Continue = other.S3Disable100Continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.S3UseAccelerate != nil {
|
||||||
|
dst.S3UseAccelerate = other.S3UseAccelerate
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.UseDualStack != nil {
|
||||||
|
dst.UseDualStack = other.UseDualStack
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.EC2MetadataDisableTimeoutOverride != nil {
|
||||||
|
dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.SleepDelay != nil {
|
||||||
|
dst.SleepDelay = other.SleepDelay
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy will return a shallow copy of the Config object. If any additional
|
||||||
|
// configurations are provided they will be merged into the new config returned.
|
||||||
|
func (c *Config) Copy(cfgs ...*Config) *Config {
|
||||||
|
dst := &Config{}
|
||||||
|
dst.MergeIn(c)
|
||||||
|
|
||||||
|
for _, cfg := range cfgs {
|
||||||
|
dst.MergeIn(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
return dst
|
||||||
|
}
|
369
vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
generated
vendored
Normal file
369
vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
generated
vendored
Normal file
|
@ -0,0 +1,369 @@
|
||||||
|
package aws
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
// String returns a pointer to the string value passed in.
func String(v string) *string {
	return &v
}

// StringValue returns the value of the string pointer passed in or
// "" if the pointer is nil.
func StringValue(v *string) string {
	if v == nil {
		return ""
	}
	return *v
}

// StringSlice converts a slice of string values into a slice of
// string pointers.
func StringSlice(src []string) []*string {
	dst := make([]*string, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// StringValueSlice converts a slice of string pointers into a slice of
// string values. Nil entries become "".
func StringValueSlice(src []*string) []string {
	dst := make([]string, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// StringMap converts a string map of string values into a string
// map of string pointers.
func StringMap(src map[string]string) map[string]*string {
	dst := make(map[string]*string, len(src))
	for k, v := range src {
		v := v // copy so each entry points at distinct storage
		dst[k] = &v
	}
	return dst
}

// StringValueMap converts a string map of string pointers into a string
// map of string values. Nil entries are dropped.
func StringValueMap(src map[string]*string) map[string]string {
	dst := make(map[string]string, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}
// Bool returns a pointer to the bool value passed in.
func Bool(v bool) *bool {
	return &v
}

// BoolValue returns the value of the bool pointer passed in or
// false if the pointer is nil.
func BoolValue(v *bool) bool {
	if v == nil {
		return false
	}
	return *v
}

// BoolSlice converts a slice of bool values into a slice of
// bool pointers.
func BoolSlice(src []bool) []*bool {
	dst := make([]*bool, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// BoolValueSlice converts a slice of bool pointers into a slice of
// bool values. Nil entries become false.
func BoolValueSlice(src []*bool) []bool {
	dst := make([]bool, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// BoolMap converts a string map of bool values into a string
// map of bool pointers.
func BoolMap(src map[string]bool) map[string]*bool {
	dst := make(map[string]*bool, len(src))
	for k, v := range src {
		v := v // copy so each entry points at distinct storage
		dst[k] = &v
	}
	return dst
}

// BoolValueMap converts a string map of bool pointers into a string
// map of bool values. Nil entries are dropped.
func BoolValueMap(src map[string]*bool) map[string]bool {
	dst := make(map[string]bool, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}
// Int returns a pointer to the int value passed in.
func Int(v int) *int {
	return &v
}

// IntValue returns the value of the int pointer passed in or
// 0 if the pointer is nil.
func IntValue(v *int) int {
	if v == nil {
		return 0
	}
	return *v
}

// IntSlice converts a slice of int values into a slice of
// int pointers.
func IntSlice(src []int) []*int {
	dst := make([]*int, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// IntValueSlice converts a slice of int pointers into a slice of
// int values. Nil entries become 0.
func IntValueSlice(src []*int) []int {
	dst := make([]int, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// IntMap converts a string map of int values into a string
// map of int pointers.
func IntMap(src map[string]int) map[string]*int {
	dst := make(map[string]*int, len(src))
	for k, v := range src {
		v := v // copy so each entry points at distinct storage
		dst[k] = &v
	}
	return dst
}

// IntValueMap converts a string map of int pointers into a string
// map of int values. Nil entries are dropped.
func IntValueMap(src map[string]*int) map[string]int {
	dst := make(map[string]int, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}
// Int64 returns a pointer to the int64 value passed in.
func Int64(v int64) *int64 {
	return &v
}

// Int64Value returns the value of the int64 pointer passed in or
// 0 if the pointer is nil.
func Int64Value(v *int64) int64 {
	if v == nil {
		return 0
	}
	return *v
}

// Int64Slice converts a slice of int64 values into a slice of
// int64 pointers.
func Int64Slice(src []int64) []*int64 {
	dst := make([]*int64, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// Int64ValueSlice converts a slice of int64 pointers into a slice of
// int64 values. Nil entries become 0.
func Int64ValueSlice(src []*int64) []int64 {
	dst := make([]int64, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// Int64Map converts a string map of int64 values into a string
// map of int64 pointers.
func Int64Map(src map[string]int64) map[string]*int64 {
	dst := make(map[string]*int64, len(src))
	for k, v := range src {
		v := v // copy so each entry points at distinct storage
		dst[k] = &v
	}
	return dst
}

// Int64ValueMap converts a string map of int64 pointers into a string
// map of int64 values. Nil entries are dropped.
func Int64ValueMap(src map[string]*int64) map[string]int64 {
	dst := make(map[string]int64, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}
// Float64 returns a pointer to the float64 value passed in.
func Float64(v float64) *float64 {
	return &v
}

// Float64Value returns the value of the float64 pointer passed in or
// 0 if the pointer is nil.
func Float64Value(v *float64) float64 {
	if v == nil {
		return 0
	}
	return *v
}

// Float64Slice converts a slice of float64 values into a slice of
// float64 pointers.
func Float64Slice(src []float64) []*float64 {
	dst := make([]*float64, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// Float64ValueSlice converts a slice of float64 pointers into a slice of
// float64 values. Nil entries become 0.
func Float64ValueSlice(src []*float64) []float64 {
	dst := make([]float64, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// Float64Map converts a string map of float64 values into a string
// map of float64 pointers.
func Float64Map(src map[string]float64) map[string]*float64 {
	dst := make(map[string]*float64, len(src))
	for k, v := range src {
		v := v // copy so each entry points at distinct storage
		dst[k] = &v
	}
	return dst
}

// Float64ValueMap converts a string map of float64 pointers into a string
// map of float64 values. Nil entries are dropped.
func Float64ValueMap(src map[string]*float64) map[string]float64 {
	dst := make(map[string]float64, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}
// Time returns a pointer to the time.Time value passed in.
|
||||||
|
func Time(v time.Time) *time.Time {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeValue returns the value of the time.Time pointer passed in or
|
||||||
|
// time.Time{} if the pointer is nil.
|
||||||
|
func TimeValue(v *time.Time) time.Time {
|
||||||
|
if v != nil {
|
||||||
|
return *v
|
||||||
|
}
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
|
||||||
|
// The result is undefined if the Unix time cannot be represented by an int64.
|
||||||
|
// Which includes calling TimeUnixMilli on a zero Time is undefined.
|
||||||
|
//
|
||||||
|
// This utility is useful for service API's such as CloudWatch Logs which require
|
||||||
|
// their unix time values to be in milliseconds.
|
||||||
|
//
|
||||||
|
// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
|
||||||
|
func TimeUnixMilli(t time.Time) int64 {
|
||||||
|
return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeSlice converts a slice of time.Time values into a slice of
|
||||||
|
// time.Time pointers
|
||||||
|
func TimeSlice(src []time.Time) []*time.Time {
|
||||||
|
dst := make([]*time.Time, len(src))
|
||||||
|
for i := 0; i < len(src); i++ {
|
||||||
|
dst[i] = &(src[i])
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeValueSlice converts a slice of time.Time pointers into a slice of
|
||||||
|
// time.Time values
|
||||||
|
func TimeValueSlice(src []*time.Time) []time.Time {
|
||||||
|
dst := make([]time.Time, len(src))
|
||||||
|
for i := 0; i < len(src); i++ {
|
||||||
|
if src[i] != nil {
|
||||||
|
dst[i] = *(src[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeMap converts a string map of time.Time values into a string
|
||||||
|
// map of time.Time pointers
|
||||||
|
func TimeMap(src map[string]time.Time) map[string]*time.Time {
|
||||||
|
dst := make(map[string]*time.Time)
|
||||||
|
for k, val := range src {
|
||||||
|
v := val
|
||||||
|
dst[k] = &v
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeValueMap converts a string map of time.Time pointers into a string
|
||||||
|
// map of time.Time values
|
||||||
|
func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
|
||||||
|
dst := make(map[string]time.Time)
|
||||||
|
for k, val := range src {
|
||||||
|
if val != nil {
|
||||||
|
dst[k] = *val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
152
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
generated
vendored
Normal file
152
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
generated
vendored
Normal file
|
@ -0,0 +1,152 @@
|
||||||
|
package corehandlers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"regexp"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Interface for matching types which also have a Len method.
|
||||||
|
type lener interface {
|
||||||
|
Len() int
|
||||||
|
}
|
||||||
|
|
||||||
|
// BuildContentLengthHandler builds the content length of a request based on the body,
|
||||||
|
// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
|
||||||
|
// to determine request body length and no "Content-Length" was specified it will panic.
|
||||||
|
//
|
||||||
|
// The Content-Length will only be aded to the request if the length of the body
|
||||||
|
// is greater than 0. If the body is empty or the current `Content-Length`
|
||||||
|
// header is <= 0, the header will also be stripped.
|
||||||
|
var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
|
||||||
|
var length int64
|
||||||
|
|
||||||
|
if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
|
||||||
|
length, _ = strconv.ParseInt(slength, 10, 64)
|
||||||
|
} else {
|
||||||
|
switch body := r.Body.(type) {
|
||||||
|
case nil:
|
||||||
|
length = 0
|
||||||
|
case lener:
|
||||||
|
length = int64(body.Len())
|
||||||
|
case io.Seeker:
|
||||||
|
r.BodyStart, _ = body.Seek(0, 1)
|
||||||
|
end, _ := body.Seek(0, 2)
|
||||||
|
body.Seek(r.BodyStart, 0) // make sure to seek back to original location
|
||||||
|
length = end - r.BodyStart
|
||||||
|
default:
|
||||||
|
panic("Cannot get length of body, must provide `ContentLength`")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if length > 0 {
|
||||||
|
r.HTTPRequest.ContentLength = length
|
||||||
|
r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
|
||||||
|
} else {
|
||||||
|
r.HTTPRequest.ContentLength = 0
|
||||||
|
r.HTTPRequest.Header.Del("Content-Length")
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
|
||||||
|
// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
|
||||||
|
var SDKVersionUserAgentHandler = request.NamedHandler{
|
||||||
|
Name: "core.SDKVersionUserAgentHandler",
|
||||||
|
Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
|
||||||
|
runtime.Version(), runtime.GOOS, runtime.GOARCH),
|
||||||
|
}
|
||||||
|
|
||||||
|
var reStatusCode = regexp.MustCompile(`^(\d{3})`)
|
||||||
|
|
||||||
|
// SendHandler is a request handler to send service request using HTTP client.
|
||||||
|
var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) {
|
||||||
|
var err error
|
||||||
|
r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest)
|
||||||
|
if err != nil {
|
||||||
|
// Prevent leaking if an HTTPResponse was returned. Clean up
|
||||||
|
// the body.
|
||||||
|
if r.HTTPResponse != nil {
|
||||||
|
r.HTTPResponse.Body.Close()
|
||||||
|
}
|
||||||
|
// Capture the case where url.Error is returned for error processing
|
||||||
|
// response. e.g. 301 without location header comes back as string
|
||||||
|
// error and r.HTTPResponse is nil. Other url redirect errors will
|
||||||
|
// comeback in a similar method.
|
||||||
|
if e, ok := err.(*url.Error); ok && e.Err != nil {
|
||||||
|
if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
|
||||||
|
code, _ := strconv.ParseInt(s[1], 10, 64)
|
||||||
|
r.HTTPResponse = &http.Response{
|
||||||
|
StatusCode: int(code),
|
||||||
|
Status: http.StatusText(int(code)),
|
||||||
|
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if r.HTTPResponse == nil {
|
||||||
|
// Add a dummy request response object to ensure the HTTPResponse
|
||||||
|
// value is consistent.
|
||||||
|
r.HTTPResponse = &http.Response{
|
||||||
|
StatusCode: int(0),
|
||||||
|
Status: http.StatusText(int(0)),
|
||||||
|
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Catch all other request errors.
|
||||||
|
r.Error = awserr.New("RequestError", "send request failed", err)
|
||||||
|
r.Retryable = aws.Bool(true) // network errors are retryable
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
|
||||||
|
// ValidateResponseHandler is a request handler to validate service response.
|
||||||
|
var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
|
||||||
|
if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
|
||||||
|
// this may be replaced by an UnmarshalError handler
|
||||||
|
r.Error = awserr.New("UnknownError", "unknown error", nil)
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
|
||||||
|
// AfterRetryHandler performs final checks to determine if the request should
|
||||||
|
// be retried and how long to delay.
|
||||||
|
var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
|
||||||
|
// If one of the other handlers already set the retry state
|
||||||
|
// we don't want to override it based on the service's state
|
||||||
|
if r.Retryable == nil {
|
||||||
|
r.Retryable = aws.Bool(r.ShouldRetry(r))
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.WillRetry() {
|
||||||
|
r.RetryDelay = r.RetryRules(r)
|
||||||
|
r.Config.SleepDelay(r.RetryDelay)
|
||||||
|
|
||||||
|
// when the expired token exception occurs the credentials
|
||||||
|
// need to be expired locally so that the next request to
|
||||||
|
// get credentials will trigger a credentials refresh.
|
||||||
|
if r.IsErrorExpired() {
|
||||||
|
r.Config.Credentials.Expire()
|
||||||
|
}
|
||||||
|
|
||||||
|
r.RetryCount++
|
||||||
|
r.Error = nil
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
|
||||||
|
// ValidateEndpointHandler is a request handler to validate a request had the
|
||||||
|
// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
|
||||||
|
// region is not valid.
|
||||||
|
var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
|
||||||
|
if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
|
||||||
|
r.Error = aws.ErrMissingRegion
|
||||||
|
} else if r.ClientInfo.Endpoint == "" {
|
||||||
|
r.Error = aws.ErrMissingEndpoint
|
||||||
|
}
|
||||||
|
}}
|
17
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
generated
vendored
Normal file
17
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
||||||
|
package corehandlers
|
||||||
|
|
||||||
|
import "github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
|
||||||
|
// ValidateParametersHandler is a request handler to validate the input parameters.
|
||||||
|
// Validating parameters only has meaning if done prior to the request being sent.
|
||||||
|
var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
|
||||||
|
if !r.ParamsFilled() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := r.Params.(request.Validator); ok {
|
||||||
|
if err := v.Validate(); err != nil {
|
||||||
|
r.Error = err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}}
|
100
vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
generated
vendored
Normal file
100
vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
generated
vendored
Normal file
|
@ -0,0 +1,100 @@
|
||||||
|
package credentials
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrNoValidProvidersFoundInChain Is returned when there are no valid
|
||||||
|
// providers in the ChainProvider.
|
||||||
|
//
|
||||||
|
// This has been deprecated. For verbose error messaging set
|
||||||
|
// aws.Config.CredentialsChainVerboseErrors to true
|
||||||
|
//
|
||||||
|
// @readonly
|
||||||
|
ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
|
||||||
|
`no valid providers in chain. Deprecated.
|
||||||
|
For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
|
||||||
|
nil)
|
||||||
|
)
|
||||||
|
|
||||||
|
// A ChainProvider will search for a provider which returns credentials
|
||||||
|
// and cache that provider until Retrieve is called again.
|
||||||
|
//
|
||||||
|
// The ChainProvider provides a way of chaining multiple providers together
|
||||||
|
// which will pick the first available using priority order of the Providers
|
||||||
|
// in the list.
|
||||||
|
//
|
||||||
|
// If none of the Providers retrieve valid credentials Value, ChainProvider's
|
||||||
|
// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
|
||||||
|
//
|
||||||
|
// If a Provider is found which returns valid credentials Value ChainProvider
|
||||||
|
// will cache that Provider for all calls to IsExpired(), until Retrieve is
|
||||||
|
// called again.
|
||||||
|
//
|
||||||
|
// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
|
||||||
|
// In this example EnvProvider will first check if any credentials are available
|
||||||
|
// vai the environment variables. If there are none ChainProvider will check
|
||||||
|
// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
|
||||||
|
// does not return any credentials ChainProvider will return the error
|
||||||
|
// ErrNoValidProvidersFoundInChain
|
||||||
|
//
|
||||||
|
// creds := NewChainCredentials(
|
||||||
|
// []Provider{
|
||||||
|
// &EnvProvider{},
|
||||||
|
// &EC2RoleProvider{
|
||||||
|
// Client: ec2metadata.New(sess),
|
||||||
|
// },
|
||||||
|
// })
|
||||||
|
//
|
||||||
|
// // Usage of ChainCredentials with aws.Config
|
||||||
|
// svc := ec2.New(&aws.Config{Credentials: creds})
|
||||||
|
//
|
||||||
|
type ChainProvider struct {
|
||||||
|
Providers []Provider
|
||||||
|
curr Provider
|
||||||
|
VerboseErrors bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewChainCredentials returns a pointer to a new Credentials object
|
||||||
|
// wrapping a chain of providers.
|
||||||
|
func NewChainCredentials(providers []Provider) *Credentials {
|
||||||
|
return NewCredentials(&ChainProvider{
|
||||||
|
Providers: append([]Provider{}, providers...),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve returns the credentials value or error if no provider returned
|
||||||
|
// without error.
|
||||||
|
//
|
||||||
|
// If a provider is found it will be cached and any calls to IsExpired()
|
||||||
|
// will return the expired state of the cached provider.
|
||||||
|
func (c *ChainProvider) Retrieve() (Value, error) {
|
||||||
|
var errs []error
|
||||||
|
for _, p := range c.Providers {
|
||||||
|
creds, err := p.Retrieve()
|
||||||
|
if err == nil {
|
||||||
|
c.curr = p
|
||||||
|
return creds, nil
|
||||||
|
}
|
||||||
|
errs = append(errs, err)
|
||||||
|
}
|
||||||
|
c.curr = nil
|
||||||
|
|
||||||
|
var err error
|
||||||
|
err = ErrNoValidProvidersFoundInChain
|
||||||
|
if c.VerboseErrors {
|
||||||
|
err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
|
||||||
|
}
|
||||||
|
return Value{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsExpired will returned the expired state of the currently cached provider
|
||||||
|
// if there is one. If there is no current provider, true will be returned.
|
||||||
|
func (c *ChainProvider) IsExpired() bool {
|
||||||
|
if c.curr != nil {
|
||||||
|
return c.curr.IsExpired()
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
223
vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
generated
vendored
Normal file
223
vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
generated
vendored
Normal file
|
@ -0,0 +1,223 @@
|
||||||
|
// Package credentials provides credential retrieval and management
|
||||||
|
//
|
||||||
|
// The Credentials is the primary method of getting access to and managing
|
||||||
|
// credentials Values. Using dependency injection retrieval of the credential
|
||||||
|
// values is handled by a object which satisfies the Provider interface.
|
||||||
|
//
|
||||||
|
// By default the Credentials.Get() will cache the successful result of a
|
||||||
|
// Provider's Retrieve() until Provider.IsExpired() returns true. At which
|
||||||
|
// point Credentials will call Provider's Retrieve() to get new credential Value.
|
||||||
|
//
|
||||||
|
// The Provider is responsible for determining when credentials Value have expired.
|
||||||
|
// It is also important to note that Credentials will always call Retrieve the
|
||||||
|
// first time Credentials.Get() is called.
|
||||||
|
//
|
||||||
|
// Example of using the environment variable credentials.
|
||||||
|
//
|
||||||
|
// creds := NewEnvCredentials()
|
||||||
|
//
|
||||||
|
// // Retrieve the credentials value
|
||||||
|
// credValue, err := creds.Get()
|
||||||
|
// if err != nil {
|
||||||
|
// // handle error
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Example of forcing credentials to expire and be refreshed on the next Get().
|
||||||
|
// This may be helpful to proactively expire credentials and refresh them sooner
|
||||||
|
// than they would naturally expire on their own.
|
||||||
|
//
|
||||||
|
// creds := NewCredentials(&EC2RoleProvider{})
|
||||||
|
// creds.Expire()
|
||||||
|
// credsValue, err := creds.Get()
|
||||||
|
// // New credentials will be retrieved instead of from cache.
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Custom Provider
|
||||||
|
//
|
||||||
|
// Each Provider built into this package also provides a helper method to generate
|
||||||
|
// a Credentials pointer setup with the provider. To use a custom Provider just
|
||||||
|
// create a type which satisfies the Provider interface and pass it to the
|
||||||
|
// NewCredentials method.
|
||||||
|
//
|
||||||
|
// type MyProvider struct{}
|
||||||
|
// func (m *MyProvider) Retrieve() (Value, error) {...}
|
||||||
|
// func (m *MyProvider) IsExpired() bool {...}
|
||||||
|
//
|
||||||
|
// creds := NewCredentials(&MyProvider{})
|
||||||
|
// credValue, err := creds.Get()
|
||||||
|
//
|
||||||
|
package credentials
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AnonymousCredentials is an empty Credential object that can be used as
|
||||||
|
// dummy placeholder credentials for requests that do not need signed.
|
||||||
|
//
|
||||||
|
// This Credentials can be used to configure a service to not sign requests
|
||||||
|
// when making service API calls. For example, when accessing public
|
||||||
|
// s3 buckets.
|
||||||
|
//
|
||||||
|
// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials})
|
||||||
|
// // Access public S3 buckets.
|
||||||
|
//
|
||||||
|
// @readonly
|
||||||
|
var AnonymousCredentials = NewStaticCredentials("", "", "")
|
||||||
|
|
||||||
|
// A Value is the AWS credentials value for individual credential fields.
|
||||||
|
type Value struct {
|
||||||
|
// AWS Access key ID
|
||||||
|
AccessKeyID string
|
||||||
|
|
||||||
|
// AWS Secret Access Key
|
||||||
|
SecretAccessKey string
|
||||||
|
|
||||||
|
// AWS Session Token
|
||||||
|
SessionToken string
|
||||||
|
|
||||||
|
// Provider used to get credentials
|
||||||
|
ProviderName string
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Provider is the interface for any component which will provide credentials
|
||||||
|
// Value. A provider is required to manage its own Expired state, and what to
|
||||||
|
// be expired means.
|
||||||
|
//
|
||||||
|
// The Provider should not need to implement its own mutexes, because
|
||||||
|
// that will be managed by Credentials.
|
||||||
|
type Provider interface {
|
||||||
|
// Refresh returns nil if it successfully retrieved the value.
|
||||||
|
// Error is returned if the value were not obtainable, or empty.
|
||||||
|
Retrieve() (Value, error)
|
||||||
|
|
||||||
|
// IsExpired returns if the credentials are no longer valid, and need
|
||||||
|
// to be retrieved.
|
||||||
|
IsExpired() bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Expiry provides shared expiration logic to be used by credentials
|
||||||
|
// providers to implement expiry functionality.
|
||||||
|
//
|
||||||
|
// The best method to use this struct is as an anonymous field within the
|
||||||
|
// provider's struct.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
// type EC2RoleProvider struct {
|
||||||
|
// Expiry
|
||||||
|
// ...
|
||||||
|
// }
|
||||||
|
type Expiry struct {
|
||||||
|
// The date/time when to expire on
|
||||||
|
expiration time.Time
|
||||||
|
|
||||||
|
// If set will be used by IsExpired to determine the current time.
|
||||||
|
// Defaults to time.Now if CurrentTime is not set. Available for testing
|
||||||
|
// to be able to mock out the current time.
|
||||||
|
CurrentTime func() time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetExpiration sets the expiration IsExpired will check when called.
|
||||||
|
//
|
||||||
|
// If window is greater than 0 the expiration time will be reduced by the
|
||||||
|
// window value.
|
||||||
|
//
|
||||||
|
// Using a window is helpful to trigger credentials to expire sooner than
|
||||||
|
// the expiration time given to ensure no requests are made with expired
|
||||||
|
// tokens.
|
||||||
|
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
|
||||||
|
e.expiration = expiration
|
||||||
|
if window > 0 {
|
||||||
|
e.expiration = e.expiration.Add(-window)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsExpired returns if the credentials are expired.
|
||||||
|
func (e *Expiry) IsExpired() bool {
|
||||||
|
if e.CurrentTime == nil {
|
||||||
|
e.CurrentTime = time.Now
|
||||||
|
}
|
||||||
|
return e.expiration.Before(e.CurrentTime())
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Credentials provides synchronous safe retrieval of AWS credentials Value.
|
||||||
|
// Credentials will cache the credentials value until they expire. Once the value
|
||||||
|
// expires the next Get will attempt to retrieve valid credentials.
|
||||||
|
//
|
||||||
|
// Credentials is safe to use across multiple goroutines and will manage the
|
||||||
|
// synchronous state so the Providers do not need to implement their own
|
||||||
|
// synchronization.
|
||||||
|
//
|
||||||
|
// The first Credentials.Get() will always call Provider.Retrieve() to get the
|
||||||
|
// first instance of the credentials Value. All calls to Get() after that
|
||||||
|
// will return the cached credentials Value until IsExpired() returns true.
|
||||||
|
type Credentials struct {
|
||||||
|
creds Value
|
||||||
|
forceRefresh bool
|
||||||
|
m sync.Mutex
|
||||||
|
|
||||||
|
provider Provider
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCredentials returns a pointer to a new Credentials with the provider set.
|
||||||
|
func NewCredentials(provider Provider) *Credentials {
|
||||||
|
return &Credentials{
|
||||||
|
provider: provider,
|
||||||
|
forceRefresh: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns the credentials value, or error if the credentials Value failed
|
||||||
|
// to be retrieved.
|
||||||
|
//
|
||||||
|
// Will return the cached credentials Value if it has not expired. If the
|
||||||
|
// credentials Value has expired the Provider's Retrieve() will be called
|
||||||
|
// to refresh the credentials.
|
||||||
|
//
|
||||||
|
// If Credentials.Expire() was called the credentials Value will be force
|
||||||
|
// expired, and the next call to Get() will cause them to be refreshed.
|
||||||
|
func (c *Credentials) Get() (Value, error) {
|
||||||
|
c.m.Lock()
|
||||||
|
defer c.m.Unlock()
|
||||||
|
|
||||||
|
if c.isExpired() {
|
||||||
|
creds, err := c.provider.Retrieve()
|
||||||
|
if err != nil {
|
||||||
|
return Value{}, err
|
||||||
|
}
|
||||||
|
c.creds = creds
|
||||||
|
c.forceRefresh = false
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.creds, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Expire expires the credentials and forces them to be retrieved on the
|
||||||
|
// next call to Get().
|
||||||
|
//
|
||||||
|
// This will override the Provider's expired state, and force Credentials
|
||||||
|
// to call the Provider's Retrieve().
|
||||||
|
func (c *Credentials) Expire() {
|
||||||
|
c.m.Lock()
|
||||||
|
defer c.m.Unlock()
|
||||||
|
|
||||||
|
c.forceRefresh = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsExpired returns if the credentials are no longer valid, and need
|
||||||
|
// to be retrieved.
|
||||||
|
//
|
||||||
|
// If the Credentials were forced to be expired with Expire() this will
|
||||||
|
// reflect that override.
|
||||||
|
func (c *Credentials) IsExpired() bool {
|
||||||
|
c.m.Lock()
|
||||||
|
defer c.m.Unlock()
|
||||||
|
|
||||||
|
return c.isExpired()
|
||||||
|
}
|
||||||
|
|
||||||
|
// isExpired helper method wrapping the definition of expired credentials.
|
||||||
|
func (c *Credentials) isExpired() bool {
|
||||||
|
return c.forceRefresh || c.provider.IsExpired()
|
||||||
|
}
|
178
vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
generated
vendored
Normal file
178
vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
generated
vendored
Normal file
|
@ -0,0 +1,178 @@
|
||||||
|
package ec2rolecreds
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/client"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ProviderName provides a name of EC2Role provider
|
||||||
|
const ProviderName = "EC2RoleProvider"
|
||||||
|
|
||||||
|
// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if
|
||||||
|
// those credentials are expired.
|
||||||
|
//
|
||||||
|
// Example how to configure the EC2RoleProvider with custom http Client, Endpoint
|
||||||
|
// or ExpiryWindow
|
||||||
|
//
|
||||||
|
// p := &ec2rolecreds.EC2RoleProvider{
|
||||||
|
// // Pass in a custom timeout to be used when requesting
|
||||||
|
// // IAM EC2 Role credentials.
|
||||||
|
// Client: ec2metadata.New(sess, aws.Config{
|
||||||
|
// HTTPClient: &http.Client{Timeout: 10 * time.Second},
|
||||||
|
// }),
|
||||||
|
//
|
||||||
|
// // Do not use early expiry of credentials. If a non zero value is
|
||||||
|
// // specified the credentials will be expired early
|
||||||
|
// ExpiryWindow: 0,
|
||||||
|
// }
|
||||||
|
type EC2RoleProvider struct {
|
||||||
|
credentials.Expiry
|
||||||
|
|
||||||
|
// Required EC2Metadata client to use when connecting to EC2 metadata service.
|
||||||
|
Client *ec2metadata.EC2Metadata
|
||||||
|
|
||||||
|
// ExpiryWindow will allow the credentials to trigger refreshing prior to
|
||||||
|
// the credentials actually expiring. This is beneficial so race conditions
|
||||||
|
// with expiring credentials do not cause request to fail unexpectedly
|
||||||
|
// due to ExpiredTokenException exceptions.
|
||||||
|
//
|
||||||
|
// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
|
||||||
|
// 10 seconds before the credentials are actually expired.
|
||||||
|
//
|
||||||
|
// If ExpiryWindow is 0 or less it will be ignored.
|
||||||
|
ExpiryWindow time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCredentials returns a pointer to a new Credentials object wrapping
|
||||||
|
// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client.
|
||||||
|
// The ConfigProvider is satisfied by the session.Session type.
|
||||||
|
func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
|
||||||
|
p := &EC2RoleProvider{
|
||||||
|
Client: ec2metadata.New(c),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, option := range options {
|
||||||
|
option(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
return credentials.NewCredentials(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
|
||||||
|
// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2
|
||||||
|
// metadata service.
|
||||||
|
func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
|
||||||
|
p := &EC2RoleProvider{
|
||||||
|
Client: client,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, option := range options {
|
||||||
|
option(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
return credentials.NewCredentials(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve retrieves credentials from the EC2 service.
|
||||||
|
// Error will be returned if the request fails, or unable to extract
|
||||||
|
// the desired credentials.
|
||||||
|
func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
|
||||||
|
credsList, err := requestCredList(m.Client)
|
||||||
|
if err != nil {
|
||||||
|
return credentials.Value{ProviderName: ProviderName}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(credsList) == 0 {
|
||||||
|
return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
|
||||||
|
}
|
||||||
|
credsName := credsList[0]
|
||||||
|
|
||||||
|
roleCreds, err := requestCred(m.Client, credsName)
|
||||||
|
if err != nil {
|
||||||
|
return credentials.Value{ProviderName: ProviderName}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
|
||||||
|
|
||||||
|
return credentials.Value{
|
||||||
|
AccessKeyID: roleCreds.AccessKeyID,
|
||||||
|
SecretAccessKey: roleCreds.SecretAccessKey,
|
||||||
|
SessionToken: roleCreds.Token,
|
||||||
|
ProviderName: ProviderName,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// A ec2RoleCredRespBody provides the shape for unmarshalling credential
|
||||||
|
// request responses.
|
||||||
|
type ec2RoleCredRespBody struct {
|
||||||
|
// Success State
|
||||||
|
Expiration time.Time
|
||||||
|
AccessKeyID string
|
||||||
|
SecretAccessKey string
|
||||||
|
Token string
|
||||||
|
|
||||||
|
// Error state
|
||||||
|
Code string
|
||||||
|
Message string
|
||||||
|
}
|
||||||
|
|
||||||
|
const iamSecurityCredsPath = "/iam/security-credentials"
|
||||||
|
|
||||||
|
// requestCredList requests a list of credentials from the EC2 service.
|
||||||
|
// If there are no credentials, or there is an error making or receiving the request
|
||||||
|
func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
|
||||||
|
resp, err := client.GetMetadata(iamSecurityCredsPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
credsList := []string{}
|
||||||
|
s := bufio.NewScanner(strings.NewReader(resp))
|
||||||
|
for s.Scan() {
|
||||||
|
credsList = append(credsList, s.Text())
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := s.Err(); err != nil {
|
||||||
|
return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return credsList, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// requestCred requests the credentials for a specific credentials from the EC2 service.
|
||||||
|
//
|
||||||
|
// If the credentials cannot be found, or there is an error reading the response
|
||||||
|
// and error will be returned.
|
||||||
|
func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
|
||||||
|
resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
|
||||||
|
if err != nil {
|
||||||
|
return ec2RoleCredRespBody{},
|
||||||
|
awserr.New("EC2RoleRequestError",
|
||||||
|
fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
|
||||||
|
err)
|
||||||
|
}
|
||||||
|
|
||||||
|
respCreds := ec2RoleCredRespBody{}
|
||||||
|
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
|
||||||
|
return ec2RoleCredRespBody{},
|
||||||
|
awserr.New("SerializationError",
|
||||||
|
fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
|
||||||
|
err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if respCreds.Code != "Success" {
|
||||||
|
// If an error code was returned something failed requesting the role.
|
||||||
|
return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
return respCreds, nil
|
||||||
|
}
|
191
vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
generated
vendored
Normal file
191
vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
generated
vendored
Normal file
|
@ -0,0 +1,191 @@
|
||||||
|
// Package endpointcreds provides support for retrieving credentials from an
|
||||||
|
// arbitrary HTTP endpoint.
|
||||||
|
//
|
||||||
|
// The credentials endpoint Provider can receive both static and refreshable
|
||||||
|
// credentials that will expire. Credentials are static when an "Expiration"
|
||||||
|
// value is not provided in the endpoint's response.
|
||||||
|
//
|
||||||
|
// Static credentials will never expire once they have been retrieved. The format
|
||||||
|
// of the static credentials response:
|
||||||
|
// {
|
||||||
|
// "AccessKeyId" : "MUA...",
|
||||||
|
// "SecretAccessKey" : "/7PC5om....",
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
|
||||||
|
// value in the response. The format of the refreshable credentials response:
|
||||||
|
// {
|
||||||
|
// "AccessKeyId" : "MUA...",
|
||||||
|
// "SecretAccessKey" : "/7PC5om....",
|
||||||
|
// "Token" : "AQoDY....=",
|
||||||
|
// "Expiration" : "2016-02-25T06:03:31Z"
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Errors should be returned in the following format and only returned with 400
|
||||||
|
// or 500 HTTP status codes.
|
||||||
|
// {
|
||||||
|
// "code": "ErrorCode",
|
||||||
|
// "message": "Helpful error message."
|
||||||
|
// }
|
||||||
|
package endpointcreds
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/client"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ProviderName is the name of the credentials provider.
|
||||||
|
const ProviderName = `CredentialsEndpointProvider`
|
||||||
|
|
||||||
|
// Provider satisfies the credentials.Provider interface, and is a client to
|
||||||
|
// retrieve credentials from an arbitrary endpoint.
|
||||||
|
type Provider struct {
|
||||||
|
staticCreds bool
|
||||||
|
credentials.Expiry
|
||||||
|
|
||||||
|
// Requires a AWS Client to make HTTP requests to the endpoint with.
|
||||||
|
// the Endpoint the request will be made to is provided by the aws.Config's
|
||||||
|
// Endpoint value.
|
||||||
|
Client *client.Client
|
||||||
|
|
||||||
|
// ExpiryWindow will allow the credentials to trigger refreshing prior to
|
||||||
|
// the credentials actually expiring. This is beneficial so race conditions
|
||||||
|
// with expiring credentials do not cause request to fail unexpectedly
|
||||||
|
// due to ExpiredTokenException exceptions.
|
||||||
|
//
|
||||||
|
// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
|
||||||
|
// 10 seconds before the credentials are actually expired.
|
||||||
|
//
|
||||||
|
// If ExpiryWindow is 0 or less it will be ignored.
|
||||||
|
ExpiryWindow time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewProviderClient returns a credentials Provider for retrieving AWS credentials
|
||||||
|
// from arbitrary endpoint.
|
||||||
|
func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
|
||||||
|
p := &Provider{
|
||||||
|
Client: client.New(
|
||||||
|
cfg,
|
||||||
|
metadata.ClientInfo{
|
||||||
|
ServiceName: "CredentialsEndpoint",
|
||||||
|
Endpoint: endpoint,
|
||||||
|
},
|
||||||
|
handlers,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
|
||||||
|
p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
|
||||||
|
p.Client.Handlers.Validate.Clear()
|
||||||
|
p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
|
||||||
|
|
||||||
|
for _, option := range options {
|
||||||
|
option(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
|
||||||
|
// from an arbitrary endpoint concurrently. The client will request the
|
||||||
|
func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
|
||||||
|
return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsExpired returns true if the credentials retrieved are expired, or not yet
|
||||||
|
// retrieved.
|
||||||
|
func (p *Provider) IsExpired() bool {
|
||||||
|
if p.staticCreds {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return p.Expiry.IsExpired()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve will attempt to request the credentials from the endpoint the Provider
|
||||||
|
// was configured for. And error will be returned if the retrieval fails.
|
||||||
|
func (p *Provider) Retrieve() (credentials.Value, error) {
|
||||||
|
resp, err := p.getCredentials()
|
||||||
|
if err != nil {
|
||||||
|
return credentials.Value{ProviderName: ProviderName},
|
||||||
|
awserr.New("CredentialsEndpointError", "failed to load credentials", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.Expiration != nil {
|
||||||
|
p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
|
||||||
|
} else {
|
||||||
|
p.staticCreds = true
|
||||||
|
}
|
||||||
|
|
||||||
|
return credentials.Value{
|
||||||
|
AccessKeyID: resp.AccessKeyID,
|
||||||
|
SecretAccessKey: resp.SecretAccessKey,
|
||||||
|
SessionToken: resp.Token,
|
||||||
|
ProviderName: ProviderName,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type getCredentialsOutput struct {
|
||||||
|
Expiration *time.Time
|
||||||
|
AccessKeyID string
|
||||||
|
SecretAccessKey string
|
||||||
|
Token string
|
||||||
|
}
|
||||||
|
|
||||||
|
type errorOutput struct {
|
||||||
|
Code string `json:"code"`
|
||||||
|
Message string `json:"message"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
|
||||||
|
op := &request.Operation{
|
||||||
|
Name: "GetCredentials",
|
||||||
|
HTTPMethod: "GET",
|
||||||
|
}
|
||||||
|
|
||||||
|
out := &getCredentialsOutput{}
|
||||||
|
req := p.Client.NewRequest(op, nil, out)
|
||||||
|
req.HTTPRequest.Header.Set("Accept", "application/json")
|
||||||
|
|
||||||
|
return out, req.Send()
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateEndpointHandler(r *request.Request) {
|
||||||
|
if len(r.ClientInfo.Endpoint) == 0 {
|
||||||
|
r.Error = aws.ErrMissingEndpoint
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalHandler(r *request.Request) {
|
||||||
|
defer r.HTTPResponse.Body.Close()
|
||||||
|
|
||||||
|
out := r.Data.(*getCredentialsOutput)
|
||||||
|
if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
|
||||||
|
r.Error = awserr.New("SerializationError",
|
||||||
|
"failed to decode endpoint credentials",
|
||||||
|
err,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalError(r *request.Request) {
|
||||||
|
defer r.HTTPResponse.Body.Close()
|
||||||
|
|
||||||
|
var errOut errorOutput
|
||||||
|
if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
|
||||||
|
r.Error = awserr.New("SerializationError",
|
||||||
|
"failed to decode endpoint credentials",
|
||||||
|
err,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Response body format is not consistent between metadata endpoints.
|
||||||
|
// Grab the error message as a string and include that as the source error
|
||||||
|
r.Error = awserr.New(errOut.Code, errOut.Message, nil)
|
||||||
|
}
|
77
vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
generated
vendored
Normal file
77
vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
generated
vendored
Normal file
|
@ -0,0 +1,77 @@
|
||||||
|
package credentials
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EnvProviderName provides a name of Env provider
|
||||||
|
const EnvProviderName = "EnvProvider"
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
|
||||||
|
// found in the process's environment.
|
||||||
|
//
|
||||||
|
// @readonly
|
||||||
|
ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
|
||||||
|
|
||||||
|
// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
|
||||||
|
// can't be found in the process's environment.
|
||||||
|
//
|
||||||
|
// @readonly
|
||||||
|
ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
|
||||||
|
)
|
||||||
|
|
||||||
|
// A EnvProvider retrieves credentials from the environment variables of the
|
||||||
|
// running process. Environment credentials never expire.
|
||||||
|
//
|
||||||
|
// Environment variables used:
|
||||||
|
//
|
||||||
|
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
|
||||||
|
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
|
||||||
|
type EnvProvider struct {
|
||||||
|
retrieved bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEnvCredentials returns a pointer to a new Credentials object
|
||||||
|
// wrapping the environment variable provider.
|
||||||
|
func NewEnvCredentials() *Credentials {
|
||||||
|
return NewCredentials(&EnvProvider{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve retrieves the keys from the environment.
|
||||||
|
func (e *EnvProvider) Retrieve() (Value, error) {
|
||||||
|
e.retrieved = false
|
||||||
|
|
||||||
|
id := os.Getenv("AWS_ACCESS_KEY_ID")
|
||||||
|
if id == "" {
|
||||||
|
id = os.Getenv("AWS_ACCESS_KEY")
|
||||||
|
}
|
||||||
|
|
||||||
|
secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
|
||||||
|
if secret == "" {
|
||||||
|
secret = os.Getenv("AWS_SECRET_KEY")
|
||||||
|
}
|
||||||
|
|
||||||
|
if id == "" {
|
||||||
|
return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
if secret == "" {
|
||||||
|
return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
e.retrieved = true
|
||||||
|
return Value{
|
||||||
|
AccessKeyID: id,
|
||||||
|
SecretAccessKey: secret,
|
||||||
|
SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
|
||||||
|
ProviderName: EnvProviderName,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsExpired returns if the credentials have been retrieved.
|
||||||
|
func (e *EnvProvider) IsExpired() bool {
|
||||||
|
return !e.retrieved
|
||||||
|
}
|
12
vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
generated
vendored
Normal file
12
vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
[default]
|
||||||
|
aws_access_key_id = accessKey
|
||||||
|
aws_secret_access_key = secret
|
||||||
|
aws_session_token = token
|
||||||
|
|
||||||
|
[no_token]
|
||||||
|
aws_access_key_id = accessKey
|
||||||
|
aws_secret_access_key = secret
|
||||||
|
|
||||||
|
[with_colon]
|
||||||
|
aws_access_key_id: accessKey
|
||||||
|
aws_secret_access_key: secret
|
151
vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
generated
vendored
Normal file
151
vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
generated
vendored
Normal file
|
@ -0,0 +1,151 @@
|
||||||
|
package credentials
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/go-ini/ini"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SharedCredsProviderName provides a name of SharedCreds provider
|
||||||
|
const SharedCredsProviderName = "SharedCredentialsProvider"
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
|
||||||
|
//
|
||||||
|
// @readonly
|
||||||
|
ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
|
||||||
|
)
|
||||||
|
|
||||||
|
// A SharedCredentialsProvider retrieves credentials from the current user's home
|
||||||
|
// directory, and keeps track if those credentials are expired.
|
||||||
|
//
|
||||||
|
// Profile ini file example: $HOME/.aws/credentials
|
||||||
|
type SharedCredentialsProvider struct {
|
||||||
|
// Path to the shared credentials file.
|
||||||
|
//
|
||||||
|
// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
|
||||||
|
// env value is empty will default to current user's home directory.
|
||||||
|
// Linux/OSX: "$HOME/.aws/credentials"
|
||||||
|
// Windows: "%USERPROFILE%\.aws\credentials"
|
||||||
|
Filename string
|
||||||
|
|
||||||
|
// AWS Profile to extract credentials from the shared credentials file. If empty
|
||||||
|
// will default to environment variable "AWS_PROFILE" or "default" if
|
||||||
|
// environment variable is also not set.
|
||||||
|
Profile string
|
||||||
|
|
||||||
|
// retrieved states if the credentials have been successfully retrieved.
|
||||||
|
retrieved bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSharedCredentials returns a pointer to a new Credentials object
|
||||||
|
// wrapping the Profile file provider.
|
||||||
|
func NewSharedCredentials(filename, profile string) *Credentials {
|
||||||
|
return NewCredentials(&SharedCredentialsProvider{
|
||||||
|
Filename: filename,
|
||||||
|
Profile: profile,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve reads and extracts the shared credentials from the current
|
||||||
|
// users home directory.
|
||||||
|
func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
|
||||||
|
p.retrieved = false
|
||||||
|
|
||||||
|
filename, err := p.filename()
|
||||||
|
if err != nil {
|
||||||
|
return Value{ProviderName: SharedCredsProviderName}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
creds, err := loadProfile(filename, p.profile())
|
||||||
|
if err != nil {
|
||||||
|
return Value{ProviderName: SharedCredsProviderName}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
p.retrieved = true
|
||||||
|
return creds, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsExpired returns if the shared credentials have expired.
|
||||||
|
func (p *SharedCredentialsProvider) IsExpired() bool {
|
||||||
|
return !p.retrieved
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadProfiles loads from the file pointed to by shared credentials filename for profile.
|
||||||
|
// The credentials retrieved from the profile will be returned or error. Error will be
|
||||||
|
// returned if it fails to read from the file, or the data is invalid.
|
||||||
|
func loadProfile(filename, profile string) (Value, error) {
|
||||||
|
config, err := ini.Load(filename)
|
||||||
|
if err != nil {
|
||||||
|
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
|
||||||
|
}
|
||||||
|
iniProfile, err := config.GetSection(profile)
|
||||||
|
if err != nil {
|
||||||
|
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
id, err := iniProfile.GetKey("aws_access_key_id")
|
||||||
|
if err != nil {
|
||||||
|
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
|
||||||
|
fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
|
||||||
|
err)
|
||||||
|
}
|
||||||
|
|
||||||
|
secret, err := iniProfile.GetKey("aws_secret_access_key")
|
||||||
|
if err != nil {
|
||||||
|
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
|
||||||
|
fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
|
||||||
|
nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default to empty string if not found
|
||||||
|
token := iniProfile.Key("aws_session_token")
|
||||||
|
|
||||||
|
return Value{
|
||||||
|
AccessKeyID: id.String(),
|
||||||
|
SecretAccessKey: secret.String(),
|
||||||
|
SessionToken: token.String(),
|
||||||
|
ProviderName: SharedCredsProviderName,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// filename returns the filename to use to read AWS shared credentials.
|
||||||
|
//
|
||||||
|
// Will return an error if the user's home directory path cannot be found.
|
||||||
|
func (p *SharedCredentialsProvider) filename() (string, error) {
|
||||||
|
if p.Filename == "" {
|
||||||
|
if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
|
||||||
|
return p.Filename, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
homeDir := os.Getenv("HOME") // *nix
|
||||||
|
if homeDir == "" { // Windows
|
||||||
|
homeDir = os.Getenv("USERPROFILE")
|
||||||
|
}
|
||||||
|
if homeDir == "" {
|
||||||
|
return "", ErrSharedCredentialsHomeNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Filename = filepath.Join(homeDir, ".aws", "credentials")
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.Filename, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// profile returns the AWS shared credentials profile. If empty will read
|
||||||
|
// environment variable "AWS_PROFILE". If that is not set profile will
|
||||||
|
// return "default".
|
||||||
|
func (p *SharedCredentialsProvider) profile() string {
|
||||||
|
if p.Profile == "" {
|
||||||
|
p.Profile = os.Getenv("AWS_PROFILE")
|
||||||
|
}
|
||||||
|
if p.Profile == "" {
|
||||||
|
p.Profile = "default"
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.Profile
|
||||||
|
}
|
57
vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
generated
vendored
Normal file
57
vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
generated
vendored
Normal file
|
@ -0,0 +1,57 @@
|
||||||
|
package credentials
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StaticProviderName provides a name of Static provider
|
||||||
|
const StaticProviderName = "StaticProvider"
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
|
||||||
|
//
|
||||||
|
// @readonly
|
||||||
|
ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
|
||||||
|
)
|
||||||
|
|
||||||
|
// A StaticProvider is a set of credentials which are set programmatically,
|
||||||
|
// and will never expire.
|
||||||
|
type StaticProvider struct {
|
||||||
|
Value
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewStaticCredentials returns a pointer to a new Credentials object
|
||||||
|
// wrapping a static credentials value provider.
|
||||||
|
func NewStaticCredentials(id, secret, token string) *Credentials {
|
||||||
|
return NewCredentials(&StaticProvider{Value: Value{
|
||||||
|
AccessKeyID: id,
|
||||||
|
SecretAccessKey: secret,
|
||||||
|
SessionToken: token,
|
||||||
|
}})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
|
||||||
|
// wrapping the static credentials value provide. Same as NewStaticCredentials
|
||||||
|
// but takes the creds Value instead of individual fields
|
||||||
|
func NewStaticCredentialsFromCreds(creds Value) *Credentials {
|
||||||
|
return NewCredentials(&StaticProvider{Value: creds})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve returns the credentials or error if the credentials are invalid.
|
||||||
|
func (s *StaticProvider) Retrieve() (Value, error) {
|
||||||
|
if s.AccessKeyID == "" || s.SecretAccessKey == "" {
|
||||||
|
return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(s.Value.ProviderName) == 0 {
|
||||||
|
s.Value.ProviderName = StaticProviderName
|
||||||
|
}
|
||||||
|
return s.Value, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsExpired returns if the credentials are expired.
|
||||||
|
//
|
||||||
|
// For StaticProvider, the credentials never expired.
|
||||||
|
func (s *StaticProvider) IsExpired() bool {
|
||||||
|
return false
|
||||||
|
}
|
161
vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
generated
vendored
Normal file
161
vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
generated
vendored
Normal file
|
@ -0,0 +1,161 @@
|
||||||
|
// Package stscreds are credential Providers to retrieve STS AWS credentials.
|
||||||
|
//
|
||||||
|
// STS provides multiple ways to retrieve credentials which can be used when making
|
||||||
|
// future AWS service API operation calls.
|
||||||
|
package stscreds
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/client"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/service/sts"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ProviderName provides a name of AssumeRole provider
|
||||||
|
const ProviderName = "AssumeRoleProvider"
|
||||||
|
|
||||||
|
// AssumeRoler represents the minimal subset of the STS client API used by this provider.
|
||||||
|
type AssumeRoler interface {
|
||||||
|
AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultDuration is the default amount of time in minutes that the credentials
|
||||||
|
// will be valid for.
|
||||||
|
var DefaultDuration = time.Duration(15) * time.Minute
|
||||||
|
|
||||||
|
// AssumeRoleProvider retrieves temporary credentials from the STS service, and
|
||||||
|
// keeps track of their expiration time. This provider must be used explicitly,
|
||||||
|
// as it is not included in the credentials chain.
|
||||||
|
type AssumeRoleProvider struct {
|
||||||
|
credentials.Expiry
|
||||||
|
|
||||||
|
// STS client to make assume role request with.
|
||||||
|
Client AssumeRoler
|
||||||
|
|
||||||
|
// Role to be assumed.
|
||||||
|
RoleARN string
|
||||||
|
|
||||||
|
// Session name, if you wish to reuse the credentials elsewhere.
|
||||||
|
RoleSessionName string
|
||||||
|
|
||||||
|
// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
|
||||||
|
Duration time.Duration
|
||||||
|
|
||||||
|
// Optional ExternalID to pass along, defaults to nil if not set.
|
||||||
|
ExternalID *string
|
||||||
|
|
||||||
|
// The policy plain text must be 2048 bytes or shorter. However, an internal
|
||||||
|
// conversion compresses it into a packed binary format with a separate limit.
|
||||||
|
// The PackedPolicySize response element indicates by percentage how close to
|
||||||
|
// the upper size limit the policy is, with 100% equaling the maximum allowed
|
||||||
|
// size.
|
||||||
|
Policy *string
|
||||||
|
|
||||||
|
// The identification number of the MFA device that is associated with the user
|
||||||
|
// who is making the AssumeRole call. Specify this value if the trust policy
|
||||||
|
// of the role being assumed includes a condition that requires MFA authentication.
|
||||||
|
// The value is either the serial number for a hardware device (such as GAHT12345678)
|
||||||
|
// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
|
||||||
|
SerialNumber *string
|
||||||
|
|
||||||
|
// The value provided by the MFA device, if the trust policy of the role being
|
||||||
|
// assumed requires MFA (that is, if the policy includes a condition that tests
|
||||||
|
// for MFA). If the role being assumed requires MFA and if the TokenCode value
|
||||||
|
// is missing or expired, the AssumeRole call returns an "access denied" error.
|
||||||
|
TokenCode *string
|
||||||
|
|
||||||
|
// ExpiryWindow will allow the credentials to trigger refreshing prior to
|
||||||
|
// the credentials actually expiring. This is beneficial so race conditions
|
||||||
|
// with expiring credentials do not cause request to fail unexpectedly
|
||||||
|
// due to ExpiredTokenException exceptions.
|
||||||
|
//
|
||||||
|
// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
|
||||||
|
// 10 seconds before the credentials are actually expired.
|
||||||
|
//
|
||||||
|
// If ExpiryWindow is 0 or less it will be ignored.
|
||||||
|
ExpiryWindow time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCredentials returns a pointer to a new Credentials object wrapping the
|
||||||
|
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
|
||||||
|
// role will be named after a nanosecond timestamp of this operation.
|
||||||
|
//
|
||||||
|
// Takes a Config provider to create the STS client. The ConfigProvider is
|
||||||
|
// satisfied by the session.Session type.
|
||||||
|
func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
|
||||||
|
p := &AssumeRoleProvider{
|
||||||
|
Client: sts.New(c),
|
||||||
|
RoleARN: roleARN,
|
||||||
|
Duration: DefaultDuration,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, option := range options {
|
||||||
|
option(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
return credentials.NewCredentials(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
|
||||||
|
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
|
||||||
|
// role will be named after a nanosecond timestamp of this operation.
|
||||||
|
//
|
||||||
|
// Takes an AssumeRoler which can be satisfiede by the STS client.
|
||||||
|
func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
|
||||||
|
p := &AssumeRoleProvider{
|
||||||
|
Client: svc,
|
||||||
|
RoleARN: roleARN,
|
||||||
|
Duration: DefaultDuration,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, option := range options {
|
||||||
|
option(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
return credentials.NewCredentials(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve generates a new set of temporary credentials using STS.
|
||||||
|
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
|
||||||
|
|
||||||
|
// Apply defaults where parameters are not set.
|
||||||
|
if p.RoleSessionName == "" {
|
||||||
|
// Try to work out a role name that will hopefully end up unique.
|
||||||
|
p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
|
||||||
|
}
|
||||||
|
if p.Duration == 0 {
|
||||||
|
// Expire as often as AWS permits.
|
||||||
|
p.Duration = DefaultDuration
|
||||||
|
}
|
||||||
|
input := &sts.AssumeRoleInput{
|
||||||
|
DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
|
||||||
|
RoleArn: aws.String(p.RoleARN),
|
||||||
|
RoleSessionName: aws.String(p.RoleSessionName),
|
||||||
|
ExternalId: p.ExternalID,
|
||||||
|
}
|
||||||
|
if p.Policy != nil {
|
||||||
|
input.Policy = p.Policy
|
||||||
|
}
|
||||||
|
if p.SerialNumber != nil && p.TokenCode != nil {
|
||||||
|
input.SerialNumber = p.SerialNumber
|
||||||
|
input.TokenCode = p.TokenCode
|
||||||
|
}
|
||||||
|
roleOutput, err := p.Client.AssumeRole(input)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return credentials.Value{ProviderName: ProviderName}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// We will proactively generate new credentials before they expire.
|
||||||
|
p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
|
||||||
|
|
||||||
|
return credentials.Value{
|
||||||
|
AccessKeyID: *roleOutput.Credentials.AccessKeyId,
|
||||||
|
SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
|
||||||
|
SessionToken: *roleOutput.Credentials.SessionToken,
|
||||||
|
ProviderName: ProviderName,
|
||||||
|
}, nil
|
||||||
|
}
|
129
vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
generated
vendored
Normal file
129
vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
generated
vendored
Normal file
|
@ -0,0 +1,129 @@
|
||||||
|
// Package defaults is a collection of helpers to retrieve the SDK's default
|
||||||
|
// configuration and handlers.
|
||||||
|
//
|
||||||
|
// Generally this package shouldn't be used directly, but session.Session
|
||||||
|
// instead. This package is useful when you need to reset the defaults
|
||||||
|
// of a session or service client to the SDK defaults before setting
|
||||||
|
// additional parameters.
|
||||||
|
package defaults
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/corehandlers"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
"github.com/aws/aws-sdk-go/private/endpoints"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Defaults provides a collection of default values for SDK clients.
|
||||||
|
type Defaults struct {
|
||||||
|
Config *aws.Config
|
||||||
|
Handlers request.Handlers
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns the SDK's default values with Config and handlers pre-configured.
|
||||||
|
func Get() Defaults {
|
||||||
|
cfg := Config()
|
||||||
|
handlers := Handlers()
|
||||||
|
cfg.Credentials = CredChain(cfg, handlers)
|
||||||
|
|
||||||
|
return Defaults{
|
||||||
|
Config: cfg,
|
||||||
|
Handlers: handlers,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Config returns the default configuration without credentials.
|
||||||
|
// To retrieve a config with credentials also included use
|
||||||
|
// `defaults.Get().Config` instead.
|
||||||
|
//
|
||||||
|
// Generally you shouldn't need to use this method directly, but
|
||||||
|
// is available if you need to reset the configuration of an
|
||||||
|
// existing service client or session.
|
||||||
|
func Config() *aws.Config {
|
||||||
|
return aws.NewConfig().
|
||||||
|
WithCredentials(credentials.AnonymousCredentials).
|
||||||
|
WithRegion(os.Getenv("AWS_REGION")).
|
||||||
|
WithHTTPClient(http.DefaultClient).
|
||||||
|
WithMaxRetries(aws.UseServiceDefaultRetries).
|
||||||
|
WithLogger(aws.NewDefaultLogger()).
|
||||||
|
WithLogLevel(aws.LogOff).
|
||||||
|
WithSleepDelay(time.Sleep)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handlers returns the default request handlers.
|
||||||
|
//
|
||||||
|
// Generally you shouldn't need to use this method directly, but
|
||||||
|
// is available if you need to reset the request handlers of an
|
||||||
|
// existing service client or session.
|
||||||
|
func Handlers() request.Handlers {
|
||||||
|
var handlers request.Handlers
|
||||||
|
|
||||||
|
handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
|
||||||
|
handlers.Validate.AfterEachFn = request.HandlerListStopOnError
|
||||||
|
handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
|
||||||
|
handlers.Build.AfterEachFn = request.HandlerListStopOnError
|
||||||
|
handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
|
||||||
|
handlers.Send.PushBackNamed(corehandlers.SendHandler)
|
||||||
|
handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
|
||||||
|
handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
|
||||||
|
|
||||||
|
return handlers
|
||||||
|
}
|
||||||
|
|
||||||
|
// CredChain returns the default credential chain.
|
||||||
|
//
|
||||||
|
// Generally you shouldn't need to use this method directly, but
|
||||||
|
// is available if you need to reset the credentials of an
|
||||||
|
// existing service client or session's Config.
|
||||||
|
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
|
||||||
|
return credentials.NewCredentials(&credentials.ChainProvider{
|
||||||
|
VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
|
||||||
|
Providers: []credentials.Provider{
|
||||||
|
&credentials.EnvProvider{},
|
||||||
|
&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
|
||||||
|
RemoteCredProvider(*cfg, handlers),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoteCredProvider returns a credenitials provider for the default remote
|
||||||
|
// endpoints such as EC2 or ECS Roles.
|
||||||
|
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
|
||||||
|
ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
|
||||||
|
|
||||||
|
if len(ecsCredURI) > 0 {
|
||||||
|
return ecsCredProvider(cfg, handlers, ecsCredURI)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ec2RoleProvider(cfg, handlers)
|
||||||
|
}
|
||||||
|
|
||||||
|
func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) credentials.Provider {
|
||||||
|
const host = `169.254.170.2`
|
||||||
|
|
||||||
|
return endpointcreds.NewProviderClient(cfg, handlers,
|
||||||
|
fmt.Sprintf("http://%s%s", host, uri),
|
||||||
|
func(p *endpointcreds.Provider) {
|
||||||
|
p.ExpiryWindow = 5 * time.Minute
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
|
||||||
|
endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName,
|
||||||
|
aws.StringValue(cfg.Region), true, false)
|
||||||
|
|
||||||
|
return &ec2rolecreds.EC2RoleProvider{
|
||||||
|
Client: ec2metadata.NewClient(cfg, handlers, endpoint, signingRegion),
|
||||||
|
ExpiryWindow: 5 * time.Minute,
|
||||||
|
}
|
||||||
|
}
|
140
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
generated
vendored
Normal file
140
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
generated
vendored
Normal file
|
@ -0,0 +1,140 @@
|
||||||
|
package ec2metadata
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetMetadata uses the path provided to request information from the EC2
|
||||||
|
// instance metdata service. The content will be returned as a string, or
|
||||||
|
// error if the request failed.
|
||||||
|
func (c *EC2Metadata) GetMetadata(p string) (string, error) {
|
||||||
|
op := &request.Operation{
|
||||||
|
Name: "GetMetadata",
|
||||||
|
HTTPMethod: "GET",
|
||||||
|
HTTPPath: path.Join("/", "meta-data", p),
|
||||||
|
}
|
||||||
|
|
||||||
|
output := &metadataOutput{}
|
||||||
|
req := c.NewRequest(op, nil, output)
|
||||||
|
|
||||||
|
return output.Content, req.Send()
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDynamicData uses the path provided to request information from the EC2
|
||||||
|
// instance metadata service for dynamic data. The content will be returned
|
||||||
|
// as a string, or error if the request failed.
|
||||||
|
func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
|
||||||
|
op := &request.Operation{
|
||||||
|
Name: "GetDynamicData",
|
||||||
|
HTTPMethod: "GET",
|
||||||
|
HTTPPath: path.Join("/", "dynamic", p),
|
||||||
|
}
|
||||||
|
|
||||||
|
output := &metadataOutput{}
|
||||||
|
req := c.NewRequest(op, nil, output)
|
||||||
|
|
||||||
|
return output.Content, req.Send()
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetInstanceIdentityDocument retrieves an identity document describing an
|
||||||
|
// instance. Error is returned if the request fails or is unable to parse
|
||||||
|
// the response.
|
||||||
|
func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
|
||||||
|
resp, err := c.GetDynamicData("instance-identity/document")
|
||||||
|
if err != nil {
|
||||||
|
return EC2InstanceIdentityDocument{},
|
||||||
|
awserr.New("EC2MetadataRequestError",
|
||||||
|
"failed to get EC2 instance identity document", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
doc := EC2InstanceIdentityDocument{}
|
||||||
|
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
|
||||||
|
return EC2InstanceIdentityDocument{},
|
||||||
|
awserr.New("SerializationError",
|
||||||
|
"failed to decode EC2 instance identity document", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return doc, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IAMInfo retrieves IAM info from the metadata API
|
||||||
|
func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
|
||||||
|
resp, err := c.GetMetadata("iam/info")
|
||||||
|
if err != nil {
|
||||||
|
return EC2IAMInfo{},
|
||||||
|
awserr.New("EC2MetadataRequestError",
|
||||||
|
"failed to get EC2 IAM info", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
info := EC2IAMInfo{}
|
||||||
|
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
|
||||||
|
return EC2IAMInfo{},
|
||||||
|
awserr.New("SerializationError",
|
||||||
|
"failed to decode EC2 IAM info", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if info.Code != "Success" {
|
||||||
|
errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
|
||||||
|
return EC2IAMInfo{},
|
||||||
|
awserr.New("EC2MetadataError", errMsg, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
return info, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Region returns the region the instance is running in.
|
||||||
|
func (c *EC2Metadata) Region() (string, error) {
|
||||||
|
resp, err := c.GetMetadata("placement/availability-zone")
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
// returns region without the suffix. Eg: us-west-2a becomes us-west-2
|
||||||
|
return resp[:len(resp)-1], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Available returns if the application has access to the EC2 Metadata service.
|
||||||
|
// Can be used to determine if application is running within an EC2 Instance and
|
||||||
|
// the metadata service is available.
|
||||||
|
func (c *EC2Metadata) Available() bool {
|
||||||
|
if _, err := c.GetMetadata("instance-id"); err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// An EC2IAMInfo provides the shape for unmarshalling
|
||||||
|
// an IAM info from the metadata API
|
||||||
|
type EC2IAMInfo struct {
|
||||||
|
Code string
|
||||||
|
LastUpdated time.Time
|
||||||
|
InstanceProfileArn string
|
||||||
|
InstanceProfileID string
|
||||||
|
}
|
||||||
|
|
||||||
|
// An EC2InstanceIdentityDocument provides the shape for unmarshalling
|
||||||
|
// an instance identity document
|
||||||
|
type EC2InstanceIdentityDocument struct {
|
||||||
|
DevpayProductCodes []string `json:"devpayProductCodes"`
|
||||||
|
AvailabilityZone string `json:"availabilityZone"`
|
||||||
|
PrivateIP string `json:"privateIp"`
|
||||||
|
Version string `json:"version"`
|
||||||
|
Region string `json:"region"`
|
||||||
|
InstanceID string `json:"instanceId"`
|
||||||
|
BillingProducts []string `json:"billingProducts"`
|
||||||
|
InstanceType string `json:"instanceType"`
|
||||||
|
AccountID string `json:"accountId"`
|
||||||
|
PendingTime time.Time `json:"pendingTime"`
|
||||||
|
ImageID string `json:"imageId"`
|
||||||
|
KernelID string `json:"kernelId"`
|
||||||
|
RamdiskID string `json:"ramdiskId"`
|
||||||
|
Architecture string `json:"architecture"`
|
||||||
|
}
|
124
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
generated
vendored
Normal file
124
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
generated
vendored
Normal file
|
@ -0,0 +1,124 @@
|
||||||
|
// Package ec2metadata provides the client for making API calls to the
|
||||||
|
// EC2 Metadata service.
|
||||||
|
package ec2metadata
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/client"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ServiceName is the name of the service.
|
||||||
|
const ServiceName = "ec2metadata"
|
||||||
|
|
||||||
|
// A EC2Metadata is an EC2 Metadata service Client.
|
||||||
|
type EC2Metadata struct {
|
||||||
|
*client.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new instance of the EC2Metadata client with a session.
|
||||||
|
// This client is safe to use across multiple goroutines.
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
// // Create a EC2Metadata client from just a session.
|
||||||
|
// svc := ec2metadata.New(mySession)
|
||||||
|
//
|
||||||
|
// // Create a EC2Metadata client with additional configuration
|
||||||
|
// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
|
||||||
|
func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
|
||||||
|
c := p.ClientConfig(ServiceName, cfgs...)
|
||||||
|
return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClient returns a new EC2Metadata client. Should be used to create
|
||||||
|
// a client when not using a session. Generally using just New with a session
|
||||||
|
// is preferred.
|
||||||
|
//
|
||||||
|
// If an unmodified HTTP client is provided from the stdlib default, or no client
|
||||||
|
// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened.
|
||||||
|
// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default.
|
||||||
|
func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
|
||||||
|
if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
|
||||||
|
// If the http client is unmodified and this feature is not disabled
|
||||||
|
// set custom timeouts for EC2Metadata requests.
|
||||||
|
cfg.HTTPClient = &http.Client{
|
||||||
|
// use a shorter timeout than default because the metadata
|
||||||
|
// service is local if it is running, and to fail faster
|
||||||
|
// if not running on an ec2 instance.
|
||||||
|
Timeout: 5 * time.Second,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
svc := &EC2Metadata{
|
||||||
|
Client: client.New(
|
||||||
|
cfg,
|
||||||
|
metadata.ClientInfo{
|
||||||
|
ServiceName: ServiceName,
|
||||||
|
Endpoint: endpoint,
|
||||||
|
APIVersion: "latest",
|
||||||
|
},
|
||||||
|
handlers,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
|
||||||
|
svc.Handlers.UnmarshalError.PushBack(unmarshalError)
|
||||||
|
svc.Handlers.Validate.Clear()
|
||||||
|
svc.Handlers.Validate.PushBack(validateEndpointHandler)
|
||||||
|
|
||||||
|
// Add additional options to the service config
|
||||||
|
for _, option := range opts {
|
||||||
|
option(svc.Client)
|
||||||
|
}
|
||||||
|
|
||||||
|
return svc
|
||||||
|
}
|
||||||
|
|
||||||
|
func httpClientZero(c *http.Client) bool {
|
||||||
|
return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
type metadataOutput struct {
|
||||||
|
Content string
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalHandler(r *request.Request) {
|
||||||
|
defer r.HTTPResponse.Body.Close()
|
||||||
|
b := &bytes.Buffer{}
|
||||||
|
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
|
||||||
|
r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if data, ok := r.Data.(*metadataOutput); ok {
|
||||||
|
data.Content = b.String()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalError(r *request.Request) {
|
||||||
|
defer r.HTTPResponse.Body.Close()
|
||||||
|
b := &bytes.Buffer{}
|
||||||
|
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
|
||||||
|
r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Response body format is not consistent between metadata endpoints.
|
||||||
|
// Grab the error message as a string and include that as the source error
|
||||||
|
r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateEndpointHandler(r *request.Request) {
|
||||||
|
if r.ClientInfo.Endpoint == "" {
|
||||||
|
r.Error = aws.ErrMissingEndpoint
|
||||||
|
}
|
||||||
|
}
|
17
vendor/github.com/aws/aws-sdk-go/aws/errors.go
generated
vendored
Normal file
17
vendor/github.com/aws/aws-sdk-go/aws/errors.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
||||||
|
package aws

import "github.com/aws/aws-sdk-go/aws/awserr"

// Sentinel configuration errors shared across the SDK.
var (
	// ErrMissingRegion is an error that is returned if region configuration is
	// not found.
	//
	// @readonly
	ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)

	// ErrMissingEndpoint is an error that is returned if an endpoint cannot be
	// resolved for a service.
	//
	// @readonly
	ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
)
|
112
vendor/github.com/aws/aws-sdk-go/aws/logger.go
generated
vendored
Normal file
112
vendor/github.com/aws/aws-sdk-go/aws/logger.go
generated
vendored
Normal file
|
@ -0,0 +1,112 @@
|
||||||
|
package aws

import (
	"log"
	"os"
)

// A LogLevelType defines the level logging should be performed at. Used to instruct
// the SDK which statements should be logged.
type LogLevelType uint

// LogLevel returns the pointer to a LogLevel. Should be used to workaround
// not being able to take the address of a non-composite literal.
func LogLevel(l LogLevelType) *LogLevelType {
	return &l
}

// Value returns the LogLevel value or the default value LogOff if the LogLevel
// is nil. Safe to use on nil value LogLevelTypes.
func (l *LogLevelType) Value() LogLevelType {
	if l != nil {
		return *l
	}
	return LogOff
}

// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
// LogLevel is nil, will default to LogOff comparison.
func (l *LogLevelType) Matches(v LogLevelType) bool {
	// Bitmask test: every bit of v must be set in this level.
	c := l.Value()
	return c&v == v
}

// AtLeast returns true if this LogLevel is at least high enough to satisfies v.
// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
// to LogOff comparison.
func (l *LogLevelType) AtLeast(v LogLevelType) bool {
	c := l.Value()
	return c >= v
}

const (
	// LogOff states that no logging should be performed by the SDK. This is the
	// default state of the SDK, and should be use to disable all logging.
	LogOff LogLevelType = iota * 0x1000

	// LogDebug state that debug output should be logged by the SDK. This should
	// be used to inspect request made and responses received.
	LogDebug
)

// Debug Logging Sub Levels
const (
	// LogDebugWithSigning states that the SDK should log request signing and
	// presigning events. This should be used to log the signing details of
	// requests for debugging. Will also enable LogDebug.
	LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)

	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
	// HTTP bodies in addition to the headers and path. This should be used to
	// see the body content of requests and responses made while using the SDK.
	// Will also enable LogDebug.
	LogDebugWithHTTPBody

	// LogDebugWithRequestRetries states the SDK should log when service requests will
	// be retried. This should be used to log when you want to log when service
	// requests are being retried. Will also enable LogDebug.
	LogDebugWithRequestRetries

	// LogDebugWithRequestErrors states the SDK should log when service requests fail
	// to build, send, validate, or unmarshal.
	LogDebugWithRequestErrors
)

// A Logger is a minimalistic interface for the SDK to log messages to. Should
// be used to provide custom logging writers for the SDK to use.
type Logger interface {
	Log(...interface{})
}

// A LoggerFunc is a convenience type to convert a function taking a variadic
// list of arguments and wrap it so the Logger interface can be used.
//
// Example:
//     s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
//         fmt.Fprintln(os.Stdout, args...)
//     })})
type LoggerFunc func(...interface{})

// Log calls the wrapped function with the arguments provided
func (f LoggerFunc) Log(args ...interface{}) {
	f(args...)
}

// NewDefaultLogger returns a Logger which will write log messages to stdout, and
// use same formatting runes as the stdlib log.Logger
func NewDefaultLogger() Logger {
	return &defaultLogger{
		logger: log.New(os.Stdout, "", log.LstdFlags),
	}
}

// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
type defaultLogger struct {
	// logger is the underlying stdlib logger that receives all messages.
	logger *log.Logger
}

// Log logs the parameters to the stdlib logger. See log.Println.
func (l defaultLogger) Log(args ...interface{}) {
	l.logger.Println(args...)
}
|
187
vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
generated
vendored
Normal file
187
vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
generated
vendored
Normal file
|
@ -0,0 +1,187 @@
|
||||||
|
package request

import (
	"fmt"
	"strings"
)

// A Handlers provides a collection of request handlers for various
// stages of handling requests.
type Handlers struct {
	Validate         HandlerList
	Build            HandlerList
	Sign             HandlerList
	Send             HandlerList
	ValidateResponse HandlerList
	Unmarshal        HandlerList
	UnmarshalMeta    HandlerList
	UnmarshalError   HandlerList
	Retry            HandlerList
	AfterRetry       HandlerList
}

// Copy returns a copy of this handler's lists.
func (h *Handlers) Copy() Handlers {
	return Handlers{
		Validate:         h.Validate.copy(),
		Build:            h.Build.copy(),
		Sign:             h.Sign.copy(),
		Send:             h.Send.copy(),
		ValidateResponse: h.ValidateResponse.copy(),
		Unmarshal:        h.Unmarshal.copy(),
		UnmarshalError:   h.UnmarshalError.copy(),
		UnmarshalMeta:    h.UnmarshalMeta.copy(),
		Retry:            h.Retry.copy(),
		AfterRetry:       h.AfterRetry.copy(),
	}
}

// Clear removes callback functions for all handlers
func (h *Handlers) Clear() {
	h.Validate.Clear()
	h.Build.Clear()
	h.Send.Clear()
	h.Sign.Clear()
	h.Unmarshal.Clear()
	h.UnmarshalMeta.Clear()
	h.UnmarshalError.Clear()
	h.ValidateResponse.Clear()
	h.Retry.Clear()
	h.AfterRetry.Clear()
}
|
||||||
|
|
||||||
|
// A HandlerListRunItem represents an entry in the HandlerList which
// is being run.
type HandlerListRunItem struct {
	Index   int
	Handler NamedHandler
	Request *Request
}

// A HandlerList manages zero or more handlers in a list.
type HandlerList struct {
	list []NamedHandler

	// Called after each request handler in the list is called. If set
	// and the func returns true the HandlerList will continue to iterate
	// over the request handlers. If false is returned the HandlerList
	// will stop iterating.
	//
	// Should be used if extra logic to be performed between each handler
	// in the list. This can be used to terminate a list's iteration
	// based on a condition such as error like, HandlerListStopOnError.
	// Or for logging like HandlerListLogItem.
	AfterEachFn func(item HandlerListRunItem) bool
}

// A NamedHandler is a struct that contains a name and function callback.
type NamedHandler struct {
	Name string
	Fn   func(*Request)
}
|
||||||
|
|
||||||
|
// copy creates a copy of the handler list.
func (l *HandlerList) copy() HandlerList {
	n := HandlerList{
		AfterEachFn: l.AfterEachFn,
	}
	// Copy the backing slice so mutations of the copy don't affect the original.
	n.list = append([]NamedHandler{}, l.list...)
	return n
}

// Clear clears the handler list.
func (l *HandlerList) Clear() {
	l.list = []NamedHandler{}
}

// Len returns the number of handlers in the list.
func (l *HandlerList) Len() int {
	return len(l.list)
}

// PushBack pushes handler f to the back of the handler list.
func (l *HandlerList) PushBack(f func(*Request)) {
	l.list = append(l.list, NamedHandler{"__anonymous", f})
}

// PushFront pushes handler f to the front of the handler list.
func (l *HandlerList) PushFront(f func(*Request)) {
	l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...)
}

// PushBackNamed pushes named handler f to the back of the handler list.
func (l *HandlerList) PushBackNamed(n NamedHandler) {
	l.list = append(l.list, n)
}

// PushFrontNamed pushes named handler f to the front of the handler list.
func (l *HandlerList) PushFrontNamed(n NamedHandler) {
	l.list = append([]NamedHandler{n}, l.list...)
}
|
||||||
|
|
||||||
|
// Remove removes a NamedHandler n
|
||||||
|
func (l *HandlerList) Remove(n NamedHandler) {
|
||||||
|
newlist := []NamedHandler{}
|
||||||
|
for _, m := range l.list {
|
||||||
|
if m.Name != n.Name {
|
||||||
|
newlist = append(newlist, m)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
l.list = newlist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run executes all handlers in the list with a given request object.
func (l *HandlerList) Run(r *Request) {
	for i, h := range l.list {
		h.Fn(r)
		item := HandlerListRunItem{
			Index: i, Handler: h, Request: r,
		}
		// AfterEachFn may stop the iteration early (see HandlerListStopOnError).
		if l.AfterEachFn != nil && !l.AfterEachFn(item) {
			return
		}
	}
}

// HandlerListLogItem logs the request handler and the state of the
// request's Error value. Always returns true to continue iterating
// request handlers in a HandlerList.
func HandlerListLogItem(item HandlerListRunItem) bool {
	if item.Request.Config.Logger == nil {
		return true
	}
	item.Request.Config.Logger.Log("DEBUG: RequestHandler",
		item.Index, item.Handler.Name, item.Request.Error)

	return true
}

// HandlerListStopOnError returns false to stop the HandlerList iterating
// over request handlers if Request.Error is not nil. True otherwise
// to continue iterating.
func HandlerListStopOnError(item HandlerListRunItem) bool {
	return item.Request.Error == nil
}
|
||||||
|
|
||||||
|
// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
|
||||||
|
// header. If the extra parameters are provided they will be added as metadata to the
|
||||||
|
// name/version pair resulting in the following format.
|
||||||
|
// "name/version (extra0; extra1; ...)"
|
||||||
|
// The user agent part will be concatenated with this current request's user agent string.
|
||||||
|
func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
|
||||||
|
ua := fmt.Sprintf("%s/%s", name, version)
|
||||||
|
if len(extra) > 0 {
|
||||||
|
ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
|
||||||
|
}
|
||||||
|
return func(r *Request) {
|
||||||
|
AddToUserAgent(r, ua)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
// The input string will be concatenated with the current request's user agent string.
func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
	return func(r *Request) {
		AddToUserAgent(r, s)
	}
}
|
33
vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
generated
vendored
Normal file
33
vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
generated
vendored
Normal file
|
@ -0,0 +1,33 @@
|
||||||
|
// +build go1.5

package request

import (
	"io"
	"net/http"
	"net/url"
)

// copyHTTPRequest returns a copy of r with its own URL and Header values and
// body as the new request body, so a retried attempt does not share mutable
// state with the previous (possibly still in-flight) attempt.
func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
	req := &http.Request{
		URL:           &url.URL{},
		Header:        http.Header{},
		Close:         r.Close,
		Body:          body,
		Host:          r.Host,
		Method:        r.Method,
		Proto:         r.Proto,
		ContentLength: r.ContentLength,
		// Cancel will be deprecated in 1.7 and will be replaced with Context
		Cancel: r.Cancel,
	}

	// Deep-copy the URL value and every header entry.
	*req.URL = *r.URL
	for k, v := range r.Header {
		for _, vv := range v {
			req.Header.Add(k, vv)
		}
	}

	return req
}
|
31
vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go
generated
vendored
Normal file
31
vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
||||||
|
// +build !go1.5

package request

import (
	"io"
	"net/http"
	"net/url"
)

// copyHTTPRequest is the pre-go1.5 variant: identical to the go1.5 version
// except it omits the Cancel field, which does not exist before Go 1.5.
func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
	req := &http.Request{
		URL:           &url.URL{},
		Header:        http.Header{},
		Close:         r.Close,
		Body:          body,
		Host:          r.Host,
		Method:        r.Method,
		Proto:         r.Proto,
		ContentLength: r.ContentLength,
	}

	// Deep-copy the URL value and every header entry.
	*req.URL = *r.URL
	for k, v := range r.Header {
		for _, vv := range v {
			req.Header.Add(k, vv)
		}
	}

	return req
}
|
49
vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
generated
vendored
Normal file
49
vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
generated
vendored
Normal file
|
@ -0,0 +1,49 @@
|
||||||
|
package request

import (
	"io"
	"sync"
)

// offsetReader is a thread-safe io.ReadCloser to prevent racing
// with retrying requests
type offsetReader struct {
	buf    io.ReadSeeker
	lock   sync.RWMutex
	closed bool // once true, Read returns io.EOF
}

// newOffsetReader seeks buf to offset and wraps it in an offsetReader.
// NOTE(review): the error from Seek is ignored — presumably callers only pass
// in-memory readers whose Seek cannot fail; confirm before reusing elsewhere.
func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
	reader := &offsetReader{}
	buf.Seek(offset, 0)

	reader.buf = buf
	return reader
}

// Close is a thread-safe close. Uses the write lock.
func (o *offsetReader) Close() error {
	o.lock.Lock()
	defer o.lock.Unlock()
	o.closed = true
	return nil
}

// Read is a thread-safe read using a read lock.
func (o *offsetReader) Read(p []byte) (int, error) {
	o.lock.RLock()
	defer o.lock.RUnlock()

	if o.closed {
		return 0, io.EOF
	}

	return o.buf.Read(p)
}

// CloseAndCopy will return a new offsetReader with a copy of the old buffer
// and close the old buffer.
func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
	o.Close()
	return newOffsetReader(o.buf, offset)
}
|
326
vendor/github.com/aws/aws-sdk-go/aws/request/request.go
generated
vendored
Normal file
326
vendor/github.com/aws/aws-sdk-go/aws/request/request.go
generated
vendored
Normal file
|
@ -0,0 +1,326 @@
|
||||||
|
package request

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"reflect"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/client/metadata"
)

// A Request is the service request to be made.
type Request struct {
	Config     aws.Config
	ClientInfo metadata.ClientInfo
	Handlers   Handlers

	Retryer
	Time             time.Time
	ExpireTime       time.Duration
	Operation        *Operation
	HTTPRequest      *http.Request
	HTTPResponse     *http.Response
	Body             io.ReadSeeker
	BodyStart        int64 // offset from beginning of Body that the request body starts
	Params           interface{}
	Error            error
	Data             interface{}
	RequestID        string
	RetryCount       int
	Retryable        *bool
	RetryDelay       time.Duration
	NotHoist         bool
	SignedHeaderVals http.Header
	LastSignedAt     time.Time

	// built guards Build() so validation/build handlers run at most once.
	built bool
}

// An Operation is the service API operation to be made.
type Operation struct {
	Name       string
	HTTPMethod string
	HTTPPath   string
	*Paginator
}

// Paginator keeps track of pagination configuration for an API operation.
type Paginator struct {
	InputTokens     []string
	OutputTokens    []string
	LimitToken      string
	TruncationToken string
}
|
||||||
|
|
||||||
|
// New returns a new Request pointer for the service API
// operation and parameters.
//
// Params is any value of input parameters to be the request payload.
// Data is pointer value to an object which the request's response
// payload will be deserialized to.
func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
	retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {

	// Default to POST when the operation does not specify an HTTP method.
	method := operation.HTTPMethod
	if method == "" {
		method = "POST"
	}

	httpReq, _ := http.NewRequest(method, "", nil)

	var err error
	httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
	if err != nil {
		// Keep a usable (empty) URL and surface the parse failure via r.Error.
		httpReq.URL = &url.URL{}
		err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
	}

	r := &Request{
		Config:     cfg,
		ClientInfo: clientInfo,
		Handlers:   handlers.Copy(),

		Retryer:     retryer,
		Time:        time.Now(),
		ExpireTime:  0,
		Operation:   operation,
		HTTPRequest: httpReq,
		Body:        nil,
		Params:      params,
		Error:       err,
		Data:        data,
	}
	// Start with an empty buffered body so the request is always sendable.
	r.SetBufferBody([]byte{})

	return r
}
|
||||||
|
|
||||||
|
// WillRetry returns if the request can be retried: there is an error, the
// request was marked retryable, and the retry budget is not exhausted.
func (r *Request) WillRetry() bool {
	return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
}

// ParamsFilled returns if the request's parameters have been populated
// and the parameters are valid. False is returned if no parameters are
// provided or invalid.
func (r *Request) ParamsFilled() bool {
	return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
}

// DataFilled returns true if the request's data for response deserialization
// target has been set and is a valid. False is returned if data is not
// set, or is invalid.
func (r *Request) DataFilled() bool {
	return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
}
|
||||||
|
|
||||||
|
// SetBufferBody will set the request's body bytes that will be sent to
// the service API.
func (r *Request) SetBufferBody(buf []byte) {
	r.SetReaderBody(bytes.NewReader(buf))
}

// SetStringBody sets the body of the request to be backed by a string.
func (r *Request) SetStringBody(s string) {
	r.SetReaderBody(strings.NewReader(s))
}

// SetReaderBody will set the request's body reader. The reader is wrapped in
// an offsetReader so retries don't race with an in-flight attempt.
func (r *Request) SetReaderBody(reader io.ReadSeeker) {
	r.HTTPRequest.Body = newOffsetReader(reader, 0)
	r.Body = reader
}
|
||||||
|
|
||||||
|
// Presign returns the request's signed URL. Error will be returned
// if the signing fails.
func (r *Request) Presign(expireTime time.Duration) (string, error) {
	r.ExpireTime = expireTime
	r.NotHoist = false
	r.Sign()
	if r.Error != nil {
		return "", r.Error
	}
	return r.HTTPRequest.URL.String(), nil
}

// PresignRequest behaves just like presign, but hoists all headers and signs them.
// Also returns the signed hash back to the user
func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
	r.ExpireTime = expireTime
	r.NotHoist = true // only difference from Presign: headers are hoisted/signed
	r.Sign()
	if r.Error != nil {
		return "", nil, r.Error
	}
	return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
}
|
||||||
|
|
||||||
|
// debugLogReqError logs a failed request stage (and whether it will be
// retried) when LogDebugWithRequestErrors logging is enabled.
func debugLogReqError(r *Request, stage string, retrying bool, err error) {
	if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
		return
	}

	retryStr := "not retrying"
	if retrying {
		retryStr = "will retry"
	}

	r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
		stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
}
|
||||||
|
|
||||||
|
// Build will build the request's object so it can be signed and sent
// to the service. Build will also validate all the request's parameters.
// Any additional build Handlers set on this request will be run
// in the order they were set.
//
// The request will only be built once. Multiple calls to build will have
// no effect.
//
// If any Validate or Build errors occur the build will stop and the error
// which occurred will be returned.
func (r *Request) Build() error {
	if !r.built {
		r.Handlers.Validate.Run(r)
		if r.Error != nil {
			debugLogReqError(r, "Validate Request", false, r.Error)
			return r.Error
		}
		r.Handlers.Build.Run(r)
		if r.Error != nil {
			debugLogReqError(r, "Build Request", false, r.Error)
			return r.Error
		}
		// Mark built so subsequent calls are no-ops.
		r.built = true
	}

	return r.Error
}
|
||||||
|
|
||||||
|
// Sign will sign the request returning error if errors are encountered.
//
// Sign will build the request prior to signing. All Sign Handlers will
// be executed in the order they were set.
func (r *Request) Sign() error {
	r.Build()
	if r.Error != nil {
		debugLogReqError(r, "Build Request", false, r.Error)
		return r.Error
	}

	r.Handlers.Sign.Run(r)
	return r.Error
}
|
||||||
|
|
||||||
|
// Send will send the request returning error if errors are encountered.
//
// Send will sign the request prior to sending. All Send Handlers will
// be executed in the order they were set.
//
// Canceling a request is non-deterministic. If a request has been canceled,
// then the transport will choose, randomly, one of the state channels during
// reads or getting the connection.
//
// readLoop() and getConn(req *Request, cm connectMethod)
// https://github.com/golang/go/blob/master/src/net/http/transport.go
func (r *Request) Send() error {
	for {
		// Retry path: rebuild the HTTP request with a fresh body reader so the
		// previous (possibly in-flight) attempt cannot race on the body.
		if aws.BoolValue(r.Retryable) {
			if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
				r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
					r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
			}

			var body io.ReadCloser
			if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok {
				body = reader.CloseAndCopy(r.BodyStart)
			} else {
				// Body was replaced by something other than our offsetReader;
				// fall back to seeking the raw reader (not race-safe).
				if r.Config.Logger != nil {
					r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions")
				}
				r.Body.Seek(r.BodyStart, 0)
				body = ioutil.NopCloser(r.Body)
			}

			r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body)
			if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
				// Closing response body. Since we are setting a new request to send off, this
				// response will get squashed and leaked.
				r.HTTPResponse.Body.Close()
			}
		}

		r.Sign()
		if r.Error != nil {
			return r.Error
		}

		// Reset the retry decision; the handlers below set it for this attempt.
		r.Retryable = nil

		r.Handlers.Send.Run(r)
		if r.Error != nil {
			// Canceled requests are returned immediately, never retried.
			if strings.Contains(r.Error.Error(), "net/http: request canceled") {
				return r.Error
			}

			err := r.Error
			r.Handlers.Retry.Run(r)
			r.Handlers.AfterRetry.Run(r)
			if r.Error != nil {
				debugLogReqError(r, "Send Request", false, r.Error)
				return r.Error
			}
			debugLogReqError(r, "Send Request", true, err)
			continue
		}

		r.Handlers.UnmarshalMeta.Run(r)
		r.Handlers.ValidateResponse.Run(r)
		if r.Error != nil {
			err := r.Error
			r.Handlers.UnmarshalError.Run(r)
			r.Handlers.Retry.Run(r)
			r.Handlers.AfterRetry.Run(r)
			if r.Error != nil {
				debugLogReqError(r, "Validate Response", false, r.Error)
				return r.Error
			}
			debugLogReqError(r, "Validate Response", true, err)
			continue
		}

		r.Handlers.Unmarshal.Run(r)
		if r.Error != nil {
			err := r.Error
			r.Handlers.Retry.Run(r)
			r.Handlers.AfterRetry.Run(r)
			if r.Error != nil {
				debugLogReqError(r, "Unmarshal Response", false, r.Error)
				return r.Error
			}
			debugLogReqError(r, "Unmarshal Response", true, err)
			continue
		}

		// Success: fall out of the retry loop.
		break
	}

	return nil
}
|
||||||
|
|
||||||
|
// AddToUserAgent adds the string to the end of the request's current user agent.
|
||||||
|
func AddToUserAgent(r *Request, s string) {
|
||||||
|
curUA := r.HTTPRequest.Header.Get("User-Agent")
|
||||||
|
if len(curUA) > 0 {
|
||||||
|
s = curUA + " " + s
|
||||||
|
}
|
||||||
|
r.HTTPRequest.Header.Set("User-Agent", s)
|
||||||
|
}
|
104
vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
generated
vendored
Normal file
104
vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
generated
vendored
Normal file
|
@ -0,0 +1,104 @@
|
||||||
|
package request

import (
	"reflect"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awsutil"
)

//type Paginater interface {
//	HasNextPage() bool
//	NextPage() *Request
//	EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error
//}

// HasNextPage returns true if this request has more pages of data available.
func (r *Request) HasNextPage() bool {
	return len(r.nextPageTokens()) > 0
}
|
||||||
|
|
||||||
|
// nextPageTokens returns the tokens to use when asking for the next page of
|
||||||
|
// data.
|
||||||
|
func (r *Request) nextPageTokens() []interface{} {
|
||||||
|
if r.Operation.Paginator == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.Operation.TruncationToken != "" {
|
||||||
|
tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
|
||||||
|
if len(tr) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch v := tr[0].(type) {
|
||||||
|
case *bool:
|
||||||
|
if !aws.BoolValue(v) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
case bool:
|
||||||
|
if v == false {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tokens := []interface{}{}
|
||||||
|
tokenAdded := false
|
||||||
|
for _, outToken := range r.Operation.OutputTokens {
|
||||||
|
v, _ := awsutil.ValuesAtPath(r.Data, outToken)
|
||||||
|
if len(v) > 0 {
|
||||||
|
tokens = append(tokens, v[0])
|
||||||
|
tokenAdded = true
|
||||||
|
} else {
|
||||||
|
tokens = append(tokens, nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !tokenAdded {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return tokens
|
||||||
|
}
|
||||||
|
|
||||||
|
// NextPage returns a new Request that can be executed to return the next
// page of result data. Call .Send() on this request to execute it.
func (r *Request) NextPage() *Request {
	tokens := r.nextPageTokens()
	if len(tokens) == 0 {
		return nil
	}

	// Allocate a fresh output value of the same type as r.Data and rebuild the
	// request with copied params plus the next-page input tokens.
	data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
	nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
	for i, intok := range nr.Operation.InputTokens {
		awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
	}
	return nr
}
|
||||||
|
|
||||||
|
// EachPage iterates over each page of a paginated request object. The fn
// parameter should be a function with the following sample signature:
//
//   func(page *T, lastPage bool) bool {
//       return true // return false to stop iterating
//   }
//
// Where "T" is the structure type matching the output structure of the given
// operation. For example, a request object generated by
// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
// as the structure "T". The lastPage value represents whether the page is
// the last page of data or not. The return value of this function should
// return true to keep iterating or false to stop.
func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
	for page := r; page != nil; page = page.NextPage() {
		if err := page.Send(); err != nil {
			return err
		}
		// Stop early when the callback returns false; surface any page error.
		if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
			return page.Error
		}
	}

	return nil
}
|
101
vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
generated
vendored
Normal file
101
vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
generated
vendored
Normal file
|
@ -0,0 +1,101 @@
|
||||||
|
package request
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Retryer is an interface to control retry logic for a given service.
// The default implementation used by most services is the service.DefaultRetryer
// structure, which contains basic retry logic using exponential backoff.
type Retryer interface {
	// RetryRules returns the delay to wait before retrying the given request.
	RetryRules(*Request) time.Duration
	// ShouldRetry reports whether the given request should be retried.
	ShouldRetry(*Request) bool
	// MaxRetries returns the maximum number of retry attempts permitted.
	MaxRetries() int
}
|
||||||
|
|
||||||
|
// WithRetryer sets a config Retryer value to the given Config returning it
// for chaining.
//
// Note: this mutates cfg in place; the return value is the same pointer,
// provided purely for fluent-style call chaining.
func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
	cfg.Retryer = retryer
	return cfg
}
|
||||||
|
|
||||||
|
// retryableCodes is a collection of service response codes which are retry-able
// without any further action.
// The empty struct{} values make this a memory-free set; only key membership matters.
var retryableCodes = map[string]struct{}{
	"RequestError": {},
	"RequestTimeout": {},
}
|
||||||
|
|
||||||
|
// throttleCodes is a collection of service response codes which indicate the
// request was throttled by the service and may succeed if retried after a delay.
var throttleCodes = map[string]struct{}{
	"ProvisionedThroughputExceededException": {},
	"Throttling": {},
	"ThrottlingException": {},
	"RequestLimitExceeded": {},
	"RequestThrottled": {},
	"LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once
	"TooManyRequestsException": {}, // Lambda functions
}
|
||||||
|
|
||||||
|
// credsExpiredCodes is a collection of error codes which signify the credentials
// need to be refreshed. Expired tokens require refreshing of credentials, and
// resigning before the request can be retried.
var credsExpiredCodes = map[string]struct{}{
	"ExpiredToken": {},
	"ExpiredTokenException": {},
	"RequestExpired": {}, // EC2 Only
}
|
||||||
|
|
||||||
|
func isCodeThrottle(code string) bool {
|
||||||
|
_, ok := throttleCodes[code]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func isCodeRetryable(code string) bool {
|
||||||
|
if _, ok := retryableCodes[code]; ok {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return isCodeExpiredCreds(code)
|
||||||
|
}
|
||||||
|
|
||||||
|
func isCodeExpiredCreds(code string) bool {
|
||||||
|
_, ok := credsExpiredCodes[code]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsErrorRetryable returns whether the error is retryable, based on its Code.
|
||||||
|
// Returns false if the request has no Error set.
|
||||||
|
func (r *Request) IsErrorRetryable() bool {
|
||||||
|
if r.Error != nil {
|
||||||
|
if err, ok := r.Error.(awserr.Error); ok {
|
||||||
|
return isCodeRetryable(err.Code())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsErrorThrottle returns whether the error is to be throttled based on its code.
|
||||||
|
// Returns false if the request has no Error set
|
||||||
|
func (r *Request) IsErrorThrottle() bool {
|
||||||
|
if r.Error != nil {
|
||||||
|
if err, ok := r.Error.(awserr.Error); ok {
|
||||||
|
return isCodeThrottle(err.Code())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsErrorExpired returns whether the error code is a credential expiry error.
|
||||||
|
// Returns false if the request has no Error set.
|
||||||
|
func (r *Request) IsErrorExpired() bool {
|
||||||
|
if r.Error != nil {
|
||||||
|
if err, ok := r.Error.(awserr.Error); ok {
|
||||||
|
return isCodeExpiredCreds(err.Code())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue