From ef0bbf53afe366cfabb6a157249c18cd5b3ade9b Mon Sep 17 00:00:00 2001 From: Craig Peterson Date: Mon, 22 Aug 2016 18:31:50 -0600 Subject: [PATCH] migrate code for github --- .gitignore | 1 + LICENSE | 20 + js/helpers.js | 251 + js/js.go | 46 + js/js_test.go | 58 + js/parse_tests/001-basic.js | 6 + js/parse_tests/001-basic.json | 30 + js/parse_tests/002-ttl.js | 5 + js/parse_tests/002-ttl.json | 31 + js/parse_tests/003-meta.js | 5 + js/parse_tests/003-meta.json | 26 + js/parse_tests/004-ips.js | 10 + js/parse_tests/004-ips.json | 28 + js/parse_tests/005-multipleDomains.js | 7 + js/parse_tests/005-multipleDomains.json | 35 + js/static.go | 253 + main.go | 321 + models/dns.go | 204 + models/dns_test.go | 39 + normalize/validate.go | 294 + normalize/validate_test.go | 140 + providers/activedir/activedirProvider.go | 37 + providers/activedir/adzonedump.test2.json | Bin 0 -> 1822 bytes providers/activedir/doc.md | 78 + providers/activedir/domains.go | 293 + providers/activedir/domains_test.go | 40 + providers/activedir/getzones_other.go | 17 + providers/activedir/getzones_windows.go | 95 + providers/activedir/zone.testzone.json | Bin 0 -> 6000 bytes providers/bind/bindProvider.go | 304 + providers/bind/prettyzone.go | 227 + providers/bind/prettyzone_test.go | 299 + providers/bind/serial.go | 71 + providers/bind/serial_test.go | 51 + providers/cloudflare/cloudflareProvider.go | 306 + providers/cloudflare/preprocess_test.go | 116 + providers/cloudflare/rest.go | 251 + providers/config/providerConfig.go | 50 + providers/diff/diff.go | 153 + providers/diff/diff_test.go | 112 + providers/gandi/gandiProvider.go | 184 + providers/gandi/protocol.go | 168 + providers/namecheap/namecheap.go | 61 + providers/namedotcom/namedotcom.md | 48 + providers/namedotcom/namedotcomProvider.go | 117 + providers/namedotcom/nameservers.go | 82 + providers/namedotcom/nameservers_test.go | 150 + providers/namedotcom/records.go | 168 + providers/providers.go | 119 + providers/route53/route53Provider.go | 269 + providers/route53/route53Provider_test.go | 24 + transform/transform.go | 113 + transform/transform_test.go | 213 + .../DisposaBoy/JsonConfigReader/AUTHORS.md | 4 + .../DisposaBoy/JsonConfigReader/LICENSE.md | 7 + .../DisposaBoy/JsonConfigReader/README.md | 36 + .../DisposaBoy/JsonConfigReader/reader.go | 94 + .../github.com/NYTimes/gziphandler/LICENSE.md | 13 + .../github.com/NYTimes/gziphandler/README.md | 52 + vendor/github.com/NYTimes/gziphandler/gzip.go | 200 + vendor/github.com/TomOnTime/utfutil/LICENSE | 27 + vendor/github.com/TomOnTime/utfutil/README.md | 70 + .../github.com/TomOnTime/utfutil/utfutil.go | 110 + vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202 + vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3 + .../aws/aws-sdk-go/aws/awserr/error.go | 145 + .../aws/aws-sdk-go/aws/awserr/types.go | 194 + .../aws/aws-sdk-go/aws/awsutil/copy.go | 100 + .../aws/aws-sdk-go/aws/awsutil/equal.go | 27 + .../aws/aws-sdk-go/aws/awsutil/path_value.go | 222 + .../aws/aws-sdk-go/aws/awsutil/prettify.go | 107 + .../aws-sdk-go/aws/awsutil/string_value.go | 89 + .../aws/aws-sdk-go/aws/client/client.go | 120 + .../aws-sdk-go/aws/client/default_retryer.go | 90 + .../aws/client/metadata/client_info.go | 12 + .../github.com/aws/aws-sdk-go/aws/config.go | 422 + .../aws/aws-sdk-go/aws/convert_types.go | 369 + .../aws-sdk-go/aws/corehandlers/handlers.go | 152 + .../aws/corehandlers/param_validator.go | 17 + .../aws/credentials/chain_provider.go | 100 + .../aws-sdk-go/aws/credentials/credentials.go | 223 + 
.../ec2rolecreds/ec2_role_provider.go | 178 + .../aws/credentials/endpointcreds/provider.go | 191 + .../aws/credentials/env_provider.go | 77 + .../aws-sdk-go/aws/credentials/example.ini | 12 + .../shared_credentials_provider.go | 151 + .../aws/credentials/static_provider.go | 57 + .../stscreds/assume_role_provider.go | 161 + .../aws/aws-sdk-go/aws/defaults/defaults.go | 129 + .../aws/aws-sdk-go/aws/ec2metadata/api.go | 140 + .../aws/aws-sdk-go/aws/ec2metadata/service.go | 124 + .../github.com/aws/aws-sdk-go/aws/errors.go | 17 + .../github.com/aws/aws-sdk-go/aws/logger.go | 112 + .../aws/aws-sdk-go/aws/request/handlers.go | 187 + .../aws-sdk-go/aws/request/http_request.go | 33 + .../aws/request/http_request_1_4.go | 31 + .../aws-sdk-go/aws/request/offset_reader.go | 49 + .../aws/aws-sdk-go/aws/request/request.go | 326 + .../aws/request/request_pagination.go | 104 + .../aws/aws-sdk-go/aws/request/retryer.go | 101 + .../aws/aws-sdk-go/aws/request/validation.go | 234 + .../aws/aws-sdk-go/aws/session/doc.go | 229 + .../aws/aws-sdk-go/aws/session/env_config.go | 188 + .../aws/aws-sdk-go/aws/session/session.go | 388 + .../aws-sdk-go/aws/session/shared_config.go | 294 + .../aws-sdk-go/aws/signer/v4/header_rules.go | 82 + .../aws/aws-sdk-go/aws/signer/v4/v4.go | 644 + vendor/github.com/aws/aws-sdk-go/aws/types.go | 106 + .../github.com/aws/aws-sdk-go/aws/version.go | 8 + .../aws-sdk-go/private/endpoints/endpoints.go | 70 + .../private/endpoints/endpoints.json | 78 + .../private/endpoints/endpoints_map.go | 91 + .../private/protocol/idempotency.go | 75 + .../private/protocol/query/build.go | 36 + .../protocol/query/queryutil/queryutil.go | 230 + .../private/protocol/query/unmarshal.go | 35 + .../private/protocol/query/unmarshal_error.go | 66 + .../aws-sdk-go/private/protocol/rest/build.go | 256 + .../private/protocol/rest/payload.go | 45 + .../private/protocol/rest/unmarshal.go | 198 + .../private/protocol/restxml/restxml.go | 69 + .../aws-sdk-go/private/protocol/unmarshal.go | 21 + .../private/protocol/xml/xmlutil/build.go | 293 + .../private/protocol/xml/xmlutil/unmarshal.go | 260 + .../protocol/xml/xmlutil/xml_to_struct.go | 105 + .../aws/aws-sdk-go/private/waiter/waiter.go | 134 + .../aws/aws-sdk-go/service/route53/api.go | 7830 +++ .../service/route53/customizations.go | 30 + .../aws/aws-sdk-go/service/route53/service.go | 86 + .../service/route53/unmarshal_error.go | 77 + .../aws/aws-sdk-go/service/route53/waiters.go | 30 + .../aws/aws-sdk-go/service/sts/api.go | 1653 + .../aws-sdk-go/service/sts/customizations.go | 12 + .../aws/aws-sdk-go/service/sts/service.go | 130 + .../github.com/billputer/go-namecheap/LICENSE | 21 + .../billputer/go-namecheap/README.md | 43 + .../github.com/billputer/go-namecheap/dns.go | 98 + .../billputer/go-namecheap/domain.go | 136 + .../billputer/go-namecheap/namecheap.go | 161 + .../github.com/billputer/go-namecheap/ns.go | 35 + .../billputer/go-namecheap/registrant.go | 119 + vendor/github.com/go-ini/ini/LICENSE | 191 + vendor/github.com/go-ini/ini/README.md | 560 + vendor/github.com/go-ini/ini/README_ZH.md | 547 + vendor/github.com/go-ini/ini/ini.go | 1226 + vendor/github.com/go-ini/ini/struct.go | 350 + .../github.com/jmespath/go-jmespath/LICENSE | 13 + .../github.com/jmespath/go-jmespath/Makefile | 44 + .../github.com/jmespath/go-jmespath/README.md | 7 + vendor/github.com/jmespath/go-jmespath/api.go | 12 + .../go-jmespath/astnodetype_string.go | 16 + .../jmespath/go-jmespath/functions.go | 840 + .../jmespath/go-jmespath/interpreter.go | 418 + 
.../github.com/jmespath/go-jmespath/lexer.go | 420 + .../github.com/jmespath/go-jmespath/parser.go | 603 + .../jmespath/go-jmespath/toktype_string.go | 16 + .../github.com/jmespath/go-jmespath/util.go | 185 + vendor/github.com/kolo/xmlrpc/LICENSE | 19 + vendor/github.com/kolo/xmlrpc/README.md | 79 + vendor/github.com/kolo/xmlrpc/client.go | 144 + vendor/github.com/kolo/xmlrpc/decoder.go | 449 + vendor/github.com/kolo/xmlrpc/encoder.go | 164 + vendor/github.com/kolo/xmlrpc/request.go | 57 + vendor/github.com/kolo/xmlrpc/response.go | 52 + vendor/github.com/kolo/xmlrpc/test_server.rb | 25 + vendor/github.com/kolo/xmlrpc/xmlrpc.go | 19 + vendor/github.com/miekg/dns/AUTHORS | 1 + vendor/github.com/miekg/dns/CONTRIBUTORS | 9 + vendor/github.com/miekg/dns/COPYRIGHT | 9 + vendor/github.com/miekg/dns/LICENSE | 32 + vendor/github.com/miekg/dns/README.md | 151 + vendor/github.com/miekg/dns/client.go | 455 + vendor/github.com/miekg/dns/clientconfig.go | 99 + vendor/github.com/miekg/dns/defaults.go | 282 + vendor/github.com/miekg/dns/dns.go | 104 + vendor/github.com/miekg/dns/dnssec.go | 721 + vendor/github.com/miekg/dns/dnssec_keygen.go | 156 + vendor/github.com/miekg/dns/dnssec_keyscan.go | 249 + vendor/github.com/miekg/dns/dnssec_privkey.go | 85 + vendor/github.com/miekg/dns/dnsutil/util.go | 79 + vendor/github.com/miekg/dns/doc.go | 251 + vendor/github.com/miekg/dns/edns.go | 532 + vendor/github.com/miekg/dns/format.go | 87 + vendor/github.com/miekg/dns/generate.go | 159 + vendor/github.com/miekg/dns/labels.go | 168 + vendor/github.com/miekg/dns/msg.go | 1231 + vendor/github.com/miekg/dns/msg_generate.go | 340 + vendor/github.com/miekg/dns/msg_helpers.go | 630 + vendor/github.com/miekg/dns/nsecx.go | 119 + vendor/github.com/miekg/dns/privaterr.go | 149 + vendor/github.com/miekg/dns/rawmsg.go | 49 + vendor/github.com/miekg/dns/reverse.go | 38 + vendor/github.com/miekg/dns/sanitize.go | 84 + vendor/github.com/miekg/dns/scan.go | 974 + vendor/github.com/miekg/dns/scan_rr.go | 2143 + vendor/github.com/miekg/dns/scanner.go | 43 + vendor/github.com/miekg/dns/server.go | 732 + vendor/github.com/miekg/dns/sig0.go | 219 + vendor/github.com/miekg/dns/singleinflight.go | 57 + vendor/github.com/miekg/dns/tlsa.go | 86 + vendor/github.com/miekg/dns/tsig.go | 384 + vendor/github.com/miekg/dns/types.go | 1249 + vendor/github.com/miekg/dns/types_generate.go | 271 + vendor/github.com/miekg/dns/udp.go | 58 + vendor/github.com/miekg/dns/udp_linux.go | 73 + vendor/github.com/miekg/dns/udp_other.go | 17 + vendor/github.com/miekg/dns/udp_plan9.go | 34 + vendor/github.com/miekg/dns/udp_windows.go | 34 + vendor/github.com/miekg/dns/update.go | 106 + vendor/github.com/miekg/dns/xfr.go | 244 + vendor/github.com/miekg/dns/zmsg.go | 3464 + vendor/github.com/miekg/dns/ztypes.go | 828 + .../github.com/prasmussen/gandi-api/LICENSE | 22 + .../prasmussen/gandi-api/client/client.go | 37 + .../prasmussen/gandi-api/domain/domain.go | 77 + .../prasmussen/gandi-api/domain/structs.go | 57 + .../prasmussen/gandi-api/domain/util.go | 69 + .../gandi-api/domain/zone/record/record.go | 118 + .../gandi-api/domain/zone/record/structs.go | 30 + .../gandi-api/domain/zone/record/util.go | 16 + .../gandi-api/domain/zone/structs.go | 24 + .../prasmussen/gandi-api/domain/zone/util.go | 30 + .../gandi-api/domain/zone/version/structs.go | 10 + .../gandi-api/domain/zone/version/util.go | 13 + .../gandi-api/domain/zone/version/version.go | 68 + .../prasmussen/gandi-api/domain/zone/zone.go | 81 + .../gandi-api/operation/operation.go | 60 + 
.../prasmussen/gandi-api/operation/structs.go | 29 + .../prasmussen/gandi-api/operation/util.go | 33 + .../prasmussen/gandi-api/util/util.go | 76 + .../robertkrimen/otto/DESIGN.markdown | 1 + vendor/github.com/robertkrimen/otto/LICENSE | 7 + vendor/github.com/robertkrimen/otto/Makefile | 63 + .../robertkrimen/otto/README.markdown | 870 + .../robertkrimen/otto/ast/README.markdown | 1068 + .../robertkrimen/otto/ast/comments.go | 278 + .../github.com/robertkrimen/otto/ast/node.go | 515 + .../github.com/robertkrimen/otto/builtin.go | 353 + .../robertkrimen/otto/builtin_array.go | 681 + .../robertkrimen/otto/builtin_boolean.go | 28 + .../robertkrimen/otto/builtin_date.go | 615 + .../robertkrimen/otto/builtin_error.go | 126 + .../robertkrimen/otto/builtin_function.go | 129 + .../robertkrimen/otto/builtin_json.go | 299 + .../robertkrimen/otto/builtin_math.go | 151 + .../robertkrimen/otto/builtin_number.go | 93 + .../robertkrimen/otto/builtin_object.go | 289 + .../robertkrimen/otto/builtin_regexp.go | 65 + .../robertkrimen/otto/builtin_string.go | 500 + vendor/github.com/robertkrimen/otto/clone.go | 173 + vendor/github.com/robertkrimen/otto/cmpl.go | 24 + .../robertkrimen/otto/cmpl_evaluate.go | 96 + .../otto/cmpl_evaluate_expression.go | 460 + .../otto/cmpl_evaluate_statement.go | 424 + .../robertkrimen/otto/cmpl_parse.go | 656 + .../github.com/robertkrimen/otto/console.go | 51 + vendor/github.com/robertkrimen/otto/dbg.go | 9 + .../github.com/robertkrimen/otto/dbg/dbg.go | 387 + vendor/github.com/robertkrimen/otto/error.go | 252 + .../github.com/robertkrimen/otto/evaluate.go | 318 + .../robertkrimen/otto/file/README.markdown | 110 + .../github.com/robertkrimen/otto/file/file.go | 164 + vendor/github.com/robertkrimen/otto/global.go | 221 + vendor/github.com/robertkrimen/otto/inline.go | 6649 ++ vendor/github.com/robertkrimen/otto/inline.pl | 1086 + vendor/github.com/robertkrimen/otto/object.go | 156 + .../robertkrimen/otto/object_class.go | 493 + vendor/github.com/robertkrimen/otto/otto.go | 769 + vendor/github.com/robertkrimen/otto/otto_.go | 178 + .../robertkrimen/otto/parser/Makefile | 4 + .../robertkrimen/otto/parser/README.markdown | 190 + .../robertkrimen/otto/parser/dbg.go | 9 + .../robertkrimen/otto/parser/error.go | 175 + .../robertkrimen/otto/parser/expression.go | 1005 + .../robertkrimen/otto/parser/lexer.go | 866 + .../robertkrimen/otto/parser/parser.go | 344 + .../robertkrimen/otto/parser/regexp.go | 358 + .../robertkrimen/otto/parser/scope.go | 44 + .../robertkrimen/otto/parser/statement.go | 938 + .../github.com/robertkrimen/otto/property.go | 220 + .../otto/registry/README.markdown | 51 + .../robertkrimen/otto/registry/registry.go | 47 + vendor/github.com/robertkrimen/otto/result.go | 30 + .../github.com/robertkrimen/otto/runtime.go | 679 + vendor/github.com/robertkrimen/otto/scope.go | 35 + vendor/github.com/robertkrimen/otto/script.go | 119 + vendor/github.com/robertkrimen/otto/stash.go | 296 + .../robertkrimen/otto/token/Makefile | 2 + .../robertkrimen/otto/token/README.markdown | 171 + .../robertkrimen/otto/token/token.go | 116 + .../robertkrimen/otto/token/token_const.go | 349 + .../robertkrimen/otto/token/tokenfmt | 222 + .../robertkrimen/otto/type_arguments.go | 106 + .../robertkrimen/otto/type_array.go | 109 + .../robertkrimen/otto/type_boolean.go | 13 + .../github.com/robertkrimen/otto/type_date.go | 299 + .../robertkrimen/otto/type_error.go | 24 + .../robertkrimen/otto/type_function.go | 292 + .../robertkrimen/otto/type_go_array.go | 134 + 
.../robertkrimen/otto/type_go_map.go | 87 + .../robertkrimen/otto/type_go_slice.go | 126 + .../robertkrimen/otto/type_go_struct.go | 146 + .../robertkrimen/otto/type_number.go | 5 + .../robertkrimen/otto/type_reference.go | 103 + .../robertkrimen/otto/type_regexp.go | 146 + .../robertkrimen/otto/type_string.go | 112 + .../robertkrimen/otto/underscore/Makefile | 11 + .../otto/underscore/README.markdown | 53 + .../robertkrimen/otto/underscore/source.go | 3463 + .../robertkrimen/otto/underscore/testify | 84 + .../otto/underscore/underscore.go | 49 + vendor/github.com/robertkrimen/otto/value.go | 1033 + .../robertkrimen/otto/value_boolean.go | 40 + .../robertkrimen/otto/value_number.go | 324 + .../robertkrimen/otto/value_primitive.go | 23 + .../robertkrimen/otto/value_string.go | 103 + vendor/golang.org/x/text/LICENSE | 27 + vendor/golang.org/x/text/PATENTS | 22 + vendor/golang.org/x/text/encoding/encoding.go | 335 + .../text/encoding/internal/identifier/gen.go | 137 + .../internal/identifier/identifier.go | 81 + .../text/encoding/internal/identifier/mib.go | 1621 + .../x/text/encoding/internal/internal.go | 75 + .../x/text/encoding/unicode/override.go | 82 + .../x/text/encoding/unicode/unicode.go | 434 + vendor/golang.org/x/text/internal/gen/code.go | 338 + vendor/golang.org/x/text/internal/gen/gen.go | 226 + .../internal/utf8internal/utf8internal.go | 87 + vendor/golang.org/x/text/runes/cond.go | 126 + vendor/golang.org/x/text/runes/runes.go | 278 + .../golang.org/x/text/transform/transform.go | 661 + vendor/golang.org/x/text/unicode/cldr/base.go | 110 + vendor/golang.org/x/text/unicode/cldr/cldr.go | 130 + .../golang.org/x/text/unicode/cldr/collate.go | 359 + .../golang.org/x/text/unicode/cldr/decode.go | 171 + .../golang.org/x/text/unicode/cldr/makexml.go | 400 + .../golang.org/x/text/unicode/cldr/resolve.go | 602 + .../golang.org/x/text/unicode/cldr/slice.go | 144 + vendor/golang.org/x/text/unicode/cldr/xml.go | 1440 + vendor/gopkg.in/sourcemap.v1/Makefile | 3 + vendor/gopkg.in/sourcemap.v1/README.md | 35 + .../sourcemap.v1/base64vlq/base64_vlq.go | 95 + vendor/gopkg.in/sourcemap.v1/consumer.go | 312 + vendor/vendor.json | 373 + web/.babelrc | 3 + web/app/components/App.vue | 115 + web/app/components/Correction.vue | 57 + web/app/components/DNSConfig.vue | 41 + web/app/components/Domain.vue | 88 + web/app/components/Editor.vue | 64 + web/app/components/Metadata.vue | 41 + web/app/components/Record.vue | 19 + web/app/components/RunResults.vue | 81 + web/app/main.js | 15 + web/bundle.js | 57565 ++++++++++++++++ web/package.json | 35 + web/static.go | 7669 ++ web/web.go | 280 + web/webpack.config.js | 37 + 359 files changed, 157476 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 js/helpers.js create mode 100644 js/js.go create mode 100644 js/js_test.go create mode 100644 js/parse_tests/001-basic.js create mode 100644 js/parse_tests/001-basic.json create mode 100644 js/parse_tests/002-ttl.js create mode 100644 js/parse_tests/002-ttl.json create mode 100644 js/parse_tests/003-meta.js create mode 100644 js/parse_tests/003-meta.json create mode 100644 js/parse_tests/004-ips.js create mode 100644 js/parse_tests/004-ips.json create mode 100644 js/parse_tests/005-multipleDomains.js create mode 100644 js/parse_tests/005-multipleDomains.json create mode 100644 js/static.go create mode 100644 main.go create mode 100644 models/dns.go create mode 100644 models/dns_test.go create mode 100644 normalize/validate.go create mode 100644 normalize/validate_test.go 
create mode 100644 providers/activedir/activedirProvider.go create mode 100755 providers/activedir/adzonedump.test2.json create mode 100644 providers/activedir/doc.md create mode 100644 providers/activedir/domains.go create mode 100644 providers/activedir/domains_test.go create mode 100644 providers/activedir/getzones_other.go create mode 100644 providers/activedir/getzones_windows.go create mode 100755 providers/activedir/zone.testzone.json create mode 100644 providers/bind/bindProvider.go create mode 100644 providers/bind/prettyzone.go create mode 100644 providers/bind/prettyzone_test.go create mode 100644 providers/bind/serial.go create mode 100644 providers/bind/serial_test.go create mode 100644 providers/cloudflare/cloudflareProvider.go create mode 100644 providers/cloudflare/preprocess_test.go create mode 100644 providers/cloudflare/rest.go create mode 100644 providers/config/providerConfig.go create mode 100644 providers/diff/diff.go create mode 100644 providers/diff/diff_test.go create mode 100644 providers/gandi/gandiProvider.go create mode 100644 providers/gandi/protocol.go create mode 100644 providers/namecheap/namecheap.go create mode 100644 providers/namedotcom/namedotcom.md create mode 100644 providers/namedotcom/namedotcomProvider.go create mode 100644 providers/namedotcom/nameservers.go create mode 100644 providers/namedotcom/nameservers_test.go create mode 100644 providers/namedotcom/records.go create mode 100644 providers/providers.go create mode 100644 providers/route53/route53Provider.go create mode 100644 providers/route53/route53Provider_test.go create mode 100644 transform/transform.go create mode 100644 transform/transform_test.go create mode 100644 vendor/github.com/DisposaBoy/JsonConfigReader/AUTHORS.md create mode 100644 vendor/github.com/DisposaBoy/JsonConfigReader/LICENSE.md create mode 100644 vendor/github.com/DisposaBoy/JsonConfigReader/README.md create mode 100644 vendor/github.com/DisposaBoy/JsonConfigReader/reader.go create mode 100644 vendor/github.com/NYTimes/gziphandler/LICENSE.md create mode 100644 vendor/github.com/NYTimes/gziphandler/README.md create mode 100644 vendor/github.com/NYTimes/gziphandler/gzip.go create mode 100644 vendor/github.com/TomOnTime/utfutil/LICENSE create mode 100644 vendor/github.com/TomOnTime/utfutil/README.md create mode 100644 vendor/github.com/TomOnTime/utfutil/utfutil.go create mode 100644 vendor/github.com/aws/aws-sdk-go/LICENSE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/NOTICE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/convert_types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/logger.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/validation.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/session.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/version.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json create mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/service.go create mode 100644 vendor/github.com/billputer/go-namecheap/LICENSE create mode 100644 vendor/github.com/billputer/go-namecheap/README.md create mode 100644 vendor/github.com/billputer/go-namecheap/dns.go create mode 100644 vendor/github.com/billputer/go-namecheap/domain.go create mode 100644 vendor/github.com/billputer/go-namecheap/namecheap.go create mode 100644 vendor/github.com/billputer/go-namecheap/ns.go create mode 100644 vendor/github.com/billputer/go-namecheap/registrant.go create mode 100644 vendor/github.com/go-ini/ini/LICENSE create mode 100644 vendor/github.com/go-ini/ini/README.md create mode 100644 vendor/github.com/go-ini/ini/README_ZH.md create mode 100644 vendor/github.com/go-ini/ini/ini.go create mode 100644 vendor/github.com/go-ini/ini/struct.go create mode 100644 vendor/github.com/jmespath/go-jmespath/LICENSE create mode 100644 vendor/github.com/jmespath/go-jmespath/Makefile create mode 100644 vendor/github.com/jmespath/go-jmespath/README.md create mode 100644 vendor/github.com/jmespath/go-jmespath/api.go create mode 100644 vendor/github.com/jmespath/go-jmespath/astnodetype_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/functions.go create mode 100644 vendor/github.com/jmespath/go-jmespath/interpreter.go create mode 100644 vendor/github.com/jmespath/go-jmespath/lexer.go create mode 100644 vendor/github.com/jmespath/go-jmespath/parser.go create mode 100644 vendor/github.com/jmespath/go-jmespath/toktype_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/util.go create mode 100644 vendor/github.com/kolo/xmlrpc/LICENSE create mode 100644 vendor/github.com/kolo/xmlrpc/README.md create mode 100644 vendor/github.com/kolo/xmlrpc/client.go create mode 100644 vendor/github.com/kolo/xmlrpc/decoder.go create mode 100644 vendor/github.com/kolo/xmlrpc/encoder.go create mode 100644 vendor/github.com/kolo/xmlrpc/request.go create mode 100644 vendor/github.com/kolo/xmlrpc/response.go create mode 100644 vendor/github.com/kolo/xmlrpc/test_server.rb create mode 100644 vendor/github.com/kolo/xmlrpc/xmlrpc.go create mode 100644 vendor/github.com/miekg/dns/AUTHORS create mode 100644 vendor/github.com/miekg/dns/CONTRIBUTORS create mode 100644 vendor/github.com/miekg/dns/COPYRIGHT create mode 100644 vendor/github.com/miekg/dns/LICENSE create mode 100644 vendor/github.com/miekg/dns/README.md create mode 100644 vendor/github.com/miekg/dns/client.go create mode 100644 vendor/github.com/miekg/dns/clientconfig.go create mode 
100644 vendor/github.com/miekg/dns/defaults.go create mode 100644 vendor/github.com/miekg/dns/dns.go create mode 100644 vendor/github.com/miekg/dns/dnssec.go create mode 100644 vendor/github.com/miekg/dns/dnssec_keygen.go create mode 100644 vendor/github.com/miekg/dns/dnssec_keyscan.go create mode 100644 vendor/github.com/miekg/dns/dnssec_privkey.go create mode 100644 vendor/github.com/miekg/dns/dnsutil/util.go create mode 100644 vendor/github.com/miekg/dns/doc.go create mode 100644 vendor/github.com/miekg/dns/edns.go create mode 100644 vendor/github.com/miekg/dns/format.go create mode 100644 vendor/github.com/miekg/dns/generate.go create mode 100644 vendor/github.com/miekg/dns/labels.go create mode 100644 vendor/github.com/miekg/dns/msg.go create mode 100644 vendor/github.com/miekg/dns/msg_generate.go create mode 100644 vendor/github.com/miekg/dns/msg_helpers.go create mode 100644 vendor/github.com/miekg/dns/nsecx.go create mode 100644 vendor/github.com/miekg/dns/privaterr.go create mode 100644 vendor/github.com/miekg/dns/rawmsg.go create mode 100644 vendor/github.com/miekg/dns/reverse.go create mode 100644 vendor/github.com/miekg/dns/sanitize.go create mode 100644 vendor/github.com/miekg/dns/scan.go create mode 100644 vendor/github.com/miekg/dns/scan_rr.go create mode 100644 vendor/github.com/miekg/dns/scanner.go create mode 100644 vendor/github.com/miekg/dns/server.go create mode 100644 vendor/github.com/miekg/dns/sig0.go create mode 100644 vendor/github.com/miekg/dns/singleinflight.go create mode 100644 vendor/github.com/miekg/dns/tlsa.go create mode 100644 vendor/github.com/miekg/dns/tsig.go create mode 100644 vendor/github.com/miekg/dns/types.go create mode 100644 vendor/github.com/miekg/dns/types_generate.go create mode 100644 vendor/github.com/miekg/dns/udp.go create mode 100644 vendor/github.com/miekg/dns/udp_linux.go create mode 100644 vendor/github.com/miekg/dns/udp_other.go create mode 100644 vendor/github.com/miekg/dns/udp_plan9.go create mode 100644 vendor/github.com/miekg/dns/udp_windows.go create mode 100644 vendor/github.com/miekg/dns/update.go create mode 100644 vendor/github.com/miekg/dns/xfr.go create mode 100644 vendor/github.com/miekg/dns/zmsg.go create mode 100644 vendor/github.com/miekg/dns/ztypes.go create mode 100644 vendor/github.com/prasmussen/gandi-api/LICENSE create mode 100644 vendor/github.com/prasmussen/gandi-api/client/client.go create mode 100644 vendor/github.com/prasmussen/gandi-api/domain/domain.go create mode 100644 vendor/github.com/prasmussen/gandi-api/domain/structs.go create mode 100644 vendor/github.com/prasmussen/gandi-api/domain/util.go create mode 100644 vendor/github.com/prasmussen/gandi-api/domain/zone/record/record.go create mode 100644 vendor/github.com/prasmussen/gandi-api/domain/zone/record/structs.go create mode 100644 vendor/github.com/prasmussen/gandi-api/domain/zone/record/util.go create mode 100644 vendor/github.com/prasmussen/gandi-api/domain/zone/structs.go create mode 100644 vendor/github.com/prasmussen/gandi-api/domain/zone/util.go create mode 100644 vendor/github.com/prasmussen/gandi-api/domain/zone/version/structs.go create mode 100644 vendor/github.com/prasmussen/gandi-api/domain/zone/version/util.go create mode 100644 vendor/github.com/prasmussen/gandi-api/domain/zone/version/version.go create mode 100644 vendor/github.com/prasmussen/gandi-api/domain/zone/zone.go create mode 100644 vendor/github.com/prasmussen/gandi-api/operation/operation.go create mode 100644 vendor/github.com/prasmussen/gandi-api/operation/structs.go 
create mode 100644 vendor/github.com/prasmussen/gandi-api/operation/util.go create mode 100644 vendor/github.com/prasmussen/gandi-api/util/util.go create mode 100644 vendor/github.com/robertkrimen/otto/DESIGN.markdown create mode 100644 vendor/github.com/robertkrimen/otto/LICENSE create mode 100644 vendor/github.com/robertkrimen/otto/Makefile create mode 100644 vendor/github.com/robertkrimen/otto/README.markdown create mode 100644 vendor/github.com/robertkrimen/otto/ast/README.markdown create mode 100644 vendor/github.com/robertkrimen/otto/ast/comments.go create mode 100644 vendor/github.com/robertkrimen/otto/ast/node.go create mode 100644 vendor/github.com/robertkrimen/otto/builtin.go create mode 100644 vendor/github.com/robertkrimen/otto/builtin_array.go create mode 100644 vendor/github.com/robertkrimen/otto/builtin_boolean.go create mode 100644 vendor/github.com/robertkrimen/otto/builtin_date.go create mode 100644 vendor/github.com/robertkrimen/otto/builtin_error.go create mode 100644 vendor/github.com/robertkrimen/otto/builtin_function.go create mode 100644 vendor/github.com/robertkrimen/otto/builtin_json.go create mode 100644 vendor/github.com/robertkrimen/otto/builtin_math.go create mode 100644 vendor/github.com/robertkrimen/otto/builtin_number.go create mode 100644 vendor/github.com/robertkrimen/otto/builtin_object.go create mode 100644 vendor/github.com/robertkrimen/otto/builtin_regexp.go create mode 100644 vendor/github.com/robertkrimen/otto/builtin_string.go create mode 100644 vendor/github.com/robertkrimen/otto/clone.go create mode 100644 vendor/github.com/robertkrimen/otto/cmpl.go create mode 100644 vendor/github.com/robertkrimen/otto/cmpl_evaluate.go create mode 100644 vendor/github.com/robertkrimen/otto/cmpl_evaluate_expression.go create mode 100644 vendor/github.com/robertkrimen/otto/cmpl_evaluate_statement.go create mode 100644 vendor/github.com/robertkrimen/otto/cmpl_parse.go create mode 100644 vendor/github.com/robertkrimen/otto/console.go create mode 100644 vendor/github.com/robertkrimen/otto/dbg.go create mode 100644 vendor/github.com/robertkrimen/otto/dbg/dbg.go create mode 100644 vendor/github.com/robertkrimen/otto/error.go create mode 100644 vendor/github.com/robertkrimen/otto/evaluate.go create mode 100644 vendor/github.com/robertkrimen/otto/file/README.markdown create mode 100644 vendor/github.com/robertkrimen/otto/file/file.go create mode 100644 vendor/github.com/robertkrimen/otto/global.go create mode 100644 vendor/github.com/robertkrimen/otto/inline.go create mode 100755 vendor/github.com/robertkrimen/otto/inline.pl create mode 100644 vendor/github.com/robertkrimen/otto/object.go create mode 100644 vendor/github.com/robertkrimen/otto/object_class.go create mode 100644 vendor/github.com/robertkrimen/otto/otto.go create mode 100644 vendor/github.com/robertkrimen/otto/otto_.go create mode 100644 vendor/github.com/robertkrimen/otto/parser/Makefile create mode 100644 vendor/github.com/robertkrimen/otto/parser/README.markdown create mode 100644 vendor/github.com/robertkrimen/otto/parser/dbg.go create mode 100644 vendor/github.com/robertkrimen/otto/parser/error.go create mode 100644 vendor/github.com/robertkrimen/otto/parser/expression.go create mode 100644 vendor/github.com/robertkrimen/otto/parser/lexer.go create mode 100644 vendor/github.com/robertkrimen/otto/parser/parser.go create mode 100644 vendor/github.com/robertkrimen/otto/parser/regexp.go create mode 100644 vendor/github.com/robertkrimen/otto/parser/scope.go create mode 100644 
vendor/github.com/robertkrimen/otto/parser/statement.go create mode 100644 vendor/github.com/robertkrimen/otto/property.go create mode 100644 vendor/github.com/robertkrimen/otto/registry/README.markdown create mode 100644 vendor/github.com/robertkrimen/otto/registry/registry.go create mode 100644 vendor/github.com/robertkrimen/otto/result.go create mode 100644 vendor/github.com/robertkrimen/otto/runtime.go create mode 100644 vendor/github.com/robertkrimen/otto/scope.go create mode 100644 vendor/github.com/robertkrimen/otto/script.go create mode 100644 vendor/github.com/robertkrimen/otto/stash.go create mode 100644 vendor/github.com/robertkrimen/otto/token/Makefile create mode 100644 vendor/github.com/robertkrimen/otto/token/README.markdown create mode 100644 vendor/github.com/robertkrimen/otto/token/token.go create mode 100644 vendor/github.com/robertkrimen/otto/token/token_const.go create mode 100755 vendor/github.com/robertkrimen/otto/token/tokenfmt create mode 100644 vendor/github.com/robertkrimen/otto/type_arguments.go create mode 100644 vendor/github.com/robertkrimen/otto/type_array.go create mode 100644 vendor/github.com/robertkrimen/otto/type_boolean.go create mode 100644 vendor/github.com/robertkrimen/otto/type_date.go create mode 100644 vendor/github.com/robertkrimen/otto/type_error.go create mode 100644 vendor/github.com/robertkrimen/otto/type_function.go create mode 100644 vendor/github.com/robertkrimen/otto/type_go_array.go create mode 100644 vendor/github.com/robertkrimen/otto/type_go_map.go create mode 100644 vendor/github.com/robertkrimen/otto/type_go_slice.go create mode 100644 vendor/github.com/robertkrimen/otto/type_go_struct.go create mode 100644 vendor/github.com/robertkrimen/otto/type_number.go create mode 100644 vendor/github.com/robertkrimen/otto/type_reference.go create mode 100644 vendor/github.com/robertkrimen/otto/type_regexp.go create mode 100644 vendor/github.com/robertkrimen/otto/type_string.go create mode 100644 vendor/github.com/robertkrimen/otto/underscore/Makefile create mode 100644 vendor/github.com/robertkrimen/otto/underscore/README.markdown create mode 100644 vendor/github.com/robertkrimen/otto/underscore/source.go create mode 100755 vendor/github.com/robertkrimen/otto/underscore/testify create mode 100644 vendor/github.com/robertkrimen/otto/underscore/underscore.go create mode 100644 vendor/github.com/robertkrimen/otto/value.go create mode 100644 vendor/github.com/robertkrimen/otto/value_boolean.go create mode 100644 vendor/github.com/robertkrimen/otto/value_number.go create mode 100644 vendor/github.com/robertkrimen/otto/value_primitive.go create mode 100644 vendor/github.com/robertkrimen/otto/value_string.go create mode 100644 vendor/golang.org/x/text/LICENSE create mode 100644 vendor/golang.org/x/text/PATENTS create mode 100644 vendor/golang.org/x/text/encoding/encoding.go create mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/gen.go create mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/identifier.go create mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/mib.go create mode 100644 vendor/golang.org/x/text/encoding/internal/internal.go create mode 100644 vendor/golang.org/x/text/encoding/unicode/override.go create mode 100644 vendor/golang.org/x/text/encoding/unicode/unicode.go create mode 100644 vendor/golang.org/x/text/internal/gen/code.go create mode 100644 vendor/golang.org/x/text/internal/gen/gen.go create mode 100644 vendor/golang.org/x/text/internal/utf8internal/utf8internal.go create mode 
100644 vendor/golang.org/x/text/runes/cond.go create mode 100644 vendor/golang.org/x/text/runes/runes.go create mode 100644 vendor/golang.org/x/text/transform/transform.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/base.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/cldr.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/collate.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/decode.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/makexml.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/resolve.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/slice.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/xml.go create mode 100644 vendor/gopkg.in/sourcemap.v1/Makefile create mode 100644 vendor/gopkg.in/sourcemap.v1/README.md create mode 100644 vendor/gopkg.in/sourcemap.v1/base64vlq/base64_vlq.go create mode 100644 vendor/gopkg.in/sourcemap.v1/consumer.go create mode 100644 vendor/vendor.json create mode 100644 web/.babelrc create mode 100644 web/app/components/App.vue create mode 100644 web/app/components/Correction.vue create mode 100644 web/app/components/DNSConfig.vue create mode 100644 web/app/components/Domain.vue create mode 100644 web/app/components/Editor.vue create mode 100644 web/app/components/Metadata.vue create mode 100644 web/app/components/Record.vue create mode 100644 web/app/components/RunResults.vue create mode 100644 web/app/main.js create mode 100644 web/bundle.js create mode 100644 web/package.json create mode 100644 web/static.go create mode 100644 web/web.go create mode 100644 web/webpack.config.js diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..ceeb05b41 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +/tmp diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..f61c584dd --- /dev/null +++ b/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2015 Stack Overflow + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/js/helpers.js b/js/helpers.js new file mode 100644 index 000000000..e64983f0b --- /dev/null +++ b/js/helpers.js @@ -0,0 +1,251 @@ +"use strict"; + +var conf = { + registrars: [], + dns_service_providers: [], + domains: [] +}; + +var defaultDsps = []; + +function initialize(){ + conf = { + registrars: [], + dns_service_providers: [], + domains: [] + }; + defaultDsps = []; +} + +function NewRegistrar(name,type,meta) { + if (type) { + type == "MANUAL"; + } + var reg = {name: name, type: type, meta: meta}; + conf.registrars.push(reg); + return name; +} + +function NewDSP(name, type, meta) { + if ((typeof meta === 'object') && ('ip_conversions' in meta)) { + meta.ip_conversions = format_tt(meta.ip_conversions) + } + var dsp = {name: name, type: type, meta: meta}; + conf.dns_service_providers.push(dsp); + return name; +} + +function newDomain(name,registrar) { + return {name: name, registrar: registrar, meta:{}, records:[], dsps: [], defaultTTL: 0, nameservers:[]}; +} + +function processDargs(m, domain) { + // for each modifier, if it is a... + // function: call it with domain + // array: process recursively + // object: merge it into metadata + // string: assume it is a dsp + if (_.isFunction(m)) { + m(domain); + } else if (_.isArray(m)) { + for (var j in m) { + processDargs(m[j], domain) + } + } else if (_.isObject(m)) { + _.extend(domain.meta,m); + } else if (_.isString(m)) { + domain.dsps.push(m); + } else { + console.log("WARNING: domain modifier type unsupported: ", typeof m, " Domain: ", domain) + } +} + +// D(name,registrar): Create a DNS Domain. Use the parameters as records and mods. +function D(name,registrar) { + var domain = newDomain(name,registrar); + for (var i = 2; i 0; i--) + { + num = Math.floor(num/256); + d = num%256 + '.' + d; + } + return d; +} diff --git a/js/js.go b/js/js.go new file mode 100644 index 000000000..c74277445 --- /dev/null +++ b/js/js.go @@ -0,0 +1,46 @@ +package js + +import ( + "encoding/json" + + "github.com/StackExchange/dnscontrol/models" + + "github.com/robertkrimen/otto" + //load underscore js into vm by default + _ "github.com/robertkrimen/otto/underscore" +) + +//ExecuteJavascript accepts a javascript string and runs it, returning the resulting dnsConfig. +func ExecuteJavascript(script string, devMode bool) (*models.DNSConfig, error) { + vm := otto.New() + + helperJs := GetHelpers(devMode) + // run helper script to prime vm and initialize variables + if _, err := vm.Run(string(helperJs)); err != nil { + return nil, err + } + + // run user script + if _, err := vm.Run(script); err != nil { + return nil, err + } + + // export conf as string and unmarshal + value, err := vm.Run(`JSON.stringify(conf)`) + if err != nil { + return nil, err + } + str, err := value.ToString() + if err != nil { + return nil, err + } + conf := &models.DNSConfig{} + if err = json.Unmarshal([]byte(str), conf); err != nil { + return nil, err + } + return conf, nil +} + +func GetHelpers(devMode bool) string { + return FSMustString(devMode, "/helpers.js") +} diff --git a/js/js_test.go b/js/js_test.go new file mode 100644 index 000000000..51d6283a6 --- /dev/null +++ b/js/js_test.go @@ -0,0 +1,58 @@ +package js + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/StackExchange/dnscontrol/models" +) + +const testDir = "js/parse_tests" + +func TestParsedFiles(t *testing.T) { + os.Chdir("..") // go up a directory so we helpers.js is in a consistent place. 
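// Illustrative sketch (not in the original files): ExecuteJavascript, shown in js/js.go above,
// is the single entry point for turning a dnsconfig.js script into a *models.DNSConfig; this
// test and main.go both drive it the same way. A minimal standalone caller might look like the
// snippet below, where "dnsconfig.js" is only an example path:
//
//	package main
//
//	import (
//		"fmt"
//		"io/ioutil"
//		"log"
//
//		"github.com/StackExchange/dnscontrol/js"
//	)
//
//	func main() {
//		text, err := ioutil.ReadFile("dnsconfig.js")
//		if err != nil {
//			log.Fatal(err)
//		}
//		// devMode=false uses the helpers.js copy embedded in js/static.go;
//		// true reads js/helpers.js from disk (the -dev flag in main.go).
//		cfg, err := js.ExecuteJavascript(string(text), false)
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Printf("parsed %d domain(s)\n", len(cfg.Domains))
//	}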
+ files, err := ioutil.ReadDir(testDir) + if err != nil { + t.Fatal(err) + } + for _, f := range files { + if filepath.Ext(f.Name()) != ".js" { + continue + } + t.Log(f.Name(), "------") + content, err := ioutil.ReadFile(filepath.Join(testDir, f.Name())) + if err != nil { + t.Fatal(err) + } + conf, err := ExecuteJavascript(string(content), true) + if err != nil { + t.Fatal(err) + } + actualJson, err := json.MarshalIndent(conf, "", " ") + if err != nil { + t.Fatal(err) + } + expectedFile := filepath.Join(testDir, f.Name()[:len(f.Name())-3]+".json") + expectedData, err := ioutil.ReadFile(expectedFile) + if err != nil { + t.Fatal(err) + } + conf = &models.DNSConfig{} + err = json.Unmarshal(expectedData, conf) + if err != nil { + t.Fatal(err) + } + expectedJson, err := json.MarshalIndent(conf, "", " ") + if err != nil { + t.Fatal(err) + } + if string(expectedJson) != string(actualJson) { + t.Error("Expected and actual json don't match") + t.Log("Expected:", string(expectedJson)) + t.Log("Actual:", string(actualJson)) + } + } +} diff --git a/js/parse_tests/001-basic.js b/js/parse_tests/001-basic.js new file mode 100644 index 000000000..e339cf244 --- /dev/null +++ b/js/parse_tests/001-basic.js @@ -0,0 +1,6 @@ + +var REG = NewRegistrar("Third-Party","NONE"); +var CF = NewDSP("Cloudflare", "CLOUDFLAREAPI") +D("foo.com",REG,CF, + A("@","1.2.3.4") +); \ No newline at end of file diff --git a/js/parse_tests/001-basic.json b/js/parse_tests/001-basic.json new file mode 100644 index 000000000..da88134ae --- /dev/null +++ b/js/parse_tests/001-basic.json @@ -0,0 +1,30 @@ +{ + "registrars": [ + { + "name": "Third-Party", + "type": "NONE" + } + ], + "dns_service_providers": [ + { + "name": "Cloudflare", + "type": "CLOUDFLAREAPI" + } + ], + "domains": [ + { + "name": "foo.com", + "registrar": "Third-Party", + "dsps": [ + "Cloudflare" + ], + "records": [ + { + "type": "A", + "name": "@", + "target": "1.2.3.4" + } + ] + } + ] + } \ No newline at end of file diff --git a/js/parse_tests/002-ttl.js b/js/parse_tests/002-ttl.js new file mode 100644 index 000000000..198bfdeda --- /dev/null +++ b/js/parse_tests/002-ttl.js @@ -0,0 +1,5 @@ +var REG = NewRegistrar("Third-Party","NONE"); +var CF = NewDSP("Cloudflare", "CLOUDFLAREAPI") +D("foo.com",REG,CF, + A("@","1.2.3.4",TTL(42)) +); \ No newline at end of file diff --git a/js/parse_tests/002-ttl.json b/js/parse_tests/002-ttl.json new file mode 100644 index 000000000..aaf4fe0e1 --- /dev/null +++ b/js/parse_tests/002-ttl.json @@ -0,0 +1,31 @@ +{ + "registrars": [ + { + "name": "Third-Party", + "type": "NONE" + } + ], + "dns_service_providers": [ + { + "name": "Cloudflare", + "type": "CLOUDFLAREAPI" + } + ], + "domains": [ + { + "name": "foo.com", + "registrar": "Third-Party", + "dsps": [ + "Cloudflare" + ], + "records": [ + { + "type": "A", + "name": "@", + "target": "1.2.3.4", + "ttl": 42 + } + ] + } + ] + } \ No newline at end of file diff --git a/js/parse_tests/003-meta.js b/js/parse_tests/003-meta.js new file mode 100644 index 000000000..679a97125 --- /dev/null +++ b/js/parse_tests/003-meta.js @@ -0,0 +1,5 @@ + +var CLOUDFLARE = NewRegistrar("Cloudflare","CLOUDFLAREAPI"); +D("foo.com",CLOUDFLARE, + A("@","1.2.3.4",{"cloudflare_proxy":"ON"}) +); \ No newline at end of file diff --git a/js/parse_tests/003-meta.json b/js/parse_tests/003-meta.json new file mode 100644 index 000000000..8e1a8f917 --- /dev/null +++ b/js/parse_tests/003-meta.json @@ -0,0 +1,26 @@ +{ + "registrars": [ + { + "name": "Cloudflare", + "type": "CLOUDFLAREAPI" + } + ], + "dns_service_providers": 
[], + "domains": [ + { + "name": "foo.com", + "registrar": "Cloudflare", + "dsps": [], + "records": [ + { + "type": "A", + "name": "@", + "target": "1.2.3.4", + "meta": { + "cloudflare_proxy": "ON" + } + } + ] + } + ] + } \ No newline at end of file diff --git a/js/parse_tests/004-ips.js b/js/parse_tests/004-ips.js new file mode 100644 index 000000000..e2b0eb01b --- /dev/null +++ b/js/parse_tests/004-ips.js @@ -0,0 +1,10 @@ +var REG = NewRegistrar("Third-Party","NONE"); +var CF = NewDSP("Cloudflare", "CLOUDFLAREAPI") + +var BASE = IP("1.2.3.4") + +D("foo.com",REG,CF, + A("@",BASE), + A("p1",BASE+1), + A("p255", BASE+255) +); \ No newline at end of file diff --git a/js/parse_tests/004-ips.json b/js/parse_tests/004-ips.json new file mode 100644 index 000000000..e6673a9ca --- /dev/null +++ b/js/parse_tests/004-ips.json @@ -0,0 +1,28 @@ +{ + "registrars": [ + { + "name": "Third-Party", + "type": "NONE" + } + ], + "dns_service_providers": [ + { + "name": "Cloudflare", + "type": "CLOUDFLAREAPI" + } + ], + "domains": [ + { + "name": "foo.com", + "registrar": "Third-Party", + "dsps": [ + "Cloudflare" + ], + "records": [ + { "type": "A","name": "@","target": "1.2.3.4"}, + { "type": "A","name": "p1","target": "1.2.3.5"}, + { "type": "A","name": "p255","target": "1.2.4.3"} + ] + } + ] + } \ No newline at end of file diff --git a/js/parse_tests/005-multipleDomains.js b/js/parse_tests/005-multipleDomains.js new file mode 100644 index 000000000..cb66017fb --- /dev/null +++ b/js/parse_tests/005-multipleDomains.js @@ -0,0 +1,7 @@ + +var REG = NewRegistrar("Third-Party","NONE"); +var CF = NewDSP("Cloudflare", "CLOUDFLAREAPI") +D("foo.com",REG,CF, + A("@","1.2.3.4") +); +D("foo.com",REG); \ No newline at end of file diff --git a/js/parse_tests/005-multipleDomains.json b/js/parse_tests/005-multipleDomains.json new file mode 100644 index 000000000..53c86952e --- /dev/null +++ b/js/parse_tests/005-multipleDomains.json @@ -0,0 +1,35 @@ + { "registrars": [ + { + "name": "Third-Party", + "type": "NONE" + } + ], + "dns_service_providers": [ + { + "name": "Cloudflare", + "type": "CLOUDFLAREAPI" + } + ], + "domains": [ + { + "name": "foo.com", + "registrar": "Third-Party", + "dsps": [ + "Cloudflare" + ], + "records": [ + { + "type": "A", + "name": "@", + "target": "1.2.3.4" + } + ] + }, + { + "name": "foo.com", + "registrar": "Third-Party", + "dsps": [], + "records": [] + } + ] + } \ No newline at end of file diff --git a/js/static.go b/js/static.go new file mode 100644 index 000000000..b37d98534 --- /dev/null +++ b/js/static.go @@ -0,0 +1,253 @@ +package js + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "io/ioutil" + "net/http" + "os" + "path" + "sync" + "time" +) + +type _escLocalFS struct{} + +var _escLocal _escLocalFS + +type _escStaticFS struct{} + +var _escStatic _escStaticFS + +type _escDirectory struct { + fs http.FileSystem + name string +} + +type _escFile struct { + compressed string + size int64 + modtime int64 + local string + isDir bool + + once sync.Once + data []byte + name string +} + +func (_escLocalFS) Open(name string) (http.File, error) { + f, present := _escData[path.Clean(name)] + if !present { + return nil, os.ErrNotExist + } + return os.Open(f.local) +} + +func (_escStaticFS) prepare(name string) (*_escFile, error) { + f, present := _escData[path.Clean(name)] + if !present { + return nil, os.ErrNotExist + } + var err error + f.once.Do(func() { + f.name = path.Base(name) + if f.size == 0 { + return + } + var gr *gzip.Reader + b64 := base64.NewDecoder(base64.StdEncoding, 
bytes.NewBufferString(f.compressed)) + gr, err = gzip.NewReader(b64) + if err != nil { + return + } + f.data, err = ioutil.ReadAll(gr) + }) + if err != nil { + return nil, err + } + return f, nil +} + +func (fs _escStaticFS) Open(name string) (http.File, error) { + f, err := fs.prepare(name) + if err != nil { + return nil, err + } + return f.File() +} + +func (dir _escDirectory) Open(name string) (http.File, error) { + return dir.fs.Open(dir.name + name) +} + +func (f *_escFile) File() (http.File, error) { + type httpFile struct { + *bytes.Reader + *_escFile + } + return &httpFile{ + Reader: bytes.NewReader(f.data), + _escFile: f, + }, nil +} + +func (f *_escFile) Close() error { + return nil +} + +func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) { + return nil, nil +} + +func (f *_escFile) Stat() (os.FileInfo, error) { + return f, nil +} + +func (f *_escFile) Name() string { + return f.name +} + +func (f *_escFile) Size() int64 { + return f.size +} + +func (f *_escFile) Mode() os.FileMode { + return 0 +} + +func (f *_escFile) ModTime() time.Time { + return time.Unix(f.modtime, 0) +} + +func (f *_escFile) IsDir() bool { + return f.isDir +} + +func (f *_escFile) Sys() interface{} { + return f +} + +// FS returns a http.Filesystem for the embedded assets. If useLocal is true, +// the filesystem's contents are instead used. +func FS(useLocal bool) http.FileSystem { + if useLocal { + return _escLocal + } + return _escStatic +} + +// Dir returns a http.Filesystem for the embedded assets on a given prefix dir. +// If useLocal is true, the filesystem's contents are instead used. +func Dir(useLocal bool, name string) http.FileSystem { + if useLocal { + return _escDirectory{fs: _escLocal, name: name} + } + return _escDirectory{fs: _escStatic, name: name} +} + +// FSByte returns the named file from the embedded assets. If useLocal is +// true, the filesystem's contents are instead used. +func FSByte(useLocal bool, name string) ([]byte, error) { + if useLocal { + f, err := _escLocal.Open(name) + if err != nil { + return nil, err + } + b, err := ioutil.ReadAll(f) + f.Close() + return b, err + } + f, err := _escStatic.prepare(name) + if err != nil { + return nil, err + } + return f.data, nil +} + +// FSMustByte is the same as FSByte, but panics if name is not present. +func FSMustByte(useLocal bool, name string) []byte { + b, err := FSByte(useLocal, name) + if err != nil { + panic(err) + } + return b +} + +// FSString is the string version of FSByte. +func FSString(useLocal bool, name string) (string, error) { + b, err := FSByte(useLocal, name) + return string(b), err +} + +// FSMustString is the string version of FSMustByte. 
+func FSMustString(useLocal bool, name string) string { + return string(FSMustByte(useLocal, name)) +} + +var _escData = map[string]*_escFile{ + + "/helpers.js": { + local: "js/helpers.js", + size: 6427, + modtime: 0, + compressed: ` +H4sIAAAJbogA/7RY7W/bvBH/nr/iJmC1tOiR89Jkg1wP8560D4rVTpC4WwDDMBiJtplKokDSdrPA+dsH +vkiiXrwkH9oPqUXeHX/3wuPdORuOgQtGIuEMjo62iEFEsyUM4fkIAIDhFeGCIcZDmM19tRZnfMEx25II +L3JGtyTGtW2aIpKphaO9kRnjJdok4ornHIYwmw+OjpabLBKEZkAyIghKyH+x6+lDawgOoXgDkiYa+b0f +aJAtQHsL0gTvbosj3Qyl2BdPOfZTLJBnYJEluHLRK2HKLxgOwRmPJt9H3xx90F79lTZgeCWVkuJCUEIV +S6j++iCFh+qvgSitEFSaB/mGr12GV97AeEZsWKYEtcBf3d241QlatgUcXAWdLtUGDIdD6NGHRxyJngcf +PoDbI/kiotkWM05oxntAMi3Ds5wiF4I6IQxhSVmKxEIIt2Pfa5gk5vn7TdLpdG2dmOevWSfDuysVEtpA +pX29MuAVYw1TSRRWPw26573cjiiLeTib+1IjHYBFhE2n30I48ZUkiVoG6Gy+r4PKGY0w51eIrbib+iZo +bWP3+9KygFG0hpTGZEkw86UviQDCAQVBUKM1kkOIUJJIoh0RayPXJkSMoaewACBV2TBOtjh5sql0cEhX +sBVWR2aCKgPESCCbUqaSbBUC4nyT4gKdNEtJJW/OIiD8i8HoprWwKqLLNUYYlDt7wAnHJf9IQu9glnZy +ZXQ9qrBty65be/Y4Lw1eI9wfOvhaWaPj5EWAfwqcxQZ6IA3kp4c1uFPG6hBk+GUw6cDuEGJzRDTjNMFB +Qleu85/R7eTr5I/QSCnDRSeoTcY3eU6ZwHEIjr5vMhH44IC+GGq5aZC9jNd+H66a1yaE3xlGAgOCq8md +ERHAd45BrDHkiKEUC8w4IF7cFEBZLGHxoLoCLcFGQZUmtCLDw5dXW6f0PIEhnA2AfEJstUlxJniQ4Gwl +1gMgx8e2tSV1CkMoCWdkXpn6wL1sZDFBR3EMQ1i41qviBTFZLjHDWYRdy58G6sJVXF4gb7RbWMH96cFz +2/s/vb1m0/lPv2gm4xlE2jvT6Td364Vwh4Wy/nT6TRlF+0Zb37K5Jq8nvhIKs83EAiESGMK2eNRMNJQ5 +rnasMUN5vFrTSlkOt3kPYIhtDHFQpdQ2lJEOCZIX+Xhswp4HQeBVxxo6ILkdYTIYYQgrLEo2twwJ/8x7 +HR2K41t1rhv7zsjxCzRSsldHOhq9GWxJ+ovxjkb/F/Lvk9H4symEEFth8Qpuix40wy8Erw4z6A26tgbT +++k78JfUvx799H76GvbxvQaTM0IZEU9v06HggpLt3cqcv0EZlcZVKirOsZ4qW1NwxveOD7ZZfWgrO7l7 +h58K4l/vpsnda16SUXj3+fbfn29tBWywDYIG6Fdyn1U/anPXq2YlKjT/7y1k5fFVYS4Yyrj8XAj0kJgO +Rt4Ref5sltBdCKc+rMlqHcKZL1/dfyKOQzif+6C3PxbbF2r7600Il/O5FqNqQ+cUXuAMXuAcXgbwEV7g +Al4AXuDSOdIOSkiGde91ZL/cw5MBEPgEDZBd77eilw1Eg7Z8wiWBQgdDIHmgfg7K7k19es9WXWqVlXrT +q5dlhaxFkKJck/ilv4j3XDQdm/QspsIl3t4LHinJXMd3rFJKlm/dggtOfbpV8jVbSbqTljP75Xl5kNCd +57eXpa+61o1nqy15rvqtm2XlI9N40p3RBV7A8aQ6Eo9RWROa/QE4RUHydXxzfTtdTG9Hk7sv17djHXsJ +khbTzqqKqjJS38H0lrtTT0JN4Y4Pzj/Kgtcvjar/PfcasdULmxfJxuXt5/WkcPP99o/PrqWbXjD44uBf +GOffsx8Z3cnydokSjot0cr1oMZdrB/gF2+DanW9mP+5zgVhXnpzNO0poRTxQVfTBArrK/5JqRuZ2dWz8 +ImnqDa/tE9Xrt3KrOULmk6VJa7KVzDbpg+x8i/4yl6IY5jwAPWcQQERQ3nF5oSeKxTXZ1sZuxFb3ztC0 +JzcRDOHZHk0cTr4+CJGEdrFaPcFqEmDmBmak0d3YxzgiMYYHxHEMNNNTkYL+N/jSaO+5bu9lna3fS9ls +ya/ixatYrztbeUlba+cVrbZcCF+/wPi+kmx19oVipcFt37XiSab2TzpiDkQTWC2ZpJuReW3vbbMDSF2G +Iyt/wjuaeNDqF9FU3n8OgprxBm8zKN2Dkhg+fABrRlFtNJ+UErHFWxuiWaxtxn1rqRxBMBy15w9vp2pY +y9yhVI0Hq0HnvdNhPSmziAvpxk7BbSt0zzDGB4cX9dmFe/eD5DnJVn/ynKYqnc9oHJhhRDFXlfGiUi/J +oRpZlm8KhyWjKayFyMN+nwsU/aBbzJYJ3QURTfuo/7fTk4u/fjzpn56dXl6eyBy+JahgeERbxCNGchGg +B7oRiichDwyxp/5DQnITb8FapNZLeOPGVHhH1igEhhBTEfA8IcLtBb36vNNV/47j2cnc+8vZxaV3LD9O +5571dVb7Op97jQFpUYNs0uJgspRfak68yWK8JBmOPXs6r852ahPvxoxLSmuzZJu0ORHW2fjPZxeXHQ/S +uawN/67yyG+/6ftQyVQQYYzEOlgmlDJ5Zl/qWYWDJR2OoRf04BjiQfvBiqVJ/hcAAP//Ocw/QBsZAAA= +`, + }, + + "/tester.html": { + local: "js/tester.html", + size: 953, + modtime: 0, + compressed: ` +H4sIAAAJbogA/2yST2/bPAzG7/4UhN73YCOr1TrrOjSWDwM6YD10w1AM6IocHFmJ6SliJsnZsiLffZD/ +NAmykynqx4cPKee1X+siymtVVkUEAJA7aXHjwVkpWO39xt1yLqlSafOzVXaXSlrzPrzI0iydpms0aeMY +oPFqZdHvBHN1mV2/uyiz6erqwb+t/N3T5+Zm8XWyfWxvJo/vv325mtbt+vvHD/cPn57o7p4EA2nJObK4 +QiNYacjs1tQ6VuS8t1REOe995guqdoPdelpUxkky3pKGxoFXziub83o6EF799qVVJWAlGBoGln45wa4v +GUjSTrDs8jJ0GblR2PIhWrTekwEyUqP8IVjocO/ihBWPyvmc9/dnZRVuu5ZWuVb7bpAKt0UUna34lvPW +VMo6SVY1LiW7OkpcDAs+XsSJgtIbZd2/kSJatkZ6JAOj7ZfOHhr0WGr8o+Jk1mW2pYXw0CDg/5j9h4Yl 
+6bbU47Uk40irVNMqDtiQVgE5Op9iZjmkg+K4iCQNf13cODJZF3VcMov2gTwYPgAhSuDl1Sa+Aas8CGCs +lw+niQCWt7oYUkuyMSBgrzNWn8AaCzbBCbuFoaZbzDIGv9soWnaFzzgHIQQwWjRKepaM5afunnGeHDSU +duqYe8b57Lw9D/37/P50Cn4YwyrfWhM+s2gfvb7v3wAAAP//jV4zrLkDAAA= +`, + }, + + "/": { + isDir: true, + local: "js", + }, +} diff --git a/main.go b/main.go new file mode 100644 index 000000000..b32bb57a1 --- /dev/null +++ b/main.go @@ -0,0 +1,321 @@ +package main + +import ( + "bufio" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + + "github.com/StackExchange/dnscontrol/js" + "github.com/StackExchange/dnscontrol/models" + "github.com/StackExchange/dnscontrol/normalize" + "github.com/StackExchange/dnscontrol/providers" + "github.com/StackExchange/dnscontrol/providers/config" + "github.com/StackExchange/dnscontrol/web" + + //Define all known providers here. They should each register themselves with the providers package via init function. + _ "github.com/StackExchange/dnscontrol/providers/activedir" + _ "github.com/StackExchange/dnscontrol/providers/bind" + _ "github.com/StackExchange/dnscontrol/providers/cloudflare" + _ "github.com/StackExchange/dnscontrol/providers/gandi" + _ "github.com/StackExchange/dnscontrol/providers/namecheap" + _ "github.com/StackExchange/dnscontrol/providers/namedotcom" + _ "github.com/StackExchange/dnscontrol/providers/route53" +) + +//go:generate esc -modtime 0 -o js/static.go -pkg js -ignore go -prefix js js +//go:generate esc -modtime 0 -o web/static.go -pkg web -include=bundle\.js -ignore node_modules -prefix web web + +// One of these config options must be set. +var jsFile = flag.String("js", "dnsconfig.js", "Javascript file containing dns config") +var stdin = flag.Bool("stdin", false, "Read domain config JSON from stdin") +var jsonInput = flag.String("json", "", "Read domain config from specified JSON file.") + +var jsonOutputPre = flag.String("debugrawjson", "", "Write JSON intermediate to this file pre-normalization.") +var jsonOutputPost = flag.String("debugjson", "", "During preview, write JSON intermediate to this file instead of stdout.") + +var configFile = flag.String("creds", "creds.json", "Provider credentials JSON file") +var devMode = flag.Bool("dev", false, "Use helpers.js from disk instead of embedded") + +var flagProviders = flag.String("providers", "", "Providers to enable (comma seperated list); default is all-but-bind. 
Specify 'all' for all (including bind)") +var domains = flag.String("domains", "", "Comma seperated list of domain names to include") + +var interactive = flag.Bool("i", false, "Confirm or Exclude each correction before they run") + +var ( + SHA = "" + Version = "" + BuildTime = "" +) + +func main() { + log.SetFlags(log.LstdFlags | log.Lshortfile) + flag.Parse() + command := flag.Arg(0) + if command == "version" { + printVersion() + return + } + if command == "web" { + runWebServer() + return + } + + var dnsConfig *models.DNSConfig + if *stdin { + log.Fatal("Read from stdin not implemented yet.") + } else if *jsonInput != "" { + log.Fatal("Direct JSON read not implemented") + } else if *jsFile != "" { + text, err := ioutil.ReadFile(*jsFile) + if err != nil { + log.Fatalf("Error reading %v: %v\n", *jsFile, err) + } + dnsConfig, err = js.ExecuteJavascript(string(text), *devMode) + if err != nil { + log.Fatalf("Error executing javasscript in (%v): %v", *jsFile, err) + } + } + + if dnsConfig == nil { + log.Fatal("No config specified.") + } + + if flag.NArg() != 1 { + fmt.Println("Usage: dnscontrol [options] cmd") + fmt.Println(" cmd:") + fmt.Println(" preview: Show changed that would happen.") + fmt.Println(" push: Make changes for real.") + fmt.Println(" version: Print program version string.") + fmt.Println(" print: Print compiled data.") + fmt.Println("") + flag.PrintDefaults() + return + } + if *jsonOutputPre != "" { + dat, _ := json.MarshalIndent(dnsConfig, "", " ") + err := ioutil.WriteFile(*jsonOutputPre, dat, 0644) + if err != nil { + panic(err) + } + } + + errs := normalize.NormalizeAndValidateConfig(dnsConfig) + if len(errs) > 0 { + fmt.Printf("%d Validation errors:\n", len(errs)) + for i, err := range errs { + fmt.Printf("%d: %s\n", i+1, err) + } + } + + if command == "print" { + dat, _ := json.MarshalIndent(dnsConfig, "", " ") + if *jsonOutputPost == "" { + fmt.Println("While running JS:", string(dat)) + } else { + err := ioutil.WriteFile(*jsonOutputPost, dat, 0644) + if err != nil { + panic(err) + } + } + return + } + + providerConfigs, err := config.LoadProviderConfigs(*configFile) + if err != nil { + log.Fatalf("error loading provider configurations: %s", err) + } + registrars, err := providers.CreateRegistrars(dnsConfig, providerConfigs) + if err != nil { + log.Fatalf("Error creating registrars: %v\n", err) + } + dsps, err := providers.CreateDsps(dnsConfig, providerConfigs) + if err != nil { + log.Fatalf("Error creating dsps: %v\n", err) + } + + fmt.Printf("Initialized %d registrars and %d dns service providers.\n", len(registrars), len(dsps)) + anyErrors, totalCorrections := false, 0 + switch command { + case "preview", "push": + DomainLoop: + for _, domain := range dnsConfig.Domains { + if !shouldRunDomain(domain.Name) { + continue + } + fmt.Printf("******************** Domain: %s\n", domain.Name) + for pi, prov := range domain.Dsps { + + dc, err := domain.Copy() + if err != nil { + log.Fatal(err) + } + shouldrun := shouldRunProvider(prov) + if shouldrun { + fmt.Printf("----- DNS Provider: %s\n", prov) + } else { + if pi == 0 { + fmt.Printf("----- DNS Provider: %s (read-only)\n", prov) + } else { + fmt.Printf("----- DNS Provider: %s (skipping)\n", prov) + } + } + dsp, ok := dsps[prov] + if !ok { + log.Fatalf("DSP %s not declared.", prov) + } + corrections, err := dsp.GetDomainCorrections(dc) + if err != nil { + anyErrors = true + fmt.Printf("Error getting corrections: %s\n", err) + continue DomainLoop + } + storeNameservers(dc, domain) + if !shouldrun { + continue + } + 
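+				// Count the corrections and print them; in push mode, also execute them.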
totalCorrections += len(corrections) + anyErrors = printOrRunCorrections(corrections, command) || anyErrors + } + if !shouldRunProvider(domain.Registrar) { + continue + } + fmt.Printf("----- Registrar: %s\n", domain.Registrar) + reg, ok := registrars[domain.Registrar] + if !ok { + log.Fatalf("Registrar %s not declared.", reg) + } + if len(domain.Nameservers) == 0 { + //fmt.Printf("No nameservers declared; skipping registrar.\n") + continue + } + dc, err := domain.Copy() + if err != nil { + log.Fatal(err) + } + corrections, err := reg.GetRegistrarCorrections(dc) + if err != nil { + fmt.Printf("Error getting corrections: %s\n", err) + anyErrors = true + continue + } + totalCorrections += len(corrections) + anyErrors = printOrRunCorrections(corrections, command) || anyErrors + } + default: + log.Fatalf("Unknown command %s", command) + } + if os.Getenv("TEAMCITY_VERSION") != "" { + fmt.Fprintf(os.Stderr, "##teamcity[buildStatus status='SUCCESS' text='%d corrections']", totalCorrections) + } + fmt.Printf("Done. %d corrections.\n", totalCorrections) + if anyErrors { + os.Exit(1) + } +} + +var reader = bufio.NewReader(os.Stdin) + +func printOrRunCorrections(corrections []*models.Correction, command string) (anyErrors bool) { + anyErrors = false + if len(corrections) == 0 { + return anyErrors + } + for i, correction := range corrections { + fmt.Printf("#%d: %s\n", i+1, correction.Msg) + if command == "push" { + if *interactive { + fmt.Print("Run? (Y/n): ") + txt, err := reader.ReadString('\n') + run := true + if err != nil { + run = false + } + txt = strings.ToLower(strings.TrimSpace(txt)) + if txt != "y" { + run = false + } + if !run { + fmt.Println("Skipping") + continue + } + } + err := correction.F() + if err != nil { + fmt.Println("FAILURE!", err) + anyErrors = true + } else { + fmt.Println("SUCCESS!") + } + } + } + return anyErrors +} + +func shouldRunProvider(p string) bool { + if *flagProviders == "all" { + return true + } + if *flagProviders == "" { + return p != "bind" + // NOTE(tlim): Hardcoding bind is a hacky way to make it off by default. + // As a result, bind only runs if you list it in -providers or use + // -providers=all. + // If you always want bind to run, call it something else in dnsconfig.js + // for example `NewDSP('bindyes', 'BIND',`. + // We don't want this hack, but we shouldn't need this in the future + // so it doesn't make sense to write a lot of code to make it work. + // In the future, the above `return p != "bind"` can become `return true`. + // Alternatively we might want to add a complex system that permits + // fancy whitelist/blacklisting of providers with defaults and so on. + // In that case, all of this hack will go away. + } + for _, prov := range strings.Split(*flagProviders, ",") { + if prov == p { + return true + } + } + return false +} + +func shouldRunDomain(d string) bool { + if *domains == "" { + return true + } + for _, dom := range strings.Split(*domains, ",") { + if dom == d { + return true + } + } + return false +} + +func storeNameservers(from, to *models.DomainConfig) { + if len(to.Nameservers) == 0 && len(from.Nameservers) > 0 { + to.Nameservers = from.Nameservers + } +} + +// printVersion prints the version banner. 
+func printVersion() { + if Version == "" { + Version = "dev" + } + sha := "" + if SHA != "" { + sha = fmt.Sprintf(" (%s)", SHA) + } + if BuildTime != "" { + sha = sha + fmt.Sprintf(" built %s", BuildTime) + } + fmt.Printf("dnscontrol %s%s\n", Version, sha) +} + +func runWebServer() { + fmt.Printf("Running Webserver on :8080 (js = %s , creds = %s)", *jsFile, *configFile) + web.Serve(*jsFile, *configFile, *devMode) +} diff --git a/models/dns.go b/models/dns.go new file mode 100644 index 000000000..45d971b5f --- /dev/null +++ b/models/dns.go @@ -0,0 +1,204 @@ +package models + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "fmt" + "log" + "net" + "reflect" + "strconv" + + "github.com/miekg/dns" + "github.com/StackExchange/dnscontrol/transform" +) + +const DefaultTTL = uint32(300) + +type DNSConfig struct { + Registrars []*RegistrarConfig `json:"registrars"` + DNSProviders []*DNSProviderConfig `json:"dns_service_providers"` + Domains []*DomainConfig `json:"domains"` +} + +func (config *DNSConfig) FindDomain(query string) *DomainConfig { + for _, b := range config.Domains { + if b.Name == query { + return b + } + } + return nil +} + +type RegistrarConfig struct { + Name string `json:"name"` + Type string `json:"type"` + Metadata json.RawMessage `json:"meta,omitempty"` +} + +type DNSProviderConfig struct { + Name string `json:"name"` + Type string `json:"type"` + Metadata json.RawMessage `json:"meta,omitempty"` +} + +// RecordConfig stores a DNS record. +// Providers are responsible for validating or normalizing the data +// that goes into a RecordConfig. +// If you update Name, you have to update NameFQDN and vice-versa. +// +// Name: +// This is the shortname i.e. the NameFQDN without the origin suffix. +// It should never have a trailing "." +// It should never be null. It should store It "@", not the apex domain, not null, etc. +// It shouldn't end with the domain origin. If the origin is "foo.com." then +// if Name == "foo.com" then that literally means "foo.com.foo.com." is +// the intended FQDN. +// NameFQDN: +// This is the FQDN version of Name. +// It should never have a trailiing ".". +type RecordConfig struct { + Type string `json:"type"` + Name string `json:"name"` // The short name. See below. + Target string `json:"target"` // If a name, must end with "." + TTL uint32 `json:"ttl,omitempty"` + Metadata map[string]string `json:"meta,omitempty"` + NameFQDN string `json:"-"` // Must end with ".$origin". See below. + Priority uint16 `json:"priority,omitempty"` +} + +func (r *RecordConfig) GetName() string { + return r.NameFQDN +} +func (r *RecordConfig) GetType() string { + return r.Type +} +func (r *RecordConfig) GetContent() string { + return r.Target +} +func (r *RecordConfig) GetComparisionData() string { + mxPrio := "" + if r.Type == "MX" { + mxPrio = fmt.Sprintf(" %d ", r.Priority) + } + return fmt.Sprintf("%d%s", r.TTL, mxPrio) +} + +/// Convert RecordConfig -> dns.RR. +func (r *RecordConfig) RR() dns.RR { + + // Note: The label is a FQDN ending in a ".". It will not put "@" in the Name field. + + // NB(tlim): An alternative way to do this would be + // to create the rr via: rr := TypeToRR[x]() + // then set the parameters. A benchmark may find that + // faster. This was faster to implement. 
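+	// A rough, untested sketch of that alternative (using miekg/dns's TypeToRR
+	// map and the rdtype/hdr values computed below) might look like:
+	//   rr := dns.TypeToRR[rdtype]()
+	//   *rr.Header() = hdr
+	//   // ...then set the type-specific fields directly.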
+ + rdtype, ok := dns.StringToType[r.Type] + if !ok { + log.Fatalf("No such DNS type as (%#v)\n", r.Type) + } + + hdr := dns.RR_Header{ + Name: r.NameFQDN + ".", + Rrtype: rdtype, + Class: dns.ClassINET, + Ttl: r.TTL, + } + + // Handle some special cases: + switch rdtype { + case dns.TypeMX: + // Has a Priority field. + return &dns.MX{Hdr: hdr, Preference: r.Priority, Mx: r.Target} + case dns.TypeTXT: + // Assure no problems due to quoting/unquoting: + return &dns.TXT{Hdr: hdr, Txt: []string{r.Target}} + default: + } + + var ttl string + if r.TTL == 0 { + ttl = strconv.FormatUint(uint64(DefaultTTL), 10) + } else { + ttl = strconv.FormatUint(uint64(r.TTL), 10) + } + + s := fmt.Sprintf("%s %s IN %s %s", r.NameFQDN, ttl, r.Type, r.Target) + rc, err := dns.NewRR(s) + if err != nil { + log.Fatalf("NewRR rejected RecordConfig: %#v (t=%#v)\n%v\n", s, r.Target, err) + } + return rc +} + +type Nameserver struct { + Name string `json:"name"` // Normalized to a FQDN with NO trailing "." + Target string `json:"target"` +} + +type DomainConfig struct { + Name string `json:"name"` // NO trailing "." + Registrar string `json:"registrar"` + Dsps []string `json:"dsps"` + Metadata map[string]string `json:"meta,omitempty"` + Records []*RecordConfig `json:"records"` + Nameservers []*Nameserver `json:"nameservers,omitempty"` + KeepUnknown bool `json:"keepunknown"` +} + +func (dc *DomainConfig) Copy() (*DomainConfig, error) { + newDc := &DomainConfig{} + err := copyObj(dc, newDc) + return newDc, err +} + +func (r *RecordConfig) Copy() (*RecordConfig, error) { + newR := &RecordConfig{} + err := copyObj(r, newR) + return newR, err +} + +func copyObj(input interface{}, output interface{}) error { + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + dec := gob.NewDecoder(buf) + if err := enc.Encode(input); err != nil { + return err + } + if err := dec.Decode(output); err != nil { + return err + } + return nil +} + +func (dc *DomainConfig) HasRecordTypeName(rtype, name string) bool { + for _, r := range dc.Records { + if r.Type == rtype && r.Name == name { + return true + } + } + return false +} + +func InterfaceToIP(i interface{}) (net.IP, error) { + switch v := i.(type) { + case float64: + u := uint32(v) + return transform.UintToIP(u), nil + case string: + if ip := net.ParseIP(v); ip != nil { + return ip, nil + } + return nil, fmt.Errorf("%s is not a valid ip address", v) + default: + return nil, fmt.Errorf("Cannot convert type %s to ip.", reflect.TypeOf(i)) + } +} + +//Correction is anything that can be run. Implementation is up to the specific provider. 
+type Correction struct { + F func() error `json:"-"` + Msg string +} diff --git a/models/dns_test.go b/models/dns_test.go new file mode 100644 index 000000000..9792b2ee6 --- /dev/null +++ b/models/dns_test.go @@ -0,0 +1,39 @@ +package models + +import ( + "testing" +) + +func TestHasRecordTypeName(t *testing.T) { + x := &RecordConfig{ + Type: "A", + Name: "@", + } + dc := DomainConfig{} + if dc.HasRecordTypeName("A", "@") { + t.Errorf("%v: expected (%v) got (%v)\n", dc.Records, false, true) + } + dc.Records = append(dc.Records, x) + if !dc.HasRecordTypeName("A", "@") { + t.Errorf("%v: expected (%v) got (%v)\n", dc.Records, true, false) + } + if dc.HasRecordTypeName("AAAA", "@") { + t.Errorf("%v: expected (%v) got (%v)\n", dc.Records, false, true) + } +} + +func TestRR(t *testing.T) { + experiment := RecordConfig{ + Type: "A", + Name: "foo", + Target: "1.2.3.4", + TTL: 0, + NameFQDN: "foo.example.com", + Priority: 0, + } + expected := "foo.example.com.\t300\tIN\tA\t1.2.3.4" + found := experiment.RR().String() + if found != expected { + t.Errorf("RR expected (%#v) got (%#v)\n", expected, found) + } +} diff --git a/normalize/validate.go b/normalize/validate.go new file mode 100644 index 000000000..09ff1e1a3 --- /dev/null +++ b/normalize/validate.go @@ -0,0 +1,294 @@ +package normalize + +import ( + "fmt" + "net" + "strings" + + "github.com/miekg/dns" + "github.com/miekg/dns/dnsutil" + "github.com/StackExchange/dnscontrol/models" + "github.com/StackExchange/dnscontrol/transform" +) + +// Returns false if label does not validate. +func assert_no_enddot(label string) error { + if label == "@" { + return nil + } + if len(label) < 1 { + return fmt.Errorf("WARNING: null label.") + } + if label[len(label)-1] == '.' { + return fmt.Errorf("WARNING: label (%v) ends with a (.)", label) + } + return nil +} + +// Returns false if label does not validate. +func assert_no_underscores(label string) error { + if strings.ContainsRune(label, '_') { + return fmt.Errorf("WARNING: label (%v) contains an underscore", label) + } + return nil +} + +// Returns false if target does not validate. +func assert_valid_ipv4(label string) error { + if net.ParseIP(label).To4() == nil { + return fmt.Errorf("WARNING: target (%v) is not an IPv4 address", label) + } + return nil +} + +// Returns false if target does not validate. +func assert_valid_ipv6(label string) error { + if net.ParseIP(label).To16() == nil { + return fmt.Errorf("WARNING: target (%v) is not an IPv6 address", label) + } + return nil +} + +// assert_valid_cname_target returns 1 if target is not valid for cnames. +func assert_valid_target(label string) error { + if label == "@" { + return nil + } + if len(label) < 1 { + return fmt.Errorf("WARNING: null label.") + } + // If it containts a ".", it must end in a ".". + if strings.ContainsRune(label, '.') && label[len(label)-1] != '.' { + return fmt.Errorf("WARNING: label (%v) includes a (.), must end with a (.)", label) + } + return nil +} + +// validateRecordTypes list of valid rec.Type values. Returns true if this is a real DNS record type, false means it is a pseudo-type used internally. 
+func validateRecordTypes(rec *models.RecordConfig, domain_name string) error { + var valid_types = map[string]bool{ + "A": true, + "AAAA": true, + "CNAME": true, + "IMPORT_TRANSFORM": false, + "MX": true, + "TXT": true, + "NS": true, + } + + if _, ok := valid_types[rec.Type]; !ok { + return fmt.Errorf("Unsupported record type (%v) domain=%v name=%v", rec.Type, domain_name, rec.Name) + } + return nil +} + +// validateTargets returns true if rec.Target is valid for the rec.Type. +func validateTargets(rec *models.RecordConfig, domain_name string) (errs []error) { + label := rec.Name + target := rec.Target + check := func(e error) { + if e != nil { + errs = append(errs, e) + } + } + switch rec.Type { + case "A": + check(assert_no_enddot(label)) + check(assert_no_underscores(label)) + check(assert_valid_ipv4(target)) + case "AAAA": + check(assert_no_enddot(label)) + check(assert_no_underscores(label)) + check(assert_valid_ipv6(target)) + case "CNAME": + check(assert_no_enddot(label)) + check(assert_no_underscores(label)) + check(assert_valid_target(target)) + case "MX": + check(assert_no_enddot(label)) + check(assert_no_underscores(label)) + check(assert_valid_target(target)) + case "NS": + check(assert_no_enddot(label)) + check(assert_no_underscores(label)) + check(assert_valid_target(target)) + case "TXT", "IMPORT_TRANSFORM": + default: + errs = append(errs, fmt.Errorf("Unimplemented record type (%v) domain=%v name=%v", + rec.Type, domain_name, rec.Name)) + } + return +} + +func transform_cname(target, old_domain, new_domain string) string { + // Canonicalize. If it isn't a FQDN, add the new_domain. + result := dnsutil.AddOrigin(target, old_domain) + if dns.IsFqdn(result) { + result = result[:len(result)-1] + } + return dnsutil.AddOrigin(result, new_domain) + "." +} + +// import_transform imports the records of one zone into another, modifying records along the way. +func import_transform(src_domain, dst_domain *models.DomainConfig, transforms []transform.IpConversion) error { + // Read src_domain.Records, transform, and append to dst_domain.Records: + // 1. Skip any that aren't A or CNAMEs. + // 2. Append dest_domainname to the end of the label. + // 3. For CNAMEs, append dest_domainname to the end of the target. + // 4. For As, change the target as described the transforms. + + for _, rec := range src_domain.Records { + var newrec models.RecordConfig + newrec = *rec + newrec.Name = newrec.NameFQDN + newrec.NameFQDN = dnsutil.AddOrigin(newrec.Name, dst_domain.Name) + switch rec.Type { + case "A": + tr, err := transform.TransformIP(net.ParseIP(newrec.Target), transforms) + if err != nil { + return fmt.Errorf("import_transform: TransformIP(%v, %v) returned err=%s", newrec.Target, transforms, err) + } + newrec.Target = tr.String() + case "CNAME": + newrec.Target = transform_cname(newrec.Target, src_domain.Name, dst_domain.Name) + case "MX", "NS", "TXT": + // Not imported. + continue + default: + return fmt.Errorf("import_transform: Unimplemented record type %v (%v)", + rec.Type, rec.Name) + } + dst_domain.Records = append(dst_domain.Records, &newrec) + } + return nil +} + +// deleteImportTransformRecords deletes any IMPORT_TRANSFORM records from a domain. +func deleteImportTransformRecords(domain *models.DomainConfig) { + for i := len(domain.Records) - 1; i >= 0; i-- { + rec := domain.Records[i] + if rec.Type == "IMPORT_TRANSFORM" { + domain.Records = append(domain.Records[:i], domain.Records[i+1:]...) 
+ } + } +} + +func NormalizeAndValidateConfig(config *models.DNSConfig) (errs []error) { + // TODO(tlim): Before any changes are made, we should check the rules + // such as MX/CNAME/NS .Target must be a single word, "@", or FQDN. + // Validate and normalize + for _, domain := range config.Domains { + + // Normalize Nameservers. + for _, ns := range domain.Nameservers { + ns.Name = dnsutil.AddOrigin(ns.Name, domain.Name) + ns.Name = strings.TrimRight(ns.Name, ".") + } + + // Normalize Records. + for _, rec := range domain.Records { + + // Validate the unmodified inputs: + if err := validateRecordTypes(rec, domain.Name); err != nil { + errs = append(errs, err) + } + if errs2 := validateTargets(rec, domain.Name); errs2 != nil { + errs = append(errs, errs2...) + } + + // Canonicalize Targets. + if rec.Type == "CNAME" || rec.Type == "MX" || rec.Type == "NS" { + rec.Target = dnsutil.AddOrigin(rec.Target, domain.Name+".") + } + // Populate FQDN: + rec.NameFQDN = dnsutil.AddOrigin(rec.Name, domain.Name) + } + } + + // Process any pseudo-records: + for _, domain := range config.Domains { + for _, rec := range domain.Records { + if rec.Type == "IMPORT_TRANSFORM" { + table, err := transform.DecodeTransformTable(rec.Metadata["transform_table"]) + if err != nil { + errs = append(errs, err) + continue + } + err = import_transform(config.FindDomain(rec.Target), domain, table) + if err != nil { + errs = append(errs, err) + } + } + } + } + // Clean up: + for _, domain := range config.Domains { + deleteImportTransformRecords(domain) + } + + // Run record transforms + for _, domain := range config.Domains { + if err := applyRecordTransforms(domain); err != nil { + errs = append(errs, err) + } + } + + // Verify all labels are FQDN ending with ".": + for _, domain := range config.Domains { + for _, rec := range domain.Records { + // .Name must NOT end in "." + if rec.Name[len(rec.Name)-1] == '.' { + errs = append(errs, fmt.Errorf("Should not happen: Label ends with '.': %v %v %v %v %v", + domain.Name, rec.Name, rec.NameFQDN, rec.Type, rec.Target)) + } + // .NameFQDN must NOT end in "." + if rec.NameFQDN[len(rec.NameFQDN)-1] == '.' { + errs = append(errs, fmt.Errorf("Should not happen: FQDN ends with '.': %v %v %v %v %v", + domain.Name, rec.Name, rec.NameFQDN, rec.Type, rec.Target)) + } + // .Target MUST end in "." + if rec.Type == "CNAME" || rec.Type == "NS" || rec.Type == "MX" { + if rec.Target[len(rec.Target)-1] != '.' 
{ + errs = append(errs, fmt.Errorf("Should not happen: Target does NOT ends with '.': %v %v %v %v %v", + domain.Name, rec.Name, rec.NameFQDN, rec.Type, rec.Target)) + } + } + } + } + return errs +} + +func applyRecordTransforms(domain *models.DomainConfig) error { + for _, rec := range domain.Records { + if rec.Type != "A" { + continue + } + tt, ok := rec.Metadata["transform"] + if !ok { + continue + } + table, err := transform.DecodeTransformTable(tt) + if err != nil { + return err + } + ip := net.ParseIP(rec.Target) //ip already validated above + newIPs, err := transform.TransformIPToList(net.ParseIP(rec.Target), table) + if err != nil { + return err + } + for i, newIP := range newIPs { + if i == 0 && !newIP.Equal(ip) { + rec.Target = newIP.String() //replace target of first record if different + } else if i > 0 { + // any additional ips need identical records with the alternate ip added to the domain + copy, err := rec.Copy() + if err != nil { + return err + } + copy.Target = newIP.String() + domain.Records = append(domain.Records, copy) + } + } + } + return nil +} diff --git a/normalize/validate_test.go b/normalize/validate_test.go new file mode 100644 index 000000000..0d9e539fb --- /dev/null +++ b/normalize/validate_test.go @@ -0,0 +1,140 @@ +package normalize + +import ( + "github.com/StackExchange/dnscontrol/models" + "testing" +) + +func Test_assert_no_enddot(t *testing.T) { + var tests = []struct { + experiment string + isError bool + }{ + {"@", false}, + {"foo", false}, + {"foo.bar", false}, + {"foo.", true}, + {"foo.bar.", true}, + } + + for _, test := range tests { + err := assert_no_enddot(test.experiment) + checkError(t, err, test.isError, test.experiment) + } +} + +func checkError(t *testing.T, err error, shouldError bool, experiment string) { + if err != nil && !shouldError { + t.Errorf("%v: Error (%v)\n", experiment, err) + } + if err == nil && shouldError { + t.Errorf("%v: Expected error but got none \n", experiment) + } +} + +func Test_assert_no_underscores(t *testing.T) { + var tests = []struct { + experiment string + isError bool + }{ + {"@", false}, + {"foo", false}, + {"_foo", true}, + {"foo_", true}, + {"fo_o", true}, + } + + for _, test := range tests { + err := assert_no_underscores(test.experiment) + checkError(t, err, test.isError, test.experiment) + } +} + +func Test_assert_valid_ipv4(t *testing.T) { + var tests = []struct { + experiment string + isError bool + }{ + {"1.2.3.4", false}, + {"1.2.3.4/10", true}, + {"1.2.3", true}, + {"foo", true}, + } + + for _, test := range tests { + err := assert_valid_ipv4(test.experiment) + checkError(t, err, test.isError, test.experiment) + } +} + +func Test_assert_valid_target(t *testing.T) { + var tests = []struct { + experiment string + isError bool + }{ + {"@", false}, + {"foo", false}, + {"foo.bar.", false}, + {"foo.", false}, + {"foo.bar", true}, + } + + for _, test := range tests { + err := assert_valid_target(test.experiment) + checkError(t, err, test.isError, test.experiment) + } +} + +func Test_transform_cname(t *testing.T) { + var tests = []struct { + experiment string + expected string + }{ + {"@", "old.com.new.com."}, + {"foo", "foo.old.com.new.com."}, + {"foo.bar", "foo.bar.old.com.new.com."}, + {"foo.bar.", "foo.bar.new.com."}, + {"chat.stackexchange.com.", "chat.stackexchange.com.new.com."}, + } + + for _, test := range tests { + actual := transform_cname(test.experiment, "old.com", "new.com") + if test.expected != actual { + t.Errorf("%v: expected (%v) got (%v)\n", test.experiment, test.expected, actual) + } 
+ } +} + +func TestTransforms(t *testing.T) { + var tests = []struct { + givenIP string + expectedRecords []string + }{ + {"0.0.5.5", []string{"2.0.5.5"}}, + {"3.0.5.5", []string{"5.5.5.5"}}, + {"7.0.5.5", []string{"9.9.9.9", "10.10.10.10"}}, + } + const transform = "0.0.0.0~1.0.0.0~2.0.0.0~; 3.0.0.0~4.0.0.0~~5.5.5.5; 7.0.0.0~8.0.0.0~~9.9.9.9,10.10.10.10" + for i, test := range tests { + dc := &models.DomainConfig{ + Records: []*models.RecordConfig{ + {Type: "A", Target: test.givenIP, Metadata: map[string]string{"transform": transform}}, + }, + } + err := applyRecordTransforms(dc) + if err != nil { + t.Errorf("error on test %d: %s", i, err) + continue + } + if len(dc.Records) != len(test.expectedRecords) { + t.Errorf("test %d: expect %d records but found %d", i, len(test.expectedRecords), len(dc.Records)) + continue + } + for r, rec := range dc.Records { + if rec.Target != test.expectedRecords[r] { + t.Errorf("test %d at index %d: records don't match. Expect %s but found %s.", i, r, test.expectedRecords[r], rec.Target) + continue + } + } + } +} diff --git a/providers/activedir/activedirProvider.go b/providers/activedir/activedirProvider.go new file mode 100644 index 000000000..3561e3724 --- /dev/null +++ b/providers/activedir/activedirProvider.go @@ -0,0 +1,37 @@ +package activedir + +import ( + "encoding/json" + "flag" + "fmt" + "runtime" + + "github.com/StackExchange/dnscontrol/providers" +) + +var flagFakePowerShell = flag.Bool("fakeps", false, "ACTIVEDIR: Do not run PowerShell. Open adzonedump.*.json files for input, and write to -psout any PS1 commands that make changes.") +var flagPsFuture = flag.String("psout", "dns_update_commands.ps1", "ACTIVEDIR: Where to write PS1 commands for future execution.") +var flagPsLog = flag.String("pslog", "powershell.log", "ACTIVEDIR: filename of PS1 command log.") + +// This is the struct that matches either (or both) of the Registrar and/or DNSProvider interfaces: +type adProvider struct { + adServer string +} + +// Register with the dnscontrol system. +// This establishes the name (all caps), and the function to call to initialize it. +func init() { + providers.RegisterDomainServiceProviderType("ACTIVEDIRECTORY_PS", newDNS) +} + +func newDNS(config map[string]string, metadata json.RawMessage) (providers.DNSServiceProvider, error) { + if runtime.GOOS == "windows" || *flagFakePowerShell { + srv := config["ADServer"] + if srv == "" { + return nil, fmt.Errorf("ADServer required for Active Directory provider") + } + return &adProvider{adServer: srv}, nil + } + fmt.Printf("WARNING: PowerShell not available. ActiveDirectory will not be updated.\n") + return providers.None{}, nil +} diff --git a/providers/activedir/adzonedump.test2.json b/providers/activedir/adzonedump.test2.json new file mode 100755 index 0000000000000000000000000000000000000000..e3796a28540c91ac5cc3181e00128a9446ebc24f GIT binary patch literal 1822 zcmd6n-Alq?6vm(Hp#MR3UAmP{>#8s>y6A(1ZX!gcVBiUU Wo~i!_cbi$=pzc|oW?DZ-Gy5C7gwsv{ literal 0 HcmV?d00001 diff --git a/providers/activedir/doc.md b/providers/activedir/doc.md new file mode 100644 index 000000000..81e55eb4b --- /dev/null +++ b/providers/activedir/doc.md @@ -0,0 +1,78 @@ +### ActiveDirectory + +This provider updates a DNS Zone in an ActiveDirectory Integrated Zone. + +When run on Windows, AD is updated directly. The code generates +PowerShell commands, executes them, and checks the results. +It leaves behind a log file of the commands that were generated. 
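+
+For illustration, an A-record addition is rendered as a command of roughly this
+form (the server, zone, and record values below are made-up examples):
+
+```
+Add-DnsServerResourceRecordA -ComputerName "your-ad-server" -ZoneName "example.com" -Name "www" -IPv4Address "10.0.0.1"
+```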
+
+When run on non-Windows, AD isn't updated because we can't execute
+PowerShell at this time. Instead of reading the existing zone data
+from AD, it learns what records are in the zone by reading
+`adzonedump.{ZONENAME}.json`, a file that must be created beforehand.
+It does not actually update AD; it generates a file with the PowerShell
+commands that would do the updates, which you must execute afterwards.
+If the `adzonedump.{ZONENAME}.json` file does not exist, the zone is quietly skipped.
+
+Not implemented:
+
+* Delete records. This provider will not delete any records. It will only add
+and change existing records. See "Note to future devs" below.
+* Update TTLs. It ignores TTLs.
+
+
+## required creds.json config
+
+No "creds.json" configuration is expected.
+
+## example dns config js:
+
+```
+var REG_NONE = NewRegistrar('none', 'NONE')
+var DSP_ACTIVEDIRECTORY_DS = NewDSP("activedir", "ACTIVEDIRECTORY_PS");
+
+D('ds.stackexchange.com', REG_NONE,
+  DSP_ACTIVEDIRECTORY_DS,
+  //records handled by another provider...
+);
+```
+
+## Special Windows stuff
+
+This provider needs to do 2 things:
+
+* Get a list of zone records:
+  * powerShellDump: Runs a PS command that dumps the zone to JSON.
+  * readZoneDump: Opens an adzonedump.$DOMAINNAME.json file and reads JSON out of it. If the file does not exist, this is considered an error and processing stops.
+
+* Update records:
+  * powerShellExec: Execute PS commands that do the update.
+  * powerShellRecord: Record the PS command that can be run later to do the updates. This file is set by -psout (default: dns_update_commands.ps1).
+
+So what happens when? Well, that's complex. We want both Windows and Linux to be able to use -fakewindows
+for either debugging or (on Windows) actual use. However, only Windows permits -fakewindows=false and actually executes
+the PS code. Here's which algorithm is used for each case:
+
+ * If -fakewindows is used on any system: readZoneDump and powerShellRecord are used.
+ * On Windows (without -fakewindows): powerShellDump and powerShellExec are used.
+ * On Linux (without -fakewindows): the provider loads as "NONE" and nothing happens.
+
+
+## Note to future devs
+
+### Why doesn't this provider delete records?
+
+Because at this time Stack doesn't fully control AD zones
+using dnscontrol. It only needs to add/change records.
+
+What should we do when it does need to delete them?
+
+Currently NO_PURGE is a no-op. I would change it to update
+domain metadata to flag that deletes should be enabled/disabled.
+Then generate the deletes only if this flag exists. To be paranoid,
+the func that does the deleting could check this flag to make sure
+that it really should be deleting something.
diff --git a/providers/activedir/domains.go b/providers/activedir/domains.go
new file mode 100644
index 000000000..36ae9260c
--- /dev/null
+++ b/providers/activedir/domains.go
@@ -0,0 +1,293 @@
+package activedir
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/TomOnTime/utfutil"
+	"github.com/miekg/dns/dnsutil"
+
+	"github.com/StackExchange/dnscontrol/models"
+	"github.com/StackExchange/dnscontrol/providers/diff"
+)
+
+const zoneDumpFilenamePrefix = "adzonedump"
+
+type RecordConfigJson struct {
+	Name string `json:"hostname"`
+	Type string `json:"recordtype"`
+	Data string `json:"recorddata"`
+	TTL  uint32 `json:"timetolive"`
+}
+
+// GetDomainCorrections gets the existing records from AD, diffs them against the desired records, and returns corrections.
+func (c *adProvider) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) { + + // Read foundRecords: + foundRecords, err := c.getExistingRecords(dc.Name) + if err != nil { + return nil, fmt.Errorf("c.getExistingRecords(%v) failed: %v", dc.Name, err) + } + + // Read expectedRecords: + //expectedRecords := make([]*models.RecordConfig, len(dc.Records)) + expectedRecords := make([]diff.Record, len(dc.Records)) + for i, r := range dc.Records { + if r.TTL == 0 { + r.TTL = models.DefaultTTL + } + expectedRecords[i] = r + } + + // Convert to []diff.Records and compare: + foundDiffRecords := make([]diff.Record, 0, len(foundRecords)) + for _, rec := range foundRecords { + foundDiffRecords = append(foundDiffRecords, rec) + } + + _, create, _, mod := diff.IncrementalDiff(foundDiffRecords, expectedRecords) + // NOTE(tlim): This provider does not delete records. If + // you need to delete a record, either delete it manually + // or see providers/activedir/doc.md for implementation tips. + + // Generate changes. + corrections := []*models.Correction{} + for _, d := range create { + corrections = append(corrections, c.createRec(dc.Name, d.Desired.(*models.RecordConfig))...) + } + for _, m := range mod { + corrections = append(corrections, c.modifyRec(dc.Name, m)) + } + return corrections, nil + +} + +// zoneDumpFilename returns the filename to use to write or read +// an activedirectory zone dump for a particular domain. +func zoneDumpFilename(domainname string) string { + return zoneDumpFilenamePrefix + "." + domainname + ".json" +} + +// readZoneDump reads a pre-existing zone dump from adzonedump.*.json. +func (c *adProvider) readZoneDump(domainname string) ([]byte, error) { + // File not found is considered an error. + dat, err := utfutil.ReadFile(zoneDumpFilename(domainname), utfutil.WINDOWS) + if err != nil { + fmt.Println("Powershell to generate zone dump:") + fmt.Println(c.generatePowerShellZoneDump(domainname)) + } + return dat, err +} + +// powerShellLogCommand logs to flagPsLog that a PowerShell command is going to be run. +func powerShellLogCommand(command string) error { + return logHelper(fmt.Sprintf("# %s\r\n%s\r\n", time.Now().UTC(), strings.TrimSpace(command))) +} + +// powerShellLogOutput logs to flagPsLog that a PowerShell command is going to be run. +func powerShellLogOutput(s string) error { + return logHelper(fmt.Sprintf("OUTPUT: START\r\n%s\r\nOUTPUT: END\r\n", s)) +} + +// powerShellLogErr logs that a PowerShell command had an error. +func powerShellLogErr(e error) error { + err := logHelper(fmt.Sprintf("ERROR: %v\r\r", e)) //Log error to powershell.log + if err != nil { + return err //Bubble up error created in logHelper + } + return e //Bubble up original error +} + +func logHelper(s string) error { + logfile, err := os.OpenFile(*flagPsLog, os.O_APPEND|os.O_RDWR|os.O_CREATE, 0660) + if err != nil { + return fmt.Errorf("ERROR: Can not create/append to %#v: %v\n", *flagPsLog, err) + } + _, err = fmt.Fprintln(logfile, s) + if err != nil { + return fmt.Errorf("ERROR: Append to %#v failed: %v\n", *flagPsLog, err) + } + if logfile.Close() != nil { + return fmt.Errorf("ERROR: Closing %#v failed: %v\n", *flagPsLog, err) + } + return nil +} + +// powerShellRecord records that a PowerShell command should be executed later. 
+func powerShellRecord(command string) error { + recordfile, err := os.OpenFile(*flagPsFuture, os.O_APPEND|os.O_RDWR|os.O_CREATE, 0660) + if err != nil { + return fmt.Errorf("ERROR: Can not create/append to %#v: %v\n", *flagPsFuture, err) + } + _, err = recordfile.WriteString(command) + if err != nil { + return fmt.Errorf("ERROR: Append to %#v failed: %v\n", *flagPsFuture, err) + } + return recordfile.Close() +} + +func (c *adProvider) getExistingRecords(domainname string) ([]*models.RecordConfig, error) { + //log.Printf("getExistingRecords(%s)\n", domainname) + + // Get the JSON either from adzonedump or by running a PowerShell script. + data, err := c.getRecords(domainname) + if err != nil { + return nil, fmt.Errorf("getRecords failed on %#v: %v\n", domainname, err) + } + + var recs []*RecordConfigJson + err = json.Unmarshal(data, &recs) + if err != nil { + return nil, fmt.Errorf("json.Unmarshal failed on %#v: %v\n", domainname, err) + } + + result := make([]*models.RecordConfig, 0, len(recs)) + for i := range recs { + t, err := recs[i].unpackRecord(domainname) + if err == nil { + result = append(result, t) + } + } + + return result, nil +} + +func (r *RecordConfigJson) unpackRecord(origin string) (*models.RecordConfig, error) { + rc := models.RecordConfig{} + + rc.Name = strings.ToLower(r.Name) + rc.NameFQDN = dnsutil.AddOrigin(rc.Name, origin) + rc.Type = r.Type + rc.TTL = r.TTL + + switch rc.Type { + case "A": + rc.Target = r.Data + case "CNAME": + rc.Target = strings.ToLower(r.Data) + case "AAAA", "MX", "NAPTR", "NS", "SOA", "SRV": + return nil, fmt.Errorf("Unimplemented: %v", r.Type) + default: + log.Fatalf("Unhandled models.RecordConfigJson type: %v (%v)\n", rc.Type, r) + } + + return &rc, nil +} + +// powerShellDump runs a PowerShell command to get a dump of all records in a DNS zone. +func (c *adProvider) generatePowerShellZoneDump(domainname string) string { + cmd_txt := `@("REPLACE_WITH_ZONE") | %{ +Get-DnsServerResourceRecord -ComputerName REPLACE_WITH_COMPUTER_NAME -ZoneName $_ | select hostname,recordtype,@{n="timestamp";e={$_.timestamp.tostring()}},@{n="timetolive";e={$_.timetolive.totalseconds}},@{n="recorddata";e={($_.recorddata.ipv4address,$_.recorddata.ipv6address,$_.recorddata.HostNameAlias,"other_record" -ne $null)[0]-as [string]}} | ConvertTo-Json > REPLACE_WITH_FILENAMEPREFIX.REPLACE_WITH_ZONE.json +}` + cmd_txt = strings.Replace(cmd_txt, "REPLACE_WITH_ZONE", domainname, -1) + cmd_txt = strings.Replace(cmd_txt, "REPLACE_WITH_COMPUTER_NAME", c.adServer, -1) + cmd_txt = strings.Replace(cmd_txt, "REPLACE_WITH_FILENAMEPREFIX", zoneDumpFilenamePrefix, -1) + + return cmd_txt +} + +// generatePowerShellCreate generates PowerShell commands to ADD a record. +func (c *adProvider) generatePowerShellCreate(domainname string, rec *models.RecordConfig) string { + + content := rec.Target + + text := "\r\n" // Skip a line. 
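+	// Build the Add-DnsServerResourceRecord<RRType> invocation piece by piece.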
+ text += fmt.Sprintf("Add-DnsServerResourceRecord%s", rec.Type) + text += fmt.Sprintf(` -ComputerName "%s"`, c.adServer) + text += fmt.Sprintf(` -ZoneName "%s"`, domainname) + text += fmt.Sprintf(` -Name "%s"`, rec.Name) + switch rec.Type { + case "CNAME": + text += fmt.Sprintf(` -HostNameAlias "%s"`, content) + case "A": + text += fmt.Sprintf(` -IPv4Address "%s"`, content) + case "NS": + text = fmt.Sprintf("\r\n"+`echo "Skipping NS update (%v %v)"`+"\r\n", rec.Name, rec.Target) + default: + panic(fmt.Errorf("ERROR: generatePowerShellCreate() does not yet handle recType=%s recName=%#v content=%#v)\n", rec.Type, rec.Name, content)) + } + text += "\r\n" + + return text +} + +// generatePowerShellModify generates PowerShell commands to MODIFY a record. +func (c *adProvider) generatePowerShellModify(domainname, recName, recType, oldContent, newContent string, oldTTL, newTTL uint32) string { + + var queryField, queryContent string + + switch recType { + case "A": + queryField = "IPv4address" + queryContent = `"` + oldContent + `"` + case "CNAME": + queryField = "HostNameAlias" + queryContent = `"` + oldContent + `"` + default: + panic(fmt.Errorf("ERROR: generatePowerShellModify() does not yet handle recType=%s recName=%#v content=(%#v, %#v)\n", recType, recName, oldContent, newContent)) + } + + text := "\r\n" // Skip a line. + text += fmt.Sprintf(`echo "MODIFY %s %s %s old=%s new=%s"`, recName, domainname, recType, oldContent, newContent) + text += "\r\n" + + text += "$OldObj = Get-DnsServerResourceRecord" + text += fmt.Sprintf(` -ComputerName "%s"`, c.adServer) + text += fmt.Sprintf(` -ZoneName "%s"`, domainname) + text += fmt.Sprintf(` -Name "%s"`, recName) + text += fmt.Sprintf(` -RRType "%s"`, recType) + text += fmt.Sprintf(" | Where-Object {$_.RecordData.%s -eq %s -and $_.HostName -eq \"%s\"}", queryField, queryContent, recName) + text += "\r\n" + text += `if($OldObj.Length -ne $null){ throw "Error, multiple results for Get-DnsServerResourceRecord" }` + text += "\r\n" + + text += "$NewObj = $OldObj.Clone()" + text += "\r\n" + + if oldContent != newContent { + text += fmt.Sprintf(`$NewObj.RecordData.%s = "%s"`, queryField, newContent) + text += "\r\n" + } + + if oldTTL != newTTL { + text += fmt.Sprintf(`$NewObj.TimeToLive = New-TimeSpan -Seconds %d`, newTTL) + text += "\r\n" + } + + text += "Set-DnsServerResourceRecord" + text += fmt.Sprintf(` -ComputerName "%s"`, c.adServer) + text += fmt.Sprintf(` -ZoneName "%s"`, domainname) + text += fmt.Sprintf(` -NewInputObject $NewObj -OldInputObject $OldObj`) + text += "\r\n" + + return text +} + +func (c *adProvider) createRec(domainname string, rec *models.RecordConfig) []*models.Correction { + arr := []*models.Correction{ + { + Msg: fmt.Sprintf("CREATE record: %s %s ttl(%d) %s", rec.Name, rec.Type, rec.TTL, rec.Target), + F: func() error { + return powerShellDoCommand(c.generatePowerShellCreate(domainname, rec)) + }}, + } + return arr +} + +func (c *adProvider) modifyRec(domainname string, m diff.Correlation) *models.Correction { + + old, rec := m.Existing.(*models.RecordConfig), m.Desired.(*models.RecordConfig) + oldContent := old.GetContent() + newContent := rec.GetContent() + + return &models.Correction{ + Msg: m.String(), + F: func() error { + return powerShellDoCommand(c.generatePowerShellModify(domainname, rec.Name, rec.Type, oldContent, newContent, old.TTL, rec.TTL)) + }, + } +} diff --git a/providers/activedir/domains_test.go b/providers/activedir/domains_test.go new file mode 100644 index 000000000..c8b450503 --- /dev/null +++ 
b/providers/activedir/domains_test.go @@ -0,0 +1,40 @@ +package activedir + +import ( + "fmt" + "testing" + + "github.com/StackExchange/dnscontrol/models" +) + +func TestGetExistingRecords(t *testing.T) { + + cf := &adProvider{} + + *flagFakePowerShell = true + actual, err := cf.getExistingRecords("test2") + if err != nil { + t.Fatal(err) + } + expected := []*models.RecordConfig{ + {Name: "@", NameFQDN: "test2", Type: "A", TTL: 600, Target: "10.166.2.11"}, + //{Name: "_msdcs", NameFQDN: "_msdcs.test2", Type: "NS", TTL: 300, Target: "other_record"}, // Will be filtered. + {Name: "co-devsearch02", NameFQDN: "co-devsearch02.test2", Type: "A", TTL: 3600, Target: "10.8.2.64"}, + {Name: "co-devservice01", NameFQDN: "co-devservice01.test2", Type: "A", TTL: 1200, Target: "10.8.2.48"}, // Downcased. + {Name: "yum", NameFQDN: "yum.test2", Type: "A", TTL: 3600, Target: "10.8.0.59"}, + } + + actualS := "" + for i, x := range actual { + actualS += fmt.Sprintf("%d %v\n", i, x) + } + + expectedS := "" + for i, x := range expected { + expectedS += fmt.Sprintf("%d %v\n", i, x) + } + + if actualS != expectedS { + t.Fatalf("got\n(%s)\nbut expected\n(%s)", actualS, expectedS) + } +} diff --git a/providers/activedir/getzones_other.go b/providers/activedir/getzones_other.go new file mode 100644 index 000000000..ac39fd19c --- /dev/null +++ b/providers/activedir/getzones_other.go @@ -0,0 +1,17 @@ +// +build !windows + +package activedir + +func (c *adProvider) getRecords(domainname string) ([]byte, error) { + if !*flagFakePowerShell { + panic("Can not happen: PowerShell on non-windows") + } + return c.readZoneDump(domainname) +} + +func powerShellDoCommand(command string) error { + if !*flagFakePowerShell { + panic("Can not happen: PowerShell on non-windows") + } + return powerShellRecord(command) +} diff --git a/providers/activedir/getzones_windows.go b/providers/activedir/getzones_windows.go new file mode 100644 index 000000000..893c3a6be --- /dev/null +++ b/providers/activedir/getzones_windows.go @@ -0,0 +1,95 @@ +package activedir + +import ( + "fmt" + "os/exec" + "strconv" + "strings" +) + +func (c *adProvider) getRecords(domainname string) ([]byte, error) { + + if !*flagFakePowerShell { + // If we are using PowerShell, make sure it is enabled + // and then run the PS1 command to generate the adzonedump file. + + if !isPowerShellReady() { + fmt.Printf("\n\n\n") + fmt.Printf("***********************************************\n") + fmt.Printf("PowerShell DnsServer module not installed.\n") + fmt.Printf("See http://social.technet.microsoft.com/wiki/contents/articles/2202.remote-server-administration-tools-rsat-for-windows-client-and-windows-server-dsforum2wiki.aspx\n") + fmt.Printf("***********************************************\n") + fmt.Printf("\n\n\n") + return nil, fmt.Errorf("PowerShell module DnsServer not installed.") + } + + _, err := powerShellExecCombined(c.generatePowerShellZoneDump(domainname)) + if err != nil { + return []byte{}, err + } + } + + // Return the contents of zone.*.json file instead. + return c.readZoneDump(domainname) +} + +func isPowerShellReady() bool { + query, _ := powerShellExec(`(Get-Module -ListAvailable DnsServer) -ne $null`) + q, err := strconv.ParseBool(strings.TrimSpace(string(query))) + if err != nil { + return false + } + return q +} + +func powerShellDoCommand(command string) error { + if *flagFakePowerShell { + // If fake, just record the command. 
+ return powerShellRecord(command) + } + _, err := powerShellExec(command) + return err +} + +func powerShellExec(command string) ([]byte, error) { + // log it. + err := powerShellLogCommand(command) + if err != nil { + return []byte{}, err + } + + // Run it. + out, err := exec.Command("powershell", "-NoProfile", command).CombinedOutput() + if err != nil { + // If there was an error, log it. + powerShellLogErr(err) + } + // Return the result. + return out, err +} + +// powerShellExecCombined runs a PS1 command and logs the output. This is useful when the output should be none or very small. +func powerShellExecCombined(command string) ([]byte, error) { + // log it. + err := powerShellLogCommand(command) + if err != nil { + return []byte{}, err + } + + // Run it. + out, err := exec.Command("powershell", "-NoProfile", command).CombinedOutput() + if err != nil { + // If there was an error, log it. + powerShellLogErr(err) + return out, err + } + + // Log output. + err = powerShellLogOutput(string(out)) + if err != nil { + return []byte{}, err + } + + // Return the result. + return out, err +} diff --git a/providers/activedir/zone.testzone.json b/providers/activedir/zone.testzone.json new file mode 100755 index 0000000000000000000000000000000000000000..99ab33fbfd00a89a566b1b17e036412b9f28192f GIT binary patch literal 6000 zcmeI0>u=Iv6voeI6aNRoXNS~s|KvnS7G*9YZcB_Ijxt#smq29_^}eIu)GrR_zF3f#@=A){F)NnjSV@WXWZC>lfxpfu1dA9jS4lC2ug;-dO&9I^ZTZT32wZ-rA2>)6_YoCapN^=WTRx$6(cW}B} zT6t(40bA?vw827|iHGO_guAdak`{YtO^*lDrhJfimi+&t+QM_k_Vt9{Bb_cgnFHfzrH|twzSGoT4cWsYT4N8=RA=wf=XPIB|`wIiQeX*9h5`Z`j-q z&Ry>MSZ;^kG&AUNHm&6jvrm}U93M|j-~BwK2nInuQZ$aTqO0*F?PVPmxuHVqu>coME?1~VmEFd_Y9LX??rd~T zS>L8IU(HeDdW6qMZh;ST-J25p<_vJ+cW^8+s4{4o_l#vm@~~3FKkAWDVV@qN8g8be zX3j^H4~s})nvW^{t=^tJ;o-VJq26e9kjKIgcqZvD_;{9P)z&&Zwg3Cs>jM7;I48wF literal 0 HcmV?d00001 diff --git a/providers/bind/bindProvider.go b/providers/bind/bindProvider.go new file mode 100644 index 000000000..cff073ec9 --- /dev/null +++ b/providers/bind/bindProvider.go @@ -0,0 +1,304 @@ +package bind + +/* + +bind - + Generate zonefiles suitiable for BIND. + + The zonefiles are read and written to the directory -bind_dir + + If the old zonefiles are readable, we read them to determine + if an update is actually needed. The old zonefile is also used + as the basis for generating the new SOA serial number. + + If -bind_skeletin_src and -bind_skeletin_dst is defined, a + recursive file copy is performed from src to dst. This is + useful for copying named.ca and other static files. 
+ +*/ + +import ( + "encoding/json" + "flag" + "fmt" + "log" + "os" + "path/filepath" + "strings" + + "github.com/miekg/dns" + "github.com/miekg/dns/dnsutil" + + "github.com/StackExchange/dnscontrol/models" + "github.com/StackExchange/dnscontrol/providers" + "github.com/StackExchange/dnscontrol/providers/diff" +) + +type SoaInfo struct { + Ns string `json:"master"` + Mbox string `json:"mbox"` + Serial uint32 `json:"serial"` + Refresh uint32 `json:"refresh"` + Retry uint32 `json:"retry"` + Expire uint32 `json:"expire"` + Minttl uint32 `json:"minttl"` +} + +func (s SoaInfo) String() string { + return fmt.Sprintf("%s %s %d %d %d %d %d", s.Ns, s.Mbox, s.Serial, s.Refresh, s.Retry, s.Expire, s.Minttl) +} + +type Bind struct { + Default_ns []string `json:"default_ns"` + Default_Soa SoaInfo `json:"default_soa"` +} + +var bindBaseDir = flag.String("bindtree", "zones", "BIND: Directory that stores BIND zonefiles.") + +//var bindSkeletin = flag.String("bind_skeletin", "skeletin/master/var/named/chroot/var/named/master", "") + +func rrToRecord(rr dns.RR, origin string, replace_serial uint32) (models.RecordConfig, uint32) { + // Convert's dns.RR into our native data type (models.RecordConfig). + // Records are translated directly with no changes. + // If it is an SOA for the apex domain and + // replace_serial != 0, change the serial to replace_serial. + // WARNING(tlim): This assumes SOAs do not have serial=0. + // If one is found, we replace it with serial=1. + var old_serial, new_serial uint32 + header := rr.Header() + rc := models.RecordConfig{} + rc.Type = dns.TypeToString[header.Rrtype] + rc.NameFQDN = strings.ToLower(strings.TrimSuffix(header.Name, ".")) + rc.Name = strings.ToLower(dnsutil.TrimDomainName(header.Name, origin)) + rc.TTL = header.Ttl + if rc.TTL == models.DefaultTTL { + rc.TTL = 0 + } + switch v := rr.(type) { + case *dns.A: + rc.Target = v.A.String() + case *dns.CNAME: + rc.Target = v.Target + case *dns.MX: + rc.Target = v.Mx + rc.Priority = v.Preference + case *dns.NS: + rc.Target = v.Ns + case *dns.SOA: + old_serial = v.Serial + if old_serial == 0 { + // For SOA records, we never return a 0 serial number. 
+ old_serial = 1 + } + new_serial = v.Serial + if rc.Name == "@" && replace_serial != 0 { + new_serial = replace_serial + } + rc.Target = fmt.Sprintf("%v %v %v %v %v %v %v", + v.Ns, v.Mbox, new_serial, v.Refresh, v.Retry, v.Expire, v.Minttl) + case *dns.TXT: + rc.Target = strings.Join(v.Txt, " ") + default: + log.Fatalf("Unimplemented zone record type=%s (%v)\n", rc.Type, rr) + } + return rc, old_serial +} + +func makeDefaultSOA(info SoaInfo, origin string) *models.RecordConfig { + // Make a default SOA record in case one isn't found: + soa_rec := models.RecordConfig{ + Type: "SOA", + Name: "@", + } + soa_rec.NameFQDN = dnsutil.AddOrigin(soa_rec.Name, origin) + + if len(info.Ns) == 0 { + info.Ns = "DEFAULT_NOT_SET" + } + if len(info.Mbox) == 0 { + info.Mbox = "DEFAULT_NOT_SET" + } + if info.Serial == 0 { + info.Serial = 1 + } + if info.Refresh == 0 { + info.Refresh = 3600 + } + if info.Retry == 0 { + info.Retry = 600 + } + if info.Expire == 0 { + info.Expire = 604800 + } + if info.Minttl == 0 { + info.Minttl = 1440 + } + soa_rec.Target = info.String() + + return &soa_rec +} + +func makeDefaultNS(origin string, names []string) []*models.RecordConfig { + var result []*models.RecordConfig + for _, n := range names { + rc := &models.RecordConfig{ + Type: "NS", + Name: "@", + Target: n, + } + rc.NameFQDN = dnsutil.AddOrigin(rc.Name, origin) + result = append(result, rc) + } + return result +} + +func (c *Bind) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) { + + // Phase 1: Copy everything to []*models.RecordConfig: + // expectedRecords < dc.Records[i] + // foundRecords < zonefile + // + // Phase 2: Do any manipulations: + // add NS + // manipulate SOA + // + // Phase 3: Convert to []diff.Records and compare: + // expectedDiffRecords < expectedRecords + // foundDiffRecords < foundRecords + // diff.Inc...(foundDiffRecords, expectedDiffRecords ) + + // Default SOA record. If we see one in the zone, this will be replaced. + soa_rec := makeDefaultSOA(c.Default_Soa, dc.Name) + + // Read expectedRecords: + expectedRecords := make([]*models.RecordConfig, 0, len(dc.Records)) + for i := range dc.Records { + expectedRecords = append(expectedRecords, dc.Records[i]) + } + // Read foundRecords: + foundRecords := make([]*models.RecordConfig, 0) + var old_serial, new_serial uint32 + zonefile := filepath.Join(*bindBaseDir, strings.ToLower(dc.Name)+".zone") + found_fh, err := os.Open(zonefile) + zone_file_found := err == nil + if err != nil && !os.IsNotExist(os.ErrNotExist) { + // Don't whine if the file doesn't exist. However all other + // errors will be reported. + fmt.Printf("Could not read zonefile: %v\n", err) + } else { + for x := range dns.ParseZone(found_fh, dc.Name, zonefile) { + if x.Error != nil { + log.Println("Error in zonefile:", x.Error) + } else { + rec, serial := rrToRecord(x.RR, dc.Name, old_serial) + if serial != 0 && old_serial != 0 { + log.Fatalf("Multiple SOA records in zonefile: %v\n", zonefile) + } + if serial != 0 { + // This was an SOA record. Update the serial. + old_serial = serial + new_serial = generate_serial(old_serial) + // Regenerate with new serial: + *soa_rec, _ = rrToRecord(x.RR, dc.Name, new_serial) + rec = *soa_rec + } + foundRecords = append(foundRecords, &rec) + } + } + } + + // Add NS records: + if len(c.Default_ns) != 0 && !dc.HasRecordTypeName("NS", "@") { + expectedRecords = append(expectedRecords, makeDefaultNS(dc.Name, c.Default_ns)...) + dc.Records = append(dc.Records, makeDefaultNS(dc.Name, c.Default_ns)...) 
+ } + // Add SOA record: + if !dc.HasRecordTypeName("SOA", "@") { + expectedRecords = append(expectedRecords, soa_rec) + dc.Records = append(dc.Records, soa_rec) + } + + // Convert to []diff.Records and compare: + foundDiffRecords := make([]diff.Record, len(foundRecords)) + for i := range foundRecords { + foundDiffRecords[i] = foundRecords[i] + } + expectedDiffRecords := make([]diff.Record, len(expectedRecords)) + for i := range expectedRecords { + expectedDiffRecords[i] = expectedRecords[i] + } + _, create, del, mod := diff.IncrementalDiff(foundDiffRecords, expectedDiffRecords) + + // Print a list of changes. Generate an actual change that is the zone + changes := false + for _, i := range create { + changes = true + if zone_file_found { + fmt.Println(i) + } + } + for _, i := range del { + changes = true + if zone_file_found { + fmt.Println(i) + } + } + for _, i := range mod { + changes = true + if zone_file_found { + fmt.Println(i) + } + } + msg := fmt.Sprintf("GENERATE_ZONEFILE: %s", dc.Name) + if !zone_file_found { + msg = msg + fmt.Sprintf(" (%d records)", len(create)) + } + corrections := []*models.Correction{} + if changes { + corrections = append(corrections, + &models.Correction{ + Msg: msg, + F: func() error { + fmt.Printf("CREATING ZONEFILE: %v\n", zonefile) + zf, err := os.Create(zonefile) + if err != nil { + log.Fatalf("Could not create zonefile: %v", err) + } + zonefilerecords := make([]dns.RR, 0, len(dc.Records)) + for _, r := range dc.Records { + zonefilerecords = append(zonefilerecords, r.RR()) + } + err = WriteZoneFile(zf, zonefilerecords, dc.Name, models.DefaultTTL) + + if err != nil { + log.Fatalf("WriteZoneFile error: %v\n", err) + } + err = zf.Close() + if err != nil { + log.Fatalf("Closing: %v", err) + } + return nil + }, + }) + } + + return corrections, nil +} + +func initBind(config map[string]string, providermeta json.RawMessage) (providers.DNSServiceProvider, error) { + // m -- the json blob from creds.json + // meta -- the json blob from NewReq('name', 'TYPE', meta) + + api := &Bind{} + if len(providermeta) != 0 { + err := json.Unmarshal(providermeta, api) + if err != nil { + return nil, err + } + } + return api, nil +} + +func init() { + providers.RegisterDomainServiceProviderType("BIND", initBind) +} diff --git a/providers/bind/prettyzone.go b/providers/bind/prettyzone.go new file mode 100644 index 000000000..c1989f963 --- /dev/null +++ b/providers/bind/prettyzone.go @@ -0,0 +1,227 @@ +// Generate zonefiles. +// This generates a zonefile that prioritizes beauty over efficiency. +package bind + +import ( + "bytes" + "fmt" + "io" + "log" + "sort" + "strings" + + "github.com/miekg/dns" + "github.com/miekg/dns/dnsutil" +) + +type zoneGenData struct { + Origin string + DefaultTtl uint32 + Records []dns.RR +} + +func (z *zoneGenData) Len() int { return len(z.Records) } +func (z *zoneGenData) Swap(i, j int) { z.Records[i], z.Records[j] = z.Records[j], z.Records[i] } +func (z *zoneGenData) Less(i, j int) bool { + //fmt.Printf("DEBUG: i=%#v j=%#v\n", i, j) + //fmt.Printf("DEBUG: z.Records=%#v\n", len(z.Records)) + a, b := z.Records[i], z.Records[j] + //fmt.Printf("DEBUG: a=%#v b=%#v\n", a, b) + compA, compB := dnsutil.AddOrigin(a.Header().Name, z.Origin+"."), dnsutil.AddOrigin(b.Header().Name, z.Origin+".") + if compA != compB { + if compA == z.Origin+"." { + compA = "@" + } + if compB == z.Origin+"." 
{ + compB = "@" + } + return zoneLabelLess(compA, compB) + } + rrtypeA, rrtypeB := a.Header().Rrtype, b.Header().Rrtype + if rrtypeA != rrtypeB { + return zoneRrtypeLess(rrtypeA, rrtypeB) + } + if rrtypeA == dns.TypeA { + ta2, tb2 := a.(*dns.A), b.(*dns.A) + ipa, ipb := ta2.A.To4(), tb2.A.To4() + if ipa == nil || ipb == nil { + log.Fatalf("should not happen: IPs are not 4 bytes: %#v %#v", ta2, tb2) + } + return bytes.Compare(ipa, ipb) == -1 + } + if rrtypeA == dns.TypeMX { + ta2, tb2 := a.(*dns.MX), b.(*dns.MX) + pa, pb := ta2.Preference, tb2.Preference + return pa < pb + } + return a.String() < b.String() +} + +// WriteZoneFile writes a beautifully formatted zone file. +func WriteZoneFile(w io.Writer, records []dns.RR, origin string, defaultTtl uint32) error { + // This function prioritizes beauty over efficiency. + // * The zone records are sorted by label, grouped by subzones to + // be easy to read and pleasant to the eye. + // * Within a label, SOA and NS records are listed first. + // * MX records are sorted numericly by preference value. + // * A records are sorted by IP address, not lexicographically. + // * Repeated labels are removed. + // * $TTL is used to eliminate clutter. + // * "@" is used instead of the apex domain name. + + z := &zoneGenData{ + Origin: origin, + DefaultTtl: defaultTtl, + } + z.Records = nil + for _, r := range records { + z.Records = append(z.Records, r) + } + return z.generateZoneFileHelper(w) +} + +// generateZoneFileHelper creates a pretty zonefile. +func (z *zoneGenData) generateZoneFileHelper(w io.Writer) error { + + nameShortPrevious := "" + + sort.Sort(z) + fmt.Fprintln(w, "$TTL", z.DefaultTtl) + for i, rr := range z.Records { + line := rr.String() + if line[0] == ';' { + continue + } + hdr := rr.Header() + + items := strings.SplitN(line, "\t", 5) + if len(items) < 5 { + log.Fatalf("Too few items in: %v", line) + } + + // items[0]: name + nameFqdn := hdr.Name + nameShort := dnsutil.TrimDomainName(nameFqdn, z.Origin) + name := nameShort + if i > 0 && nameShort == nameShortPrevious { + name = "" + } else { + name = nameShort + } + nameShortPrevious = nameShort + + // items[1]: ttl + ttl := "" + if hdr.Ttl != z.DefaultTtl && hdr.Ttl != 0 { + ttl = items[1] + } + + // items[2]: class + if hdr.Class != dns.ClassINET { + log.Fatalf("Unimplemented class=%v", items[2]) + } + + // items[3]: type + typeStr := dns.TypeToString[hdr.Rrtype] + + // items[4]: the remaining line + target := items[4] + //if typeStr == "TXT" { + // fmt.Printf("generateZoneFileHelper.go: target=%#v\n", target) + //} + + fmt.Fprintln(w, formatLine([]int{10, 5, 2, 5, 0}, []string{name, ttl, "IN", typeStr, target})) + } + return nil +} + +func formatLine(lengths []int, fields []string) string { + c := 0 + result := "" + for i, length := range lengths { + item := fields[i] + for len(result) < c { + result += " " + } + if item != "" { + result += item + " " + } + c += length + 1 + } + return strings.TrimRight(result, " ") +} + +func zoneLabelLess(a, b string) bool { + // Compare two zone labels for the purpose of sorting the RRs in a Zone. + + // If they are equal, we are done. All other code is simplified + // because we can assume a!=b. + if a == b { + return false + } + + // Sort @ at the top, then *, then everything else lexigraphically. + // i.e. @ always is less. * is is less than everything but @. 
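+	// For example, the labels of a zone end up in prefix-traversal order
+	// (see TestZoneLabelLess and TestWriteZoneFileOrder below):
+	//   @, *, foo, bar.foo, hip.foo, mup, a.mup, bzt.mup, aaa.bzt.mup,
+	//   zzz.bzt.mup, nnn.mup, zt.mup, zap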
+ if a == "@" { + return true + } + if b == "@" { + return false + } + if a == "*" { + return true + } + if b == "*" { + return false + } + + // Split into elements and match up last elements to first. Compare the + // first non-equal elements. + + as := strings.Split(a, ".") + bs := strings.Split(b, ".") + ia := len(as) - 1 + ib := len(bs) - 1 + + var min int + if ia < ib { + min = len(as) - 1 + } else { + min = len(bs) - 1 + } + + // Skip the matching highest elements, then compare the next item. + for i, j := ia, ib; min >= 0; i, j, min = i-1, j-1, min-1 { + if as[i] != bs[j] { + return as[i] < bs[j] + } + } + // The min top elements were equal, so the shorter name is less. + return ia < ib +} + +func zoneRrtypeLess(a, b uint16) bool { + // Compare two RR types for the purpose of sorting the RRs in a Zone. + + // If they are equal, we are done. All other code is simplified + // because we can assume a!=b. + if a == b { + return false + } + + // List SOAs, then NSs, then all others. + // i.e. SOA is always less. NS is less than everything but SOA. + if a == dns.TypeSOA { + return true + } + if b == dns.TypeSOA { + return false + } + if a == dns.TypeNS { + return true + } + if b == dns.TypeNS { + return false + } + return a < b +} diff --git a/providers/bind/prettyzone_test.go b/providers/bind/prettyzone_test.go new file mode 100644 index 000000000..b908f6471 --- /dev/null +++ b/providers/bind/prettyzone_test.go @@ -0,0 +1,299 @@ +package bind + +import ( + "bytes" + "fmt" + "log" + "math/rand" + "testing" + + "github.com/miekg/dns" + "github.com/miekg/dns/dnsutil" +) + +func parseAndRegen(t *testing.T, buf *bytes.Buffer, expected string) { + // Take a zonefile, parse it, then generate a zone. We should + // get back the same string. + // This is used after any WriteZoneFile test as an extra verification step. + + // Parse the output: + var parsed []dns.RR + for x := range dns.ParseZone(buf, "bosun.org", "bosun.org.zone") { + if x.Error != nil { + log.Fatalf("Error in zonefile: %v", x.Error) + } else { + parsed = append(parsed, x.RR) + } + } + // Generate it back: + buf2 := &bytes.Buffer{} + WriteZoneFile(buf2, parsed, "bosun.org.", 300) + + // Compare: + if buf2.String() != expected { + t.Fatalf("Regenerated zonefile does not match: got=(\n%v\n)\nexpected=(\n%v\n)\n", buf2.String(), expected) + } +} + +// func WriteZoneFile + +func TestWriteZoneFileSimple(t *testing.T) { + r1, _ := dns.NewRR("bosun.org. 300 IN A 192.30.252.153") + r2, _ := dns.NewRR("bosun.org. 300 IN A 192.30.252.154") + r3, _ := dns.NewRR("www.bosun.org. 300 IN CNAME bosun.org.") + buf := &bytes.Buffer{} + WriteZoneFile(buf, []dns.RR{r1, r2, r3}, "bosun.org.", 300) + expected := `$TTL 300 +@ IN A 192.30.252.153 + IN A 192.30.252.154 +www IN CNAME bosun.org. +` + if buf.String() != expected { + t.Log(buf.String()) + t.Log(expected) + t.Fatalf("Zone file does not match.") + } + + parseAndRegen(t, buf, expected) +} + +func TestWriteZoneFileMx(t *testing.T) { + //exhibits explicit ttls and long name + r1, _ := dns.NewRR(`bosun.org. 300 IN TXT "aaa"`) + r2, _ := dns.NewRR(`bosun.org. 300 IN TXT "bbb"`) + r2.(*dns.TXT).Txt[0] = `b"bb` + r3, _ := dns.NewRR("bosun.org. 300 IN MX 1 ASPMX.L.GOOGLE.COM.") + r4, _ := dns.NewRR("bosun.org. 300 IN MX 5 ALT1.ASPMX.L.GOOGLE.COM.") + r5, _ := dns.NewRR("bosun.org. 300 IN MX 10 ASPMX3.GOOGLEMAIL.COM.") + r6, _ := dns.NewRR("bosun.org. 300 IN A 198.252.206.16") + r7, _ := dns.NewRR("*.bosun.org. 600 IN A 198.252.206.16") + r8, _ := dns.NewRR(`_domainkey.bosun.org. 
300 IN TXT "vvvv"`) + r9, _ := dns.NewRR(`google._domainkey.bosun.org. 300 IN TXT "\"foo\""`) + buf := &bytes.Buffer{} + WriteZoneFile(buf, []dns.RR{r1, r2, r3, r4, r5, r6, r7, r8, r9}, "bosun.org", 300) + if buf.String() != testdataZFMX { + t.Log(buf.String()) + t.Log(testdataZFMX) + t.Fatalf("Zone file does not match.") + } + parseAndRegen(t, buf, testdataZFMX) +} + +var testdataZFMX = `$TTL 300 +@ IN A 198.252.206.16 + IN MX 1 ASPMX.L.GOOGLE.COM. + IN MX 5 ALT1.ASPMX.L.GOOGLE.COM. + IN MX 10 ASPMX3.GOOGLEMAIL.COM. + IN TXT "aaa" + IN TXT "b\"bb" +* 600 IN A 198.252.206.16 +_domainkey IN TXT "vvvv" +google._domainkey IN TXT "\"foo\"" +` + +func TestWriteZoneFileOrder(t *testing.T) { + var records []dns.RR + for i, td := range []string{ + "@", + "@", + "@", + "stackoverflow.com.", + "*", + "foo", + "bar.foo", + "hip.foo", + "mup", + "a.mup", + "bzt.mup", + "aaa.bzt.mup", + "zzz.bzt.mup", + "nnn.mup", + "zt.mup", + "zap", + } { + name := dnsutil.AddOrigin(td, "stackoverflow.com.") + r, _ := dns.NewRR(fmt.Sprintf("%s 300 IN A 1.2.3.%d", name, i)) + records = append(records, r) + } + records[0].Header().Name = "stackoverflow.com." + records[1].Header().Name = "@" + + buf := &bytes.Buffer{} + WriteZoneFile(buf, records, "stackoverflow.com.", 300) + // Compare + if buf.String() != testdataOrder { + t.Log(buf.String()) + t.Log(testdataOrder) + t.Fatalf("Zone file does not match.") + } + parseAndRegen(t, buf, testdataOrder) + + // Now shuffle the list many times and make sure it still works: + + for iteration := 5; iteration > 0; iteration-- { + // Randomize the list: + perm := rand.Perm(len(records)) + for i, v := range perm { + records[i], records[v] = records[v], records[i] + //fmt.Println(i, v) + } + // Generate + buf := &bytes.Buffer{} + WriteZoneFile(buf, records, "stackoverflow.com.", 300) + // Compare + if buf.String() != testdataOrder { + t.Log(buf.String()) + t.Log(testdataOrder) + t.Fatalf("Zone file does not match.") + } + parseAndRegen(t, buf, testdataOrder) + } +} + +var testdataOrder = `$TTL 300 +@ IN A 1.2.3.0 + IN A 1.2.3.1 + IN A 1.2.3.2 + IN A 1.2.3.3 +* IN A 1.2.3.4 +foo IN A 1.2.3.5 +bar.foo IN A 1.2.3.6 +hip.foo IN A 1.2.3.7 +mup IN A 1.2.3.8 +a.mup IN A 1.2.3.9 +bzt.mup IN A 1.2.3.10 +aaa.bzt.mup IN A 1.2.3.11 +zzz.bzt.mup IN A 1.2.3.12 +nnn.mup IN A 1.2.3.13 +zt.mup IN A 1.2.3.14 +zap IN A 1.2.3.15 +` + +// func formatLine + +func TestFormatLine(t *testing.T) { + tests := []struct { + lengths []int + fields []string + expected string + }{ + {[]int{2, 2, 0}, []string{"a", "b", "c"}, "a b c"}, + {[]int{2, 2, 0}, []string{"aaaaa", "b", "c"}, "aaaaa b c"}, + } + for _, ts := range tests { + actual := formatLine(ts.lengths, ts.fields) + if actual != ts.expected { + t.Errorf("\"%s\" != \"%s\"", actual, ts.expected) + } + } +} + +// func zoneLabelLess + +func TestZoneLabelLess(t *testing.T) { + /* + The zone should sort in prefix traversal order: + + @ + * + foo + bar.foo + hip.foo + mup + a.mup + bzt.mup + aaa.bzt.mup + zzz.bzt.mup + nnn.mup + zt.mup + zap + */ + + var tests = []struct { + e1, e2 string + expected bool + }{ + {"@", "@", false}, + {"@", "*", true}, + {"@", "b", true}, + {"*", "@", false}, + {"*", "*", false}, + {"*", "b", true}, + {"foo", "foo", false}, + {"foo", "bar", false}, + {"bar", "foo", true}, + {"a.mup", "mup", false}, + {"mup", "a.mup", true}, + {"a.mup", "a.mup", false}, + {"a.mup", "bzt.mup", true}, + {"a.mup", "aa.mup", true}, + {"zt.mup", "aaa.bzt.mup", false}, + {"aaa.bzt.mup", "mup", false}, + {"nnn.mup", "aaa.bzt.mup", false}, + {`www\.miek.nl`, 
`www.miek.nl`, false}, + } + + for _, test := range tests { + actual := zoneLabelLess(test.e1, test.e2) + if test.expected != actual { + t.Errorf("%v: expected (%v) got (%v)\n", test.e1, test.e2, actual) + } + actual = zoneLabelLess(test.e2, test.e1) + // The reverse should work too: + var expected bool + if test.e1 == test.e2 { + expected = false + } else { + expected = !test.expected + } + if expected != actual { + t.Errorf("%v: expected (%v) got (%v)\n", test.e1, test.e2, actual) + } + } +} + +func TestZoneRrtypeLess(t *testing.T) { + /* + In zonefiles we want to list SOAs, then NSs, then all others. + */ + + var tests = []struct { + e1, e2 uint16 + expected bool + }{ + {dns.TypeSOA, dns.TypeSOA, false}, + {dns.TypeSOA, dns.TypeA, true}, + {dns.TypeSOA, dns.TypeTXT, true}, + {dns.TypeSOA, dns.TypeNS, true}, + {dns.TypeNS, dns.TypeSOA, false}, + {dns.TypeNS, dns.TypeA, true}, + {dns.TypeNS, dns.TypeTXT, true}, + {dns.TypeNS, dns.TypeNS, false}, + {dns.TypeA, dns.TypeSOA, false}, + {dns.TypeA, dns.TypeA, false}, + {dns.TypeA, dns.TypeTXT, true}, + {dns.TypeA, dns.TypeNS, false}, + {dns.TypeMX, dns.TypeSOA, false}, + {dns.TypeMX, dns.TypeA, false}, + {dns.TypeMX, dns.TypeTXT, true}, + {dns.TypeMX, dns.TypeNS, false}, + } + + for _, test := range tests { + actual := zoneRrtypeLess(test.e1, test.e2) + if test.expected != actual { + t.Errorf("%v: expected (%v) got (%v)\n", test.e1, test.e2, actual) + } + actual = zoneRrtypeLess(test.e2, test.e1) + // The reverse should work too: + var expected bool + if test.e1 == test.e2 { + expected = false + } else { + expected = !test.expected + } + if expected != actual { + t.Errorf("%v: expected (%v) got (%v)\n", test.e1, test.e2, actual) + } + } +} diff --git a/providers/bind/serial.go b/providers/bind/serial.go new file mode 100644 index 000000000..5c3bf6903 --- /dev/null +++ b/providers/bind/serial.go @@ -0,0 +1,71 @@ +package bind + +import ( + "log" + "strconv" + "strings" + "time" +) + +var nowFunc func() time.Time = time.Now + +// generate_serial takes an old SOA serial number and increments it. +func generate_serial(old_serial uint32) uint32 { + // Serial numbers are in the format yyyymmddvv + // where vv is a version count that starts at 01 each day. + // Multiple serial numbers generated on the same day increase vv. + // If the old serial number is not in this format, it gets replaced + // with the new format. However if that would mean a new serial number + // that is smaller than the old one, we punt and increment the old number. + // At no time will a serial number == 0 be returned. + + original := old_serial + old_serialStr := strconv.FormatUint(uint64(old_serial), 10) + var new_serial uint32 + + // Make draft new serial number: + today := nowFunc().UTC() + todayStr := today.Format("20060102") + version := uint32(1) + todayNum, err := strconv.ParseUint(todayStr, 10, 32) + if err != nil { + log.Fatalf("new serial won't fit in 32 bits: %v", err) + } + draft := uint32(todayNum)*100 + version + + method := "none" // Used only in debugging. 
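+	// Worked examples (see serial_test.go below): on 2015-01-08 the draft
+	// serial is 2015010801, so an old serial of 2015010800 becomes 2015010801
+	// and 2015010802 becomes 2015010803, while an old serial already larger
+	// than the draft (e.g. 4015090401) is simply incremented.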
+ if old_serial > draft { + // If old_serial was really slow, upgrade to new yyyymmddvv standard: + method = "o>d" + new_serial = old_serial + 1 + new_serial = old_serial + 1 + } else if old_serial == draft { + // Edge case: increment old serial: + method = "o=d" + new_serial = draft + 1 + } else if len(old_serialStr) != 10 { + // If old_serial is wrong number of digits, upgrade to yyyymmddvv standard: + method = "len!=10" + new_serial = draft + } else if strings.HasPrefix(old_serialStr, todayStr) { + // If old_serial just needs to be incremented: + method = "prefix" + new_serial = old_serial + 1 + } else { + // First serial number to be requested today: + method = "default" + new_serial = draft + } + + if new_serial == 0 { + // We never return 0 as the serial number. + new_serial = 1 + } + if old_serial == new_serial { + log.Fatalf("%v: old_serial == new_serial (%v == %v) draft=%v method=%v", original, old_serial, new_serial, draft, method) + } + if old_serial > new_serial { + log.Fatalf("%v: old_serial > new_serial (%v > %v) draft=%v method=%v", original, old_serial, new_serial, draft, method) + } + return new_serial +} diff --git a/providers/bind/serial_test.go b/providers/bind/serial_test.go new file mode 100644 index 000000000..ea43b1d5f --- /dev/null +++ b/providers/bind/serial_test.go @@ -0,0 +1,51 @@ +package bind + +import ( + "testing" + "time" +) + +func Test_generate_serial_1(t *testing.T) { + d1, _ := time.Parse("20060102", "20150108") + d4, _ := time.Parse("20060102", "40150108") + d12, _ := time.Parse("20060102", "20151231") + var tests = []struct { + Given uint32 + Today time.Time + Expected uint32 + }{ + {0, d1, 2015010801}, + {123, d1, 2015010801}, + {2015010800, d1, 2015010801}, + {2015010801, d1, 2015010802}, + {2015010802, d1, 2015010803}, + {2015010898, d1, 2015010899}, + {2015010899, d1, 2015010900}, + {2015090401, d1, 2015090402}, + {201509040, d1, 2015010801}, + {20150904, d1, 2015010801}, + {2015090, d1, 2015010801}, + // Verify 32-bits is enough to carry us 200 years in the future: + {4015090401, d4, 4015090402}, + // Verify Dec 31 edge-case: + {2015123099, d12, 2015123101}, + {2015123100, d12, 2015123101}, + {2015123101, d12, 2015123102}, + {2015123102, d12, 2015123103}, + {2015123198, d12, 2015123199}, + {2015123199, d12, 2015123200}, + {2015123200, d12, 2015123201}, + {201512310, d12, 2015123101}, + } + + for i, tst := range tests { + expected := tst.Expected + nowFunc = func() time.Time { + return tst.Today + } + found := generate_serial(tst.Given) + if expected != found { + t.Fatalf("Test:%d/%v: Expected (%d) got (%d)\n", i, tst.Given, expected, found) + } + } +} diff --git a/providers/cloudflare/cloudflareProvider.go b/providers/cloudflare/cloudflareProvider.go new file mode 100644 index 000000000..556b9f2a8 --- /dev/null +++ b/providers/cloudflare/cloudflareProvider.go @@ -0,0 +1,306 @@ +package cloudflare + +import ( + "encoding/json" + "fmt" + "log" + "net" + "strings" + "time" + + "github.com/miekg/dns/dnsutil" + "github.com/StackExchange/dnscontrol/models" + "github.com/StackExchange/dnscontrol/providers" + "github.com/StackExchange/dnscontrol/providers/diff" + "github.com/StackExchange/dnscontrol/transform" +) + +/* + +Cloudflare APi DNS provider: + +Info required in `creds.json`: + - apikey + - apiuser + +Record level metadata availible: + - cloudflare_proxy ("true" or "false") + +Domain level metadata availible: + - cloudflare_proxy_default ("true" or "false") + + Provider level metadata availible: + - ip_conversions + - secret_ips +*/ + +type 
CloudflareApi struct { + ApiKey string `json:"apikey"` + ApiUser string `json:"apiuser"` + domainIndex map[string]string + nameservers map[string][]*models.Nameserver + ipConversions []transform.IpConversion + secretIPs []net.IP + ignoredLabels []string +} + +func labelMatches(label string, matches []string) bool { + //log.Printf("DEBUG: labelMatches(%#v, %#v)\n", label, matches) + for _, tst := range matches { + if label == tst { + return true + } + } + return false +} + +func (c *CloudflareApi) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) { + if c.domainIndex == nil { + if err := c.fetchDomainList(); err != nil { + return nil, err + } + } + id, ok := c.domainIndex[dc.Name] + if !ok { + return nil, fmt.Errorf("%s not listed in zones for cloudflare account", dc.Name) + } + + dc.Nameservers = c.nameservers[dc.Name] + if err := c.preprocessConfig(dc); err != nil { + return nil, err + } + records, err := c.getRecordsForDomain(id) + if err != nil { + return nil, err + } + //for _, rec := range records { + for i := len(records) - 1; i >= 0; i-- { + rec := records[i] + // Delete ignore labels + if labelMatches(dnsutil.TrimDomainName(rec.(*cfRecord).Name, dc.Name), c.ignoredLabels) { + fmt.Printf("ignored_label: %s\n", rec.(*cfRecord).Name) + records = append(records[:i], records[i+1:]...) + } + //normalize cname,mx,ns records with dots to be consistent with our config format. + t := rec.(*cfRecord).Type + if t == "CNAME" || t == "MX" || t == "NS" { + rec.(*cfRecord).Content = dnsutil.AddOrigin(rec.(*cfRecord).Content+".", dc.Name) + } + } + + expectedRecords := make([]diff.Record, 0, len(dc.Records)) + for _, rec := range dc.Records { + if labelMatches(rec.Name, c.ignoredLabels) { + log.Fatalf("FATAL: dnsconfig contains label that matches ignored_labels: %#v is in %v)\n", rec.Name, c.ignoredLabels) + // Since we log.Fatalf, we don't need to be clean here. + } + expectedRecords = append(expectedRecords, recordWrapper{rec}) + } + _, create, del, mod := diff.IncrementalDiff(records, expectedRecords) + corrections := []*models.Correction{} + + for _, d := range del { + corrections = append(corrections, c.deleteRec(d.Existing.(*cfRecord), id)) + } + for _, d := range create { + corrections = append(corrections, c.createRec(d.Desired.(recordWrapper).RecordConfig, id)...) + } + + for _, d := range mod { + e, rec := d.Existing.(*cfRecord), d.Desired.(recordWrapper) + proxy := e.Proxiable && rec.Metadata[metaProxy] != "off" + corrections = append(corrections, &models.Correction{ + Msg: fmt.Sprintf("MODIFY record %s %s: (%s %s) => (%s %s)", rec.Name, rec.Type, e.Content, e.GetComparisionData(), rec.Target, rec.GetComparisionData()), + F: func() error { return c.modifyRecord(id, e.ID, proxy, rec.RecordConfig) }, + }) + } + return corrections, nil +} + +const ( + metaProxy = "cloudflare_proxy" + metaProxyDefault = metaProxy + "_default" + metaOriginalIP = "original_ip" // TODO(tlim): Unclear what this means. + metaIPConversions = "ip_conversions" // TODO(tlim): Rename to obscure_rules. + metaSecretIPs = "secret_ips" // TODO(tlim): Rename to obscured_cidrs. +) + +func checkProxyVal(v string) (string, error) { + v = strings.ToLower(v) + if v != "on" && v != "off" && v != "full" { + return "", fmt.Errorf("Bad metadata value for cloudflare_proxy: '%s'. Use on/off/full", v) + } + return v, nil +} + +func (c *CloudflareApi) preprocessConfig(dc *models.DomainConfig) error { + + // Determine the default proxy setting. 
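+	// If the domain-level metadata cloudflare_proxy_default is unset, records
+	// default to "off"; otherwise it must be one of on/off/full (anything else
+	// is rejected by checkProxyVal). Only A, AAAA and CNAME records may carry
+	// their own cloudflare_proxy value; all other types are forced to "off".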
+ var defProxy string + var err error + if defProxy = dc.Metadata[metaProxyDefault]; defProxy == "" { + defProxy = "off" + } else { + defProxy, err = checkProxyVal(defProxy) + if err != nil { + return err + } + } + + // Normalize the proxy setting for each record. + // A and CNAMEs: Validate. If null, set to default. + // else: Make sure it wasn't set. Set to default. + for _, rec := range dc.Records { + if rec.Type != "A" && rec.Type != "CNAME" && rec.Type != "AAAA" { + if rec.Metadata[metaProxy] != "" { + return fmt.Errorf("cloudflare_proxy set on %v record: %#v cloudflare_proxy=%#v", rec.Type, rec.Name, rec.Metadata[metaProxy]) + } + // Force it to off. + rec.Metadata[metaProxy] = "off" + } else { + if val := rec.Metadata[metaProxy]; val == "" { + rec.Metadata[metaProxy] = defProxy + } else { + val, err := checkProxyVal(val) + if err != nil { + return err + } + rec.Metadata[metaProxy] = val + } + } + } + + // look for ip conversions and transform records + for _, rec := range dc.Records { + if rec.TTL == 0 { + rec.TTL = 1 + } + if rec.Type != "A" { + continue + } + //only transform "full" + if rec.Metadata[metaProxy] != "full" { + continue + } + ip := net.ParseIP(rec.Target) + if ip == nil { + return fmt.Errorf("%s is not a valid ip address", rec.Target) + } + newIP, err := transform.TransformIP(ip, c.ipConversions) + if err != nil { + return err + } + rec.Metadata[metaOriginalIP] = rec.Target + rec.Target = newIP.String() + } + + return nil +} + +func newCloudflare(m map[string]string, metadata json.RawMessage) (providers.DNSServiceProvider, error) { + api := &CloudflareApi{} + api.ApiUser, api.ApiKey = m["apiuser"], m["apikey"] + // check api keys from creds json file + if api.ApiKey == "" || api.ApiUser == "" { + return nil, fmt.Errorf("Cloudflare apikey and apiuser must be provided.") + } + + if len(metadata) > 0 { + parsedMeta := &struct { + IPConversions string `json:"ip_conversions"` + SecretIps []interface{} `json:"secret_ips"` + IgnoredLabels []string `json:"ignored_labels"` + }{} + err := json.Unmarshal([]byte(metadata), parsedMeta) + if err != nil { + return nil, err + } + // ignored_labels: + for _, l := range parsedMeta.IgnoredLabels { + api.ignoredLabels = append(api.ignoredLabels, l) + } + // parse provider level metadata + api.ipConversions, err = transform.DecodeTransformTable(parsedMeta.IPConversions) + if err != nil { + return nil, err + } + ips := []net.IP{} + for _, ipStr := range parsedMeta.SecretIps { + var ip net.IP + if ip, err = models.InterfaceToIP(ipStr); err != nil { + return nil, err + } + ips = append(ips, ip) + } + api.secretIPs = ips + } + return api, nil +} + +func init() { + providers.RegisterDomainServiceProviderType("CLOUDFLAREAPI", newCloudflare) +} + +// Used on the "existing" records. 
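+// cfRecord mirrors the JSON shape returned by the Cloudflare v4 dns_records
+// endpoint (see rest.go) and satisfies the diff.Record interface consumed by
+// diff.IncrementalDiff.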
+type cfRecord struct { + ID string `json:"id"` + Type string `json:"type"` + Name string `json:"name"` + Content string `json:"content"` + Proxiable bool `json:"proxiable"` + Proxied bool `json:"proxied"` + TTL int `json:"ttl"` + Locked bool `json:"locked"` + ZoneID string `json:"zone_id"` + ZoneName string `json:"zone_name"` + CreatedOn time.Time `json:"created_on"` + ModifiedOn time.Time `json:"modified_on"` + Data interface{} `json:"data"` + Priority int `json:"priority"` +} + +func (c *cfRecord) GetName() string { + return c.Name +} + +func (c *cfRecord) GetType() string { + return c.Type +} + +func (c *cfRecord) GetContent() string { + return c.Content +} + +func (c *cfRecord) GetComparisionData() string { + mxPrio := "" + if c.Type == "MX" { + mxPrio = fmt.Sprintf(" %d ", c.Priority) + } + proxy := "" + if c.Type == "A" || c.Type == "CNAME" || c.Type == "AAAA" { + proxy = fmt.Sprintf(" proxy=%v ", c.Proxied) + } + return fmt.Sprintf("%d%s%s", c.TTL, mxPrio, proxy) +} + +// Used on the "expected" records. +type recordWrapper struct { + *models.RecordConfig +} + +func (c recordWrapper) GetComparisionData() string { + mxPrio := "" + if c.Type == "MX" { + mxPrio = fmt.Sprintf(" %d ", c.Priority) + } + proxy := "" + if c.Type == "A" || c.Type == "AAAA" || c.Type == "CNAME" { + proxy = fmt.Sprintf(" proxy=%v ", c.Metadata[metaProxy] != "off") + } + + ttl := c.TTL + if ttl == 0 { + ttl = 1 + } + return fmt.Sprintf("%d%s%s", ttl, mxPrio, proxy) +} diff --git a/providers/cloudflare/preprocess_test.go b/providers/cloudflare/preprocess_test.go new file mode 100644 index 000000000..3491b5689 --- /dev/null +++ b/providers/cloudflare/preprocess_test.go @@ -0,0 +1,116 @@ +package cloudflare + +import ( + "net" + "testing" + + "github.com/StackExchange/dnscontrol/models" + "github.com/StackExchange/dnscontrol/transform" +) + +func newDomainConfig() *models.DomainConfig { + return &models.DomainConfig{ + Name: "test.com", + Records: []*models.RecordConfig{}, + Metadata: map[string]string{}, + } +} + +func TestPreprocess_BoolValidation(t *testing.T) { + cf := &CloudflareApi{} + domain := newDomainConfig() + domain.Records = append(domain.Records, &models.RecordConfig{Type: "A", Target: "1.2.3.4", Metadata: map[string]string{metaProxy: "on"}}) + domain.Records = append(domain.Records, &models.RecordConfig{Type: "A", Target: "1.2.3.4", Metadata: map[string]string{metaProxy: "fUll"}}) + domain.Records = append(domain.Records, &models.RecordConfig{Type: "A", Target: "1.2.3.4", Metadata: map[string]string{}}) + domain.Records = append(domain.Records, &models.RecordConfig{Type: "A", Target: "1.2.3.4", Metadata: map[string]string{metaProxy: "Off"}}) + domain.Records = append(domain.Records, &models.RecordConfig{Type: "A", Target: "1.2.3.4", Metadata: map[string]string{metaProxy: "off"}}) + err := cf.preprocessConfig(domain) + if err != nil { + t.Fatal(err) + } + expected := []string{"on", "full", "off", "off", "off"} + // make sure only "on" or "off", and "full" are actually set + for i, rec := range domain.Records { + if rec.Metadata[metaProxy] != expected[i] { + t.Fatalf("At index %d: expect '%s' but found '%s'", i, expected[i], rec.Metadata[metaProxy]) + } + } +} + +func TestPreprocess_BoolValidation_Fails(t *testing.T) { + cf := &CloudflareApi{} + domain := newDomainConfig() + domain.Records = append(domain.Records, &models.RecordConfig{Metadata: map[string]string{metaProxy: "true"}}) + err := cf.preprocessConfig(domain) + if err == nil { + t.Fatal("Expected validation error, but got none") + } +} + 
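+// Illustrative sketch (hypothetical test, not part of the provider itself):
+// the comparison strings GetComparisionData produces for a proxied A record
+// and an MX record. IncrementalDiff uses these strings to decide whether two
+// records with equal content are unchanged or modified.
+func TestComparisionDataSketch(t *testing.T) {
+	a := &cfRecord{Type: "A", TTL: 300, Proxied: true}
+	if got := a.GetComparisionData(); got != "300 proxy=true " {
+		t.Errorf("unexpected A comparison data: %q", got)
+	}
+	mx := &cfRecord{Type: "MX", TTL: 300, Priority: 10}
+	if got := mx.GetComparisionData(); got != "300 10 " {
+		t.Errorf("unexpected MX comparison data: %q", got)
+	}
+}
+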
+func TestPreprocess_DefaultProxy(t *testing.T) { + cf := &CloudflareApi{} + domain := newDomainConfig() + domain.Metadata[metaProxyDefault] = "full" + domain.Records = append(domain.Records, &models.RecordConfig{Type: "A", Target: "1.2.3.4", Metadata: map[string]string{metaProxy: "on"}}) + domain.Records = append(domain.Records, &models.RecordConfig{Type: "A", Target: "1.2.3.4", Metadata: map[string]string{metaProxy: "off"}}) + domain.Records = append(domain.Records, &models.RecordConfig{Type: "A", Target: "1.2.3.4", Metadata: map[string]string{}}) + err := cf.preprocessConfig(domain) + if err != nil { + t.Fatal(err) + } + expected := []string{"on", "off", "full"} + for i, rec := range domain.Records { + if rec.Metadata[metaProxy] != expected[i] { + t.Fatalf("At index %d: expect '%s' but found '%s'", i, expected[i], rec.Metadata[metaProxy]) + } + } +} + +func TestPreprocess_DefaultProxy_Validation(t *testing.T) { + cf := &CloudflareApi{} + domain := newDomainConfig() + domain.Metadata[metaProxyDefault] = "true" + err := cf.preprocessConfig(domain) + if err == nil { + t.Fatal("Expected validation error, but got none") + } +} + +func TestIpRewriting(t *testing.T) { + var tests = []struct { + Given, Expected string + Proxy string + }{ + //outside of range + {"5.5.5.5", "5.5.5.5", "full"}, + {"5.5.5.5", "5.5.5.5", "on"}, + // inside range, but not proxied + {"1.2.3.4", "1.2.3.4", "on"}, + //inside range and proxied + {"1.2.3.4", "255.255.255.4", "full"}, + } + cf := &CloudflareApi{} + domain := newDomainConfig() + cf.ipConversions = []transform.IpConversion{{net.ParseIP("1.2.3.0"), net.ParseIP("1.2.3.40"), net.ParseIP("255.255.255.0"), nil}} + for _, tst := range tests { + rec := &models.RecordConfig{Type: "A", Target: tst.Given, Metadata: map[string]string{metaProxy: tst.Proxy}} + domain.Records = append(domain.Records, rec) + } + err := cf.preprocessConfig(domain) + if err != nil { + t.Fatal(err) + } + for i, tst := range tests { + rec := domain.Records[i] + if rec.Target != tst.Expected { + t.Fatalf("At index %d, expected target of %s, but found %s.", i, tst.Expected, rec.Target) + } + if tst.Proxy == "full" && tst.Given != tst.Expected && rec.Metadata[metaOriginalIP] != tst.Given { + t.Fatalf("At index %d, expected original_ip to be set", i) + } + } +} + +func TestCnameValidation(t *testing.T) { + +} diff --git a/providers/cloudflare/rest.go b/providers/cloudflare/rest.go new file mode 100644 index 000000000..e77cad161 --- /dev/null +++ b/providers/cloudflare/rest.go @@ -0,0 +1,251 @@ +package cloudflare + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + + "github.com/StackExchange/dnscontrol/models" + "github.com/StackExchange/dnscontrol/providers/diff" +) + +const ( + baseURL = "https://api.cloudflare.com/client/v4/" + zonesURL = baseURL + "zones/" + recordsURL = zonesURL + "%s/dns_records/" + singleRecordURL = recordsURL + "%s" +) + +// get list of domains for account. 
Cache so the ids can be looked up from domain name +func (c *CloudflareApi) fetchDomainList() error { + c.domainIndex = map[string]string{} + c.nameservers = map[string][]*models.Nameserver{} + page := 1 + for { + zr := &zoneResponse{} + url := fmt.Sprintf("%s?page=%d&per_page=50", zonesURL, page) + if err := c.get(url, zr); err != nil { + return fmt.Errorf("Error fetching domain list from cloudflare: %s", err) + } + if !zr.Success { + return fmt.Errorf("Error fetching domain list from cloudflare: %s", stringifyErrors(zr.Errors)) + } + for _, zone := range zr.Result { + c.domainIndex[zone.Name] = zone.ID + for _, ns := range zone.Nameservers { + c.nameservers[zone.Name] = append(c.nameservers[zone.Name], &models.Nameserver{Name: ns}) + } + } + ri := zr.ResultInfo + if len(zr.Result) == 0 || ri.Page*ri.PerPage >= ri.TotalCount { + break + } + page++ + } + return nil +} + +// get all records for a domain +func (c *CloudflareApi) getRecordsForDomain(id string) ([]diff.Record, error) { + url := fmt.Sprintf(recordsURL, id) + page := 1 + records := []diff.Record{} + for { + reqURL := fmt.Sprintf("%s?page=%d&per_page=100", url, page) + var data recordsResponse + if err := c.get(reqURL, &data); err != nil { + return nil, fmt.Errorf("Error fetching record list from cloudflare: %s", err) + } + if !data.Success { + return nil, fmt.Errorf("Error fetching record list cloudflare: %s", stringifyErrors(data.Errors)) + } + for _, rec := range data.Result { + records = append(records, rec) + } + ri := data.ResultInfo + if len(data.Result) == 0 || ri.Page*ri.PerPage >= ri.TotalCount { + break + } + page++ + } + return records, nil +} + +// create a correction to delete a record +func (c *CloudflareApi) deleteRec(rec *cfRecord, domainID string) *models.Correction { + return &models.Correction{ + Msg: fmt.Sprintf("DELETE record: %s %s %d %s (id=%s)", rec.Name, rec.Type, rec.TTL, rec.Content, rec.ID), + F: func() error { + endpoint := fmt.Sprintf(singleRecordURL, domainID, rec.ID) + req, err := http.NewRequest("DELETE", endpoint, nil) + if err != nil { + return err + } + c.setHeaders(req) + _, err = handleActionResponse(http.DefaultClient.Do(req)) + return err + }, + } +} + +func (c *CloudflareApi) createRec(rec *models.RecordConfig, domainID string) []*models.Correction { + type createRecord struct { + Name string `json:"name"` + Type string `json:"type"` + Content string `json:"content"` + TTL uint32 `json:"ttl"` + Priority uint16 `json:"priority"` + } + var id string + content := rec.Target + if rec.Metadata[metaOriginalIP] != "" { + content = rec.Metadata[metaOriginalIP] + } + prio := "" + if rec.Type == "MX" { + prio = fmt.Sprintf(" %d ", rec.Priority) + } + arr := []*models.Correction{{ + Msg: fmt.Sprintf("CREATE record: %s %s %d%s %s", rec.Name, rec.Type, rec.TTL, prio, content), + F: func() error { + + cf := &createRecord{ + Name: rec.Name, + Type: rec.Type, + TTL: rec.TTL, + Content: content, + Priority: rec.Priority, + } + endpoint := fmt.Sprintf(recordsURL, domainID) + buf := &bytes.Buffer{} + encoder := json.NewEncoder(buf) + if err := encoder.Encode(cf); err != nil { + return err + } + req, err := http.NewRequest("POST", endpoint, buf) + if err != nil { + return err + } + c.setHeaders(req) + id, err = handleActionResponse(http.DefaultClient.Do(req)) + return err + }, + }} + if rec.Metadata[metaProxy] != "off" { + arr = append(arr, &models.Correction{ + Msg: fmt.Sprintf("ACTIVATE PROXY for new record %s %s %d %s", rec.Name, rec.Type, rec.TTL, rec.Target), + F: func() error { return 
c.modifyRecord(domainID, id, true, rec) }, + }) + } + return arr +} + +func (c *CloudflareApi) modifyRecord(domainID, recID string, proxied bool, rec *models.RecordConfig) error { + if domainID == "" || recID == "" { + return fmt.Errorf("Cannot modify record if domain or record id are empty.") + } + type record struct { + ID string `json:"id"` + Proxied bool `json:"proxied"` + Name string `json:"name"` + Type string `json:"type"` + Content string `json:"content"` + Priority uint16 `json:"priority"` + TTL uint32 `json:"ttl"` + } + r := record{recID, proxied, rec.Name, rec.Type, rec.Target, rec.Priority, rec.TTL} + endpoint := fmt.Sprintf(singleRecordURL, domainID, recID) + buf := &bytes.Buffer{} + encoder := json.NewEncoder(buf) + if err := encoder.Encode(r); err != nil { + return err + } + req, err := http.NewRequest("PUT", endpoint, buf) + if err != nil { + return err + } + c.setHeaders(req) + _, err = handleActionResponse(http.DefaultClient.Do(req)) + return err +} + +// common error handling for all action responses +func handleActionResponse(resp *http.Response, err error) (id string, e error) { + if err != nil { + return "", err + } + defer resp.Body.Close() + result := &basicResponse{} + decoder := json.NewDecoder(resp.Body) + if err = decoder.Decode(result); err != nil { + return "", fmt.Errorf("Unknown error. Status code: %d", resp.StatusCode) + } + if resp.StatusCode != 200 { + return "", fmt.Errorf(stringifyErrors(result.Errors)) + } + return result.Result.ID, nil +} + +func (c *CloudflareApi) setHeaders(req *http.Request) { + req.Header.Set("X-Auth-Key", c.ApiKey) + req.Header.Set("X-Auth-Email", c.ApiUser) +} + +// generic get handler. makes request and unmarshalls response to given interface +func (c *CloudflareApi) get(endpoint string, target interface{}) error { + req, err := http.NewRequest("GET", endpoint, nil) + if err != nil { + return err + } + c.setHeaders(req) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + return fmt.Errorf("Bad status code from cloudflare: %d not 200.", resp.StatusCode) + } + decoder := json.NewDecoder(resp.Body) + return decoder.Decode(target) +} + +func stringifyErrors(errors []interface{}) string { + dat, err := json.Marshal(errors) + if err != nil { + return "???" + } + return string(dat) +} + +type recordsResponse struct { + basicResponse + Result []*cfRecord `json:"result"` + ResultInfo pagingInfo `json:"result_info"` +} +type basicResponse struct { + Success bool `json:"success"` + Errors []interface{} `json:"errors"` + Messages []interface{} `json:"messages"` + Result struct { + ID string `json:"id"` + } `json:"result"` +} + +type zoneResponse struct { + basicResponse + Result []struct { + ID string `json:"id"` + Name string `json:"name"` + Nameservers []string `json:"name_servers"` + } `json:"result"` + ResultInfo pagingInfo `json:"result_info"` +} + +type pagingInfo struct { + Page int `json:"page"` + PerPage int `json:"per_page"` + Count int `json:"count"` + TotalCount int `json:"total_count"` +} diff --git a/providers/config/providerConfig.go b/providers/config/providerConfig.go new file mode 100644 index 000000000..b3d29fcb7 --- /dev/null +++ b/providers/config/providerConfig.go @@ -0,0 +1,50 @@ +// Package config provides functions for reading and parsing the provider credentials json file. 
+// It cleans nonstandard json features (comments and trailing commas), as well as replaces environment variable placeholders with +// their environment variable equivalents. To reference an environment variable in your json file, simply use values in this format: +// "key"="$ENV_VAR_NAME" +package config + +import ( + "encoding/json" + "fmt" + "os" + "strings" + + "github.com/DisposaBoy/JsonConfigReader" + "github.com/TomOnTime/utfutil" +) + +// LoadProviderConfigs will open the specified file name, and parse its contents. It will replace environment variables it finds if any value matches $[A-Za-z_-0-9]+ +func LoadProviderConfigs(fname string) (map[string]map[string]string, error) { + var results = map[string]map[string]string{} + dat, err := utfutil.ReadFile(fname, utfutil.POSIX) + if err != nil { + return nil, fmt.Errorf("While reading provider credentials file %v: %v", fname, err) + } + s := string(dat) + r := JsonConfigReader.New(strings.NewReader(s)) + err = json.NewDecoder(r).Decode(&results) + if err != nil { + return nil, fmt.Errorf("While parsing provider credentials file %v: %v", fname, err) + } + if err = replaceEnvVars(results); err != nil { + return nil, err + } + return results, nil +} + +func replaceEnvVars(m map[string]map[string]string) error { + for provider, keys := range m { + for k, v := range keys { + if strings.HasPrefix(v, "$") { + env := v[1:] + newVal := os.Getenv(env) + if newVal == "" { + return fmt.Errorf("Provider %s references environment variable %s, but has no value.", provider, env) + } + keys[k] = newVal + } + } + } + return nil +} diff --git a/providers/diff/diff.go b/providers/diff/diff.go new file mode 100644 index 000000000..bdf6891a0 --- /dev/null +++ b/providers/diff/diff.go @@ -0,0 +1,153 @@ +package diff + +import ( + "fmt" + "sort" +) + +type Record interface { + GetName() string + GetType() string + GetContent() string + + // Get relevant comparision data. Default implentation uses "ttl [mx priority]", but providers may insert + // provider specific metadata if needed. + GetComparisionData() string +} + +type Correlation struct { + Existing Record + Desired Record +} +type Changeset []Correlation + +func IncrementalDiff(existing []Record, desired []Record) (unchanged, create, toDelete, modify Changeset) { + unchanged = Changeset{} + create = Changeset{} + toDelete = Changeset{} + modify = Changeset{} + + // log.Printf("ID existing records: (%d)\n", len(existing)) + // for i, d := range existing { + // log.Printf("\t%d\t%v\n", i, d) + // } + // log.Printf("ID desired records: (%d)\n", len(desired)) + // for i, d := range desired { + // log.Printf("\t%d\t%v\n", i, d) + // } + + //sort existing and desired by name + type key struct { + name, rType string + } + existingByNameAndType := map[key][]Record{} + desiredByNameAndType := map[key][]Record{} + for _, e := range existing { + k := key{e.GetName(), e.GetType()} + existingByNameAndType[k] = append(existingByNameAndType[k], e) + } + for _, d := range desired { + k := key{d.GetName(), d.GetType()} + desiredByNameAndType[k] = append(desiredByNameAndType[k], d) + } + + // Look through existing records. This will give us changes and deletions and some additions + for key, existingRecords := range existingByNameAndType { + desiredRecords := desiredByNameAndType[key] + + //first look through records that are the same content on both sides. 
Those are either modifications or unchanged + + for i := len(existingRecords) - 1; i >= 0; i-- { + ex := existingRecords[i] + for j, de := range desiredRecords { + if de.GetContent() == ex.GetContent() { + //they're either identical or should be a modification of each other + if de.GetComparisionData() == ex.GetComparisionData() { + unchanged = append(unchanged, Correlation{ex, de}) + } else { + modify = append(modify, Correlation{ex, de}) + } + // remove from both slices by index + existingRecords = existingRecords[:i+copy(existingRecords[i:], existingRecords[i+1:])] + desiredRecords = desiredRecords[:j+copy(desiredRecords[j:], desiredRecords[j+1:])] + break + } + } + } + + desiredLookup := map[string]Record{} + existingLookup := map[string]Record{} + // build index based on normalized value/ttl + for _, ex := range existingRecords { + normalized := fmt.Sprintf("%s %s", ex.GetContent(), ex.GetComparisionData()) + if existingLookup[normalized] != nil { + panic(fmt.Sprintf("DUPLICATE E_RECORD FOUND: %s %s", key, normalized)) + } + existingLookup[normalized] = ex + } + for _, de := range desiredRecords { + normalized := fmt.Sprintf("%s %s", de.GetContent(), de.GetComparisionData()) + if desiredLookup[normalized] != nil { + panic(fmt.Sprintf("DUPLICATE D_RECORD FOUND: %s %s", key, normalized)) + } + desiredLookup[normalized] = de + } + // if a record is in both, it is unchanged + for norm, ex := range existingLookup { + if de, ok := desiredLookup[norm]; ok { + unchanged = append(unchanged, Correlation{ex, de}) + delete(existingLookup, norm) + delete(desiredLookup, norm) + } + } + //sort records by normalized text. Keeps behaviour deterministic + existingStrings, desiredStrings := []string{}, []string{} + for norm := range existingLookup { + existingStrings = append(existingStrings, norm) + } + for norm := range desiredLookup { + desiredStrings = append(desiredStrings, norm) + } + sort.Strings(existingStrings) + sort.Strings(desiredStrings) + // Modifications. Take 1 from each side. + for len(desiredStrings) > 0 && len(existingStrings) > 0 { + modify = append(modify, Correlation{existingLookup[existingStrings[0]], desiredLookup[desiredStrings[0]]}) + existingStrings = existingStrings[1:] + desiredStrings = desiredStrings[1:] + } + // If desired still has things they are additions + for _, norm := range desiredStrings { + rec := desiredLookup[norm] + create = append(create, Correlation{nil, rec}) + } + // if found , but not desired, delete it + for _, norm := range existingStrings { + rec := existingLookup[norm] + toDelete = append(toDelete, Correlation{rec, nil}) + } + // remove this set from the desired list to indicate we have processed it. 
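+		// (whatever remains in desiredByNameAndType after this loop has no
+		// matching existing records at all, so it becomes pure additions below)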
+ delete(desiredByNameAndType, key) + } + + //any name/type sets not already processed are pure additions + for name := range existingByNameAndType { + delete(desiredByNameAndType, name) + } + for _, desiredList := range desiredByNameAndType { + for _, rec := range desiredList { + create = append(create, Correlation{nil, rec}) + } + } + return +} + +func (c Correlation) String() string { + if c.Existing == nil { + return fmt.Sprintf("CREATE %s %s %s %s", c.Desired.GetType(), c.Desired.GetName(), c.Desired.GetContent(), c.Desired.GetComparisionData()) + } + if c.Desired == nil { + return fmt.Sprintf("DELETE %s %s %s %s", c.Existing.GetType(), c.Existing.GetName(), c.Existing.GetContent(), c.Existing.GetComparisionData()) + } + return fmt.Sprintf("MODIFY %s %s: (%s %s) -> (%s %s)", c.Existing.GetType(), c.Existing.GetName(), c.Existing.GetContent(), c.Existing.GetComparisionData(), c.Desired.GetContent(), c.Desired.GetComparisionData()) +} diff --git a/providers/diff/diff_test.go b/providers/diff/diff_test.go new file mode 100644 index 000000000..698a04ce5 --- /dev/null +++ b/providers/diff/diff_test.go @@ -0,0 +1,112 @@ +package diff + +import ( + "fmt" + "strings" + "testing" + + "github.com/miekg/dns/dnsutil" +) + +type myRecord string //@ A 1 1.2.3.4 + +func (m myRecord) GetName() string { + name := strings.SplitN(string(m), " ", 4)[0] + return dnsutil.AddOrigin(name, "example.com") +} +func (m myRecord) GetType() string { + return strings.SplitN(string(m), " ", 4)[1] +} +func (m myRecord) GetContent() string { + return strings.SplitN(string(m), " ", 4)[3] +} +func (m myRecord) GetComparisionData() string { + return fmt.Sprint(strings.SplitN(string(m), " ", 4)[2]) +} + +func TestAdditionsOnly(t *testing.T) { + desired := []Record{ + myRecord("@ A 1 1.2.3.4"), + } + existing := []Record{} + checkLengths(t, existing, desired, 0, 1, 0, 0) +} + +func TestDeletionsOnly(t *testing.T) { + existing := []Record{ + myRecord("@ A 1 1.2.3.4"), + } + desired := []Record{} + checkLengths(t, existing, desired, 0, 0, 1, 0) +} + +func TestModification(t *testing.T) { + existing := []Record{ + myRecord("www A 1 1.1.1.1"), + myRecord("@ A 1 1.2.3.4"), + } + desired := []Record{ + myRecord("@ A 32 1.2.3.4"), + myRecord("www A 1 1.1.1.1"), + } + un, _, _, mod := checkLengths(t, existing, desired, 1, 0, 0, 1) + if t.Failed() { + return + } + if un[0].Desired != desired[1] || un[0].Existing != existing[0] { + t.Error("Expected unchanged records to be correlated") + } + if mod[0].Desired != desired[0] || mod[0].Existing != existing[1] { + t.Errorf("Expected modified records to be correlated") + } +} + +func TestUnchangedWithAddition(t *testing.T) { + existing := []Record{ + myRecord("www A 1 1.1.1.1"), + } + desired := []Record{ + myRecord("www A 1 1.2.3.4"), + myRecord("www A 1 1.1.1.1"), + } + un, _, _, _ := checkLengths(t, existing, desired, 1, 1, 0, 0) + if un[0].Desired != desired[1] || un[0].Existing != existing[0] { + t.Errorf("Expected unchanged records to be correlated") + } +} + +func TestOutOfOrderRecords(t *testing.T) { + existing := []Record{ + myRecord("www A 1 1.1.1.1"), + myRecord("www A 1 2.2.2.2"), + myRecord("www A 1 3.3.3.3"), + } + desired := []Record{ + myRecord("www A 1 1.1.1.1"), + myRecord("www A 1 2.2.2.2"), + myRecord("www A 1 2.2.2.3"), + myRecord("www A 10 3.3.3.3"), + } + _, _, _, mods := checkLengths(t, existing, desired, 2, 1, 0, 1) + if mods[0].Desired != desired[3] || mods[0].Existing != existing[2] { + t.Fatalf("Expected to match %s and %s, but matched %s and %s", existing[2], 
desired[3], mods[0].Existing, mods[0].Desired) + } + +} + +func checkLengths(t *testing.T, existing, desired []Record, unCount, createCount, delCount, modCount int) (un, cre, del, mod Changeset) { + un, cre, del, mod = IncrementalDiff(existing, desired) + if len(un) != unCount { + t.Errorf("Got %d unchanged records, but expected %d", len(un), unCount) + } + if len(cre) != createCount { + t.Errorf("Got %d records to create, but expected %d", len(cre), createCount) + } + if len(del) != delCount { + t.Errorf("Got %d records to delete, but expected %d", len(del), delCount) + } + if len(mod) != modCount { + t.Errorf("Got %d records to modify, but expected %d", len(mod), modCount) + } + return +} diff --git a/providers/gandi/gandiProvider.go b/providers/gandi/gandiProvider.go new file mode 100644 index 000000000..95d0bf8f3 --- /dev/null +++ b/providers/gandi/gandiProvider.go @@ -0,0 +1,184 @@ +package gandi + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/StackExchange/dnscontrol/models" + "github.com/StackExchange/dnscontrol/providers" + "github.com/StackExchange/dnscontrol/providers/diff" + + gandirecord "github.com/prasmussen/gandi-api/domain/zone/record" +) + +/* + +Gandi API DNS provider: + +Info required in `creds.json`: + - apikey + +*/ + +type GandiApi struct { + ApiKey string + domainIndex map[string]int64 // Map of domainname to index + nameservers map[string][]*models.Nameserver + ZoneId int64 +} + +type cfRecord struct { + gandirecord.RecordInfo +} + +func (c *cfRecord) GetName() string { + return c.Name +} + +func (c *cfRecord) GetType() string { + return c.Type +} + +func (c *cfRecord) GetTtl() int64 { + return c.Ttl +} + +func (c *cfRecord) GetValue() string { + return c.Value +} + +func (c *cfRecord) GetContent() string { + switch c.Type { + case "MX": + parts := strings.SplitN(c.Value, " ", 2) + // TODO(tlim): This should check for more errors. 
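+		// For MX, Value is stored as "<priority> <target>", so the content is
+		// everything after the first space; the priority itself is compared
+		// separately via GetComparisionData.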
+ return strings.Join(parts[1:], " ") + default: + } + return c.Value +} + +func (c *cfRecord) GetComparisionData() string { + if c.Type == "MX" { + parts := strings.SplitN(c.Value, " ", 2) + priority, err := strconv.Atoi(parts[0]) + if err != nil { + return fmt.Sprintf("%s %#v", c.Ttl, parts[0]) + } + return fmt.Sprintf("%d %d", c.Ttl, priority) + } + return fmt.Sprintf("%d", c.Ttl) +} + +func (c *GandiApi) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) { + + if c.domainIndex == nil { + if err := c.fetchDomainList(); err != nil { + return nil, err + } + } + + _, ok := c.domainIndex[dc.Name] + if !ok { + return nil, fmt.Errorf("%s not listed in zones for gandi account", dc.Name) + } + + domaininfo, err := c.fetchDomainInfo(dc.Name) + if err != nil { + return nil, err + } + for _, nsname := range domaininfo.Nameservers { + dc.Nameservers = append(dc.Nameservers, &models.Nameserver{Name: nsname}) + } + foundRecords, err := c.getZoneRecords(domaininfo.ZoneId) + if err != nil { + return nil, err + } + + // Convert to []diff.Records and compare: + foundDiffRecords := make([]diff.Record, len(foundRecords)) + for i, rec := range foundRecords { + n := &cfRecord{} + n.Id = 0 + n.Name = rec.Name + n.Ttl = int64(rec.Ttl) + n.Type = rec.Type + n.Value = rec.Value + foundDiffRecords[i] = n + } + expectedDiffRecords := make([]diff.Record, len(dc.Records)) + expectedRecordSets := make([]gandirecord.RecordSet, len(dc.Records)) + for i, rec := range dc.Records { + n := &cfRecord{} + n.Id = 0 + n.Name = rec.Name + n.Ttl = int64(rec.TTL) + if n.Ttl == 0 { + n.Ttl = 3600 + } + n.Type = rec.Type + switch n.Type { + case "MX": + n.Value = fmt.Sprintf("%d %s", rec.Priority, rec.Target) + case "TXT": + n.Value = "\"" + rec.Target + "\"" // FIXME(tlim): Should do proper quoting. + default: + n.Value = rec.Target + } + expectedDiffRecords[i] = n + expectedRecordSets[i] = gandirecord.RecordSet{} + expectedRecordSets[i]["type"] = n.Type + expectedRecordSets[i]["name"] = n.Name + expectedRecordSets[i]["value"] = n.Value + if n.Ttl != 0 { + expectedRecordSets[i]["ttl"] = n.Ttl + } + } + _, create, del, mod := diff.IncrementalDiff(foundDiffRecords, expectedDiffRecords) + + // Print a list of changes. 
Generate an actual change that is the zone + changes := false + for _, i := range create { + changes = true + fmt.Println(i) + } + for _, i := range del { + changes = true + fmt.Println(i) + } + for _, i := range mod { + changes = true + fmt.Println(i) + } + + msg := fmt.Sprintf("GENERATE_ZONE: %s (%d records)", dc.Name, len(expectedDiffRecords)) + corrections := []*models.Correction{} + if changes { + corrections = append(corrections, + &models.Correction{ + Msg: msg, + F: func() error { + fmt.Printf("CREATING ZONE: %v\n", dc.Name) + return c.createGandiZone(dc.Name, domaininfo.ZoneId, expectedRecordSets) + }, + }) + } + + return corrections, nil +} + +func newGandi(m map[string]string, metadata json.RawMessage) (providers.DNSServiceProvider, error) { + api := &GandiApi{} + api.ApiKey = m["apikey"] + if api.ApiKey == "" { + return nil, fmt.Errorf("Gandi apikey must be provided.") + } + + return api, nil +} + +func init() { + providers.RegisterDomainServiceProviderType("GANDI", newGandi) +} diff --git a/providers/gandi/protocol.go b/providers/gandi/protocol.go new file mode 100644 index 000000000..1bce518dd --- /dev/null +++ b/providers/gandi/protocol.go @@ -0,0 +1,168 @@ +package gandi + +import ( + "fmt" + + "github.com/StackExchange/dnscontrol/providers/diff" +) + +import ( + gandiclient "github.com/prasmussen/gandi-api/client" + gandidomain "github.com/prasmussen/gandi-api/domain" + gandizone "github.com/prasmussen/gandi-api/domain/zone" + gandirecord "github.com/prasmussen/gandi-api/domain/zone/record" + gandiversion "github.com/prasmussen/gandi-api/domain/zone/version" +) + +// fetchDomainList gets list of domains for account. Cache ids for easy lookup. +func (c *GandiApi) fetchDomainList() error { + c.domainIndex = map[string]int64{} + gc := gandiclient.New(c.ApiKey, gandiclient.Production) + domain := gandidomain.New(gc) + domains, err := domain.List() + if err != nil { + // fmt.Println(err) + return err + } + for _, d := range domains { + c.domainIndex[d.Fqdn] = d.Id + } + return nil +} + +// fetchDomainInfo gets information about a domain. +func (c *GandiApi) fetchDomainInfo(fqdn string) (*gandidomain.DomainInfo, error) { + gc := gandiclient.New(c.ApiKey, gandiclient.Production) + domain := gandidomain.New(gc) + return domain.Info(fqdn) +} + +// getRecordsForDomain returns a list of records for a zone. +func (c *GandiApi) getZoneRecords(zoneid int64) ([]*gandirecord.RecordInfo, error) { + gc := gandiclient.New(c.ApiKey, gandiclient.Production) + record := gandirecord.New(gc) + return record.List(zoneid, 0) +} + +// listZones retrieves the list of zones. +func (c *GandiApi) listZones() ([]*gandizone.ZoneInfoBase, error) { + gc := gandiclient.New(c.ApiKey, gandiclient.Production) + zone := gandizone.New(gc) + return zone.List() +} + +// setZone assigns a particular zone to a domain. +func (c *GandiApi) setZones(domainname string, zone_id int64) (*gandidomain.DomainInfo, error) { + gc := gandiclient.New(c.ApiKey, gandiclient.Production) + zone := gandizone.New(gc) + return zone.Set(domainname, zone_id) +} + +// getZoneInfo gets ZoneInfo about a zone. +func (c *GandiApi) getZoneInfo(zoneid int64) (*gandizone.ZoneInfo, error) { + gc := gandiclient.New(c.ApiKey, gandiclient.Production) + zone := gandizone.New(gc) + return zone.Info(zoneid) +} + +// createZone creates an entirely new zone. 
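+// (called from getEditableZone below when no reusable "<domain> dnscontrol"
+// zone already exists)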
+func (c *GandiApi) createZone(name string) (*gandizone.ZoneInfo, error) { + gc := gandiclient.New(c.ApiKey, gandiclient.Production) + zone := gandizone.New(gc) + return zone.Create(name) +} + +// replaceZoneContents +func (c *GandiApi) replaceZoneContents(zone_id int64, version_id int64, records []diff.Record) error { + return fmt.Errorf("replaceZoneContents unimplemented") +} + +func (c *GandiApi) getEditableZone(domainname string, zoneinfo *gandizone.ZoneInfo) (int64, error) { + var zone_id int64 + if zoneinfo.Domains < 2 { + // If there is only on{ domain linked to this zone, use it. + zone_id = zoneinfo.Id + fmt.Printf("Using zone id=%d named %#v\n", zone_id, zoneinfo.Name) + return zone_id, nil + } + + // We can't use the zone_id given to us. Let's make/find a new one. + zones, err := c.listZones() + if err != nil { + return 0, err + } + zonename := fmt.Sprintf("%s dnscontrol", domainname) + for _, z := range zones { + if z.Name == zonename { + zone_id = z.Id + fmt.Printf("Recycling zone id=%d named %#v\n", zone_id, z.Name) + return zone_id, nil + } + } + zoneinfo, err = c.createZone(zonename) + if err != nil { + return 0, err + } + zone_id = zoneinfo.Id + fmt.Printf("Created zone id=%d named %#v\n", zone_id, zoneinfo.Name) + return zone_id, nil +} + +// makeEditableZone +func (c *GandiApi) makeEditableZone(zone_id int64) (int64, error) { + gc := gandiclient.New(c.ApiKey, gandiclient.Production) + version := gandiversion.New(gc) + return version.New(zone_id, 0) +} + +// setZoneRecords +func (c *GandiApi) setZoneRecords(zone_id, version_id int64, records []gandirecord.RecordSet) ([]*gandirecord.RecordInfo, error) { + gc := gandiclient.New(c.ApiKey, gandiclient.Production) + record := gandirecord.New(gc) + return record.SetRecords(zone_id, version_id, records) +} + +// activateVersion +func (c *GandiApi) activateVersion(zone_id, version_id int64) (bool, error) { + gc := gandiclient.New(c.ApiKey, gandiclient.Production) + version := gandiversion.New(gc) + return version.Set(zone_id, version_id) +} + +func (c *GandiApi) createGandiZone(domainname string, zone_id int64, records []gandirecord.RecordSet) error { + + // Get the zone_id of the zone we'll be updating. + zoneinfo, err := c.getZoneInfo(zone_id) + if err != nil { + return err + } + //fmt.Println("ZONEINFO:", zoneinfo) + zone_id, err = c.getEditableZone(domainname, zoneinfo) + if err != nil { + return err + } + + // Get the version_id of the zone we're updating. + version_id, err := c.makeEditableZone(zone_id) + if err != nil { + return err + } + + // Update the new version. 
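+	// The records are written into the new version, the version is activated,
+	// and finally the domain is pointed at the (possibly new) zone.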
+ _, err = c.setZoneRecords(zone_id, version_id, records) + if err != nil { + return err + } + + // Activate zone version + _, err = c.activateVersion(zone_id, version_id) + if err != nil { + return err + } + _, err = c.setZones(domainname, zone_id) + if err != nil { + return err + } + + return nil +} diff --git a/providers/namecheap/namecheap.go b/providers/namecheap/namecheap.go new file mode 100644 index 000000000..87b69c3fd --- /dev/null +++ b/providers/namecheap/namecheap.go @@ -0,0 +1,61 @@ +package namecheap + +import ( + "fmt" + "strings" + + nc "github.com/billputer/go-namecheap" + "github.com/StackExchange/dnscontrol/models" + "github.com/StackExchange/dnscontrol/providers" +) + +type Namecheap struct { + ApiKey string + ApiUser string + client *nc.Client +} + +func init() { + providers.RegisterRegistrarType("NAMECHEAP", newReg) +} + +func newReg(m map[string]string) (providers.Registrar, error) { + api := &Namecheap{} + api.ApiUser, api.ApiKey = m["apiuser"], m["apikey"] + if api.ApiKey == "" || api.ApiUser == "" { + return nil, fmt.Errorf("Namecheap apikey and apiuser must be provided.") + } + api.client = nc.NewClient(api.ApiUser, api.ApiKey, api.ApiUser) + return api, nil +} + +func (n *Namecheap) GetRegistrarCorrections(dc *models.DomainConfig) ([]*models.Correction, error) { + info, err := n.client.DomainGetInfo(dc.Name) + if err != nil { + return nil, err + } + //todo: sort both + found := strings.Join(info.DNSDetails.Nameservers, ",") + desired := "" + for _, d := range dc.Nameservers { + if desired != "" { + desired += "," + } + desired += d.Name + } + if found != desired { + parts := strings.SplitN(dc.Name, ".", 2) + sld, tld := parts[0], parts[1] + return []*models.Correction{ + {Msg: fmt.Sprintf("Change Nameservers from '%s' to '%s'", found, desired), + F: func() error { + _, err := n.client.DomainDNSSetCustom(sld, tld, desired) + if err != nil { + return err + } + return nil + }}, + }, nil + } + return nil, nil +} diff --git a/providers/namedotcom/namedotcom.md b/providers/namedotcom/namedotcom.md new file mode 100644 index 000000000..9988c73eb --- /dev/null +++ b/providers/namedotcom/namedotcom.md @@ -0,0 +1,48 @@ +## name.com Provider + +### required config + +In your providers config json file you must provide your name.com api username and access token: + +``` + "yourNameDotComProviderName":{ + "apikey": "yourApiKeyFromName.com-klasjdkljasdlk235235235235", + "apiuser": "yourUsername" + } +``` + +In order to get api access you need to [apply for access](https://www.name.com/reseller/apply) + +### example dns config js (registrar only): + +``` +var NAMECOM = NewRegistrar("myNameCom","NAMEDOTCOM"); + +var mynameServers = [ + NAMESERVER("bill.ns.cloudflare.com"), + NAMESERVER("fred.ns.cloudflare.com") +]; + +D("example.tld",NAMECOM,myNameServers + //records handled by another provider... 
+); +``` + +### example config (registrar and records managed by namedotcom) + +``` +var NAMECOM = NewRegistrar("myNameCom","NAMEDOTCOM"); +var NAMECOMDSP = NewDSP("myNameCom","NAMEDOTCOM") + +D("exammple.tld", NAMECOM, NAMECOMDSP, + //ns[1-4].name.com used by default as nameservers + + //override default ttl of 300s + DefaultTTL(3600), + + A("test","1.2.3.4"), + + //override ttl for one record only + CNAME("foo","some.otherdomain.tld.",TTL(100)) +) +``` \ No newline at end of file diff --git a/providers/namedotcom/namedotcomProvider.go b/providers/namedotcom/namedotcomProvider.go new file mode 100644 index 000000000..d1a9c500e --- /dev/null +++ b/providers/namedotcom/namedotcomProvider.go @@ -0,0 +1,117 @@ +//Package namedotcom implements a registrar that uses the name.com api to set name servers. It will self register it's providers when imported. +package namedotcom + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + + "github.com/StackExchange/dnscontrol/providers" +) + +type nameDotCom struct { + APIUser string `json:"apiuser"` + APIKey string `json:"apikey"` +} + +func newReg(conf map[string]string) (providers.Registrar, error) { + return newProvider(conf) +} + +func newDsp(conf map[string]string, meta json.RawMessage) (providers.DNSServiceProvider, error) { + return newProvider(conf) +} + +func newProvider(conf map[string]string) (*nameDotCom, error) { + api := &nameDotCom{} + api.APIUser, api.APIKey = conf["apiuser"], conf["apikey"] + if api.APIKey == "" || api.APIUser == "" { + return nil, fmt.Errorf("Name.com apikey and apiuser must be provided.") + } + return api, nil +} + +func init() { + providers.RegisterRegistrarType("NAMEDOTCOM", newReg) + providers.RegisterDomainServiceProviderType("NAMEDOTCOM", newDsp) +} + +/// +//various http helpers for interacting with api +/// + +func (n *nameDotCom) addAuth(r *http.Request) { + r.Header.Add("Api-Username", n.APIUser) + r.Header.Add("Api-Token", n.APIKey) +} + +type apiResult struct { + Result struct { + Code int `json:"code"` + Message string `json:"message"` + } `json:"result"` +} + +func (r *apiResult) getErr() error { + if r == nil { + return nil + } + if r.Result.Code != 100 { + if r.Result.Message == "" { + return fmt.Errorf("Unknown error from name.com") + } + return fmt.Errorf(r.Result.Message) + } + return nil +} + +var apiBase = "https://api.name.com/api" + +//perform http GET and unmarshal response json into target struct +func (n *nameDotCom) get(url string, target interface{}) error { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return err + } + n.addAuth(req) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + return json.Unmarshal(data, target) +} + +// perform http POST, json marshalling the given data into the body +func (n *nameDotCom) post(url string, data interface{}) (*apiResult, error) { + buf := &bytes.Buffer{} + enc := json.NewEncoder(buf) + if err := enc.Encode(data); err != nil { + return nil, err + } + req, err := http.NewRequest("POST", url, buf) + if err != nil { + return nil, err + } + n.addAuth(req) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + text, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + result := &apiResult{} + if err = json.Unmarshal(text, result); err != nil { + return nil, err + } + return result, nil +} diff --git 
a/providers/namedotcom/nameservers.go b/providers/namedotcom/nameservers.go new file mode 100644 index 000000000..2d3c44863 --- /dev/null +++ b/providers/namedotcom/nameservers.go @@ -0,0 +1,82 @@ +package namedotcom + +import ( + "fmt" + "regexp" + "sort" + "strings" + + "github.com/StackExchange/dnscontrol/models" +) + +func (n *nameDotCom) GetRegistrarCorrections(dc *models.DomainConfig) ([]*models.Correction, error) { + foundNameservers, err := n.getNameservers(dc.Name) + if err != nil { + return nil, err + } + if defaultNsRegexp.MatchString(foundNameservers) { + foundNameservers = "ns1.name.com,ns2.name.com,ns3.name.com,ns4.name.com" + } + expected := []string{} + for _, ns := range dc.Nameservers { + name := strings.TrimRight(ns.Name, ".") + expected = append(expected, name) + } + sort.Strings(expected) + expectedNameservers := strings.Join(expected, ",") + + if foundNameservers != expectedNameservers { + return []*models.Correction{ + { + Msg: fmt.Sprintf("Update nameservers %s -> %s", foundNameservers, expectedNameservers), + F: n.updateNameservers(expected, dc.Name), + }, + }, nil + } + return nil, nil +} + +//even if you provide them "ns1.name.com", they will set it to "ns1qrt.name.com". This will match that pattern to see if defaults are in use. +var defaultNsRegexp = regexp.MustCompile(`ns1[a-z]{0,3}\.name\.com,ns2[a-z]{0,3}\.name\.com,ns3[a-z]{0,3}\.name\.com,ns4[a-z]{0,3}\.name\.com`) + +func apiGetDomain(domain string) string { + return fmt.Sprintf("%s/domain/get/%s", apiBase, domain) +} +func apiUpdateNS(domain string) string { + return fmt.Sprintf("%s/domain/update_nameservers/%s", apiBase, domain) +} + +type getDomainResult struct { + *apiResult + DomainName string `json:"domain_name"` + Nameservers []string `json:"nameservers"` +} + +// returns comma joined list of nameservers (in alphabetical order) +func (n *nameDotCom) getNameservers(domain string) (string, error) { + result := &getDomainResult{} + if err := n.get(apiGetDomain(domain), result); err != nil { + return "", err + } + if err := result.getErr(); err != nil { + return "", err + } + sort.Strings(result.Nameservers) + return strings.Join(result.Nameservers, ","), nil +} + +func (n *nameDotCom) updateNameservers(ns []string, domain string) func() error { + return func() error { + dat := struct { + Nameservers []string `json:"nameservers"` + }{ns} + resp, err := n.post(apiUpdateNS(domain), dat) + if err != nil { + return err + } + if err = resp.getErr(); err != nil { + return err + } + return nil + } +} diff --git a/providers/namedotcom/nameservers_test.go b/providers/namedotcom/nameservers_test.go new file mode 100644 index 000000000..e443314db --- /dev/null +++ b/providers/namedotcom/nameservers_test.go @@ -0,0 +1,150 @@ +package namedotcom + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/StackExchange/dnscontrol/models" +) + +var ( + mux *http.ServeMux + client *nameDotCom + server *httptest.Server +) + +func setup() { + mux = http.NewServeMux() + server = httptest.NewServer(mux) + + client = &nameDotCom{ + APIUser: "bob", + APIKey: "123", + } + apiBase = server.URL +} + +func teardown() { + server.Close() +} + +func TestGetNameservers(t *testing.T) { + for i, test := range []struct { + givenNs, expected string + }{ + {"", ""}, + {`"foo.ns.tld","bar.ns.tld"`, "bar.ns.tld,foo.ns.tld"}, + {"ERR", "ERR"}, + {"MSGERR", "ERR"}, + } { + setup() + defer teardown() + + mux.HandleFunc("/domain/get/example.tld", func(w http.ResponseWriter, r *http.Request) { + if test.givenNs == "ERR" 
{ + http.Error(w, "UH OH", 500) + return + } + if test.givenNs == "MSGERR" { + w.Write(nameComError) + return + } + w.Write(domainResponse(test.givenNs)) + }) + + found, err := client.getNameservers("example.tld") + if err != nil { + if test.expected == "ERR" { + continue + } + t.Errorf("Error on test %d: %s", i, err) + continue + } + if test.expected == "ERR" { + t.Errorf("Expected error on test %d, but was none", i) + continue + } + if found != test.expected { + t.Errorf("Test %d: Expected '%s', but found '%s'", i, test.expected, found) + } + } +} + +func TestGetCorrections(t *testing.T) { + for i, test := range []struct { + givenNs string + expected int + }{ + {"", 1}, + {`"foo.ns.tld","bar.ns.tld"`, 0}, + {`"bar.ns.tld","foo.ns.tld"`, 0}, + {`"foo.ns.tld"`, 1}, + {`"1.ns.aaa","2.ns.www"`, 1}, + {"ERR", -1}, //-1 means we expect an error + {"MSGERR", -1}, + } { + setup() + defer teardown() + + mux.HandleFunc("/domain/get/example.tld", func(w http.ResponseWriter, r *http.Request) { + if test.givenNs == "ERR" { + http.Error(w, "UH OH", 500) + return + } + if test.givenNs == "MSGERR" { + w.Write(nameComError) + return + } + w.Write(domainResponse(test.givenNs)) + }) + dc := &models.DomainConfig{ + Name: "example.tld", + Nameservers: []*models.Nameserver{ + {Name: "foo.ns.tld"}, + {Name: "bar.ns.tld"}, + }, + } + corrections, err := client.GetRegistrarCorrections(dc) + if err != nil { + if test.expected == -1 { + continue + } + t.Errorf("Error on test %d: %s", i, err) + continue + } + if test.expected == -1 { + t.Errorf("Expected error on test %d, but was none", i) + continue + } + if len(corrections) != test.expected { + t.Errorf("Test %d: Expected '%d', but found '%d'", i, test.expected, len(corrections)) + } + } +} + +func domainResponse(ns string) []byte { + return []byte(fmt.Sprintf(`{ + "result": { + "code": 100, + "message": "Command Successful" + }, + "domain_name": "example.tld", + "create_date": "2015-12-28 18:08:05", + "expire_date": "2016-12-28 23:59:59", + "locked": true, + "nameservers": [%s], + "contacts": [], + "addons": { + "whois_privacy": { + "price": "3.99" + }, + "domain\/renew": { + "price": "10.99" + } + } +}`, ns)) +} + +var nameComError = []byte(`{"result":{"code":251,"message":"Authentication Error - Invalid Username Or Api Token"}}`) diff --git a/providers/namedotcom/records.go b/providers/namedotcom/records.go new file mode 100644 index 000000000..715efcc40 --- /dev/null +++ b/providers/namedotcom/records.go @@ -0,0 +1,168 @@ +package namedotcom + +import ( + "fmt" + + "github.com/miekg/dns/dnsutil" + + "github.com/StackExchange/dnscontrol/models" + "github.com/StackExchange/dnscontrol/providers/diff" +) + +var defaultNameservers = []*models.Nameserver{ + {Name: "ns1.name.com"}, + {Name: "ns2.name.com"}, + {Name: "ns3.name.com"}, + {Name: "ns4.name.com"}, +} + +func (n *nameDotCom) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) { + dc.Nameservers = defaultNameservers + records, err := n.getRecords(dc.Name) + if err != nil { + return nil, err + } + actual := make([]diff.Record, len(records)) + for i := range records { + actual[i] = records[i] + } + + desired := make([]diff.Record, len(dc.Records)) + for i, rec := range dc.Records { + if rec.TTL == 0 { + rec.TTL = 300 + } + desired[i] = rec + } + + _, create, del, mod := diff.IncrementalDiff(actual, desired) + corrections := []*models.Correction{} + + for _, d := range del { + rec := d.Existing.(*nameComRecord) + c := &models.Correction{Msg: d.String(), F: func() error { return 
n.deleteRecord(rec.RecordID, dc.Name) }} + corrections = append(corrections, c) + } + for _, cre := range create { + rec := cre.Desired.(*models.RecordConfig) + c := &models.Correction{Msg: cre.String(), F: func() error { return n.createRecord(rec, dc.Name) }} + corrections = append(corrections, c) + } + for _, chng := range mod { + old := chng.Existing.(*nameComRecord) + new := chng.Desired.(*models.RecordConfig) + c := &models.Correction{Msg: chng.String(), F: func() error { + err := n.deleteRecord(old.RecordID, dc.Name) + if err != nil { + return err + } + return n.createRecord(new, dc.Name) + }} + corrections = append(corrections, c) + } + return corrections, nil +} + +func apiGetRecords(domain string) string { + return fmt.Sprintf("%s/dns/list/%s", apiBase, domain) +} +func apiCreateRecord(domain string) string { + return fmt.Sprintf("%s/dns/create/%s", apiBase, domain) +} +func apiDeleteRecord(domain string) string { + return fmt.Sprintf("%s/dns/delete/%s", apiBase, domain) +} + +type nameComRecord struct { + RecordID string `json:"record_id"` + Name string `json:"name"` + Type string `json:"type"` + Content string `json:"content"` + TTL string `json:"ttl"` + Priority string `json:"priority"` +} + +func (r *nameComRecord) GetName() string { + return r.Name +} +func (r *nameComRecord) GetType() string { + return r.Type +} +func (r *nameComRecord) GetContent() string { + return r.Content +} +func (r *nameComRecord) GetComparisionData() string { + mxPrio := "" + if r.Type == "MX" { + mxPrio = fmt.Sprintf(" %s ", r.Priority) + } + return fmt.Sprintf("%s%s", r.TTL, mxPrio) +} + +type listRecordsResponse struct { + *apiResult + Records []*nameComRecord `json:"records"` +} + +func (n *nameDotCom) getRecords(domain string) ([]*nameComRecord, error) { + result := &listRecordsResponse{} + err := n.get(apiGetRecords(domain), result) + if err != nil { + return nil, err + } + if err = result.getErr(); err != nil { + return nil, err + } + + for _, rc := range result.Records { + if rc.Type == "CNAME" || rc.Type == "MX" || rc.Type == "NS" { + rc.Content = rc.Content + "." + + } + } + + return result.Records, nil +} + +func (n *nameDotCom) createRecord(rc *models.RecordConfig, domain string) error { + target := rc.Target + if rc.Type == "CNAME" || rc.Type == "MX" || rc.Type == "NS" { + if target[len(target)-1] == '.' { + target = target[:len(target)-1] + } else { + return fmt.Errorf("Unexpected. 
CNAME/MX/NS target did not end with dot.\n") + } + } + dat := struct { + Hostname string `json:"hostname"` + Type string `json:"type"` + Content string `json:"content"` + TTL uint32 `json:"ttl,omitempty"` + Priority uint16 `json:"priority,omitempty"` + }{ + Hostname: dnsutil.TrimDomainName(rc.NameFQDN, domain), + Type: rc.Type, + Content: target, + TTL: rc.TTL, + Priority: rc.Priority, + } + if dat.Hostname == "@" { + dat.Hostname = "" + } + resp, err := n.post(apiCreateRecord(domain), dat) + if err != nil { + return err + } + return resp.getErr() +} + +func (n *nameDotCom) deleteRecord(id, domain string) error { + dat := struct { + ID string `json:"record_id"` + }{id} + resp, err := n.post(apiDeleteRecord(domain), dat) + if err != nil { + return err + } + return resp.getErr() +} diff --git a/providers/providers.go b/providers/providers.go new file mode 100644 index 000000000..284ce616c --- /dev/null +++ b/providers/providers.go @@ -0,0 +1,119 @@ +package providers + +import ( + "encoding/json" + "fmt" + "log" + + "github.com/StackExchange/dnscontrol/models" +) + +//Registrar is an interface for a domain registrar. It can return a list of needed corrections to be applied in the future. +type Registrar interface { + GetRegistrarCorrections(dc *models.DomainConfig) ([]*models.Correction, error) +} + +//DNSServiceProvider is able to generate a set of corrections that need to be made to correct records for a domain +type DNSServiceProvider interface { + GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) +} + +//RegistrarInitializer is a function to create a registrar. Function will be passed the unprocessed json payload from the configuration file for the given provider. +type RegistrarInitializer func(map[string]string) (Registrar, error) + +var registrarTypes = map[string]RegistrarInitializer{} + +//DspInitializer is a function to create a registrar. Function will be passed the unprocessed json payload from the configuration file for the given provider. +type DspInitializer func(map[string]string, json.RawMessage) (DNSServiceProvider, error) + +var dspTypes = map[string]DspInitializer{} + +//RegisterRegistrarType adds a registrar type to the registry by providing a suitable initialization function. +func RegisterRegistrarType(name string, init RegistrarInitializer) { + if _, ok := registrarTypes[name]; ok { + log.Fatalf("Cannot register registrar type %s multiple times", name) + } + registrarTypes[name] = init +} + +//RegisterDomainServiceProviderType adds a dsp to the registry with the given initialization function. +func RegisterDomainServiceProviderType(name string, init DspInitializer) { + if _, ok := dspTypes[name]; ok { + log.Fatalf("Cannot register registrar type %s multiple times", name) + } + dspTypes[name] = init +} + +func createRegistrar(rType string, config map[string]string) (Registrar, error) { + initer, ok := registrarTypes[rType] + if !ok { + return nil, fmt.Errorf("Registrar type %s not declared.", rType) + } + return initer(config) +} + +func createDNSProvider(dType string, config map[string]string, meta json.RawMessage) (DNSServiceProvider, error) { + initer, ok := dspTypes[dType] + if !ok { + return nil, fmt.Errorf("DSP type %s not declared", dType) + } + return initer(config, meta) +} + +//CreateRegistrars will load all registrars from the dns config, and create instances of the correct type using data from +//the provider config to load relevant keys and options. 
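This registry is what lets each provider package wire itself in from an init() function, exactly as the namecheap and namedotcom packages do earlier in this patch. As a minimal sketch, a hypothetical EXAMPLE provider (all names below are invented for illustration) would only need something like the following to become addressable from dnsconfig.js and the providers file:

```go
package exampleprovider // hypothetical provider package

import (
	"encoding/json"
	"fmt"

	"github.com/StackExchange/dnscontrol/models"
	"github.com/StackExchange/dnscontrol/providers"
)

type example struct{ apiKey string }

// Satisfy both interfaces by returning (possibly empty) correction lists.
func (e *example) GetRegistrarCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {
	return nil, nil
}
func (e *example) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {
	return nil, nil
}

func newExample(conf map[string]string) (*example, error) {
	if conf["apikey"] == "" {
		return nil, fmt.Errorf("EXAMPLE apikey must be provided")
	}
	return &example{apiKey: conf["apikey"]}, nil
}

// init registers the type under the name used in the dns config.
func init() {
	providers.RegisterRegistrarType("EXAMPLE", func(conf map[string]string) (providers.Registrar, error) {
		return newExample(conf)
	})
	providers.RegisterDomainServiceProviderType("EXAMPLE", func(conf map[string]string, meta json.RawMessage) (providers.DNSServiceProvider, error) {
		return newExample(conf)
	})
}
```

CreateRegistrars and CreateDsps, defined next, then look the registered initializer up by the type name given in the configuration and feed it the matching credentials block.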
+func CreateRegistrars(d *models.DNSConfig, providerConfigs map[string]map[string]string) (map[string]Registrar, error) { + regs := map[string]Registrar{} + for _, reg := range d.Registrars { + rawMsg, ok := providerConfigs[reg.Name] + if !ok && reg.Type != "NONE" { + return nil, fmt.Errorf("Registrar %s not listed in -providers file.", reg.Name) + } + registrar, err := createRegistrar(reg.Type, rawMsg) + if err != nil { + return nil, err + } + regs[reg.Name] = registrar + } + return regs, nil +} + +func CreateDsps(d *models.DNSConfig, providerConfigs map[string]map[string]string) (map[string]DNSServiceProvider, error) { + dsps := map[string]DNSServiceProvider{} + for _, dsp := range d.DNSProviders { + //log.Printf("dsp.Name=%#v\n", dsp.Name) + rawMsg, ok := providerConfigs[dsp.Name] + if !ok { + return nil, fmt.Errorf("DNSServiceProvider %s not listed in -providers file.", dsp.Name) + } + provider, err := createDNSProvider(dsp.Type, rawMsg, dsp.Metadata) + if err != nil { + log.Printf("createDNSProvider provider=%#v\n", provider) + log.Printf("createDNSProvider err=%#v\n", err) + return nil, err + } + dsps[dsp.Name] = provider + } + return dsps, nil +} + +// None is a basivc provider type that does absolutely nothing. Can be useful as a placeholder for third parties or unimplemented providers. +type None struct{} + +func (n None) GetRegistrarCorrections(dc *models.DomainConfig) ([]*models.Correction, error) { + return nil, nil +} + +func (n None) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) { + return nil, nil +} + +func init() { + RegisterRegistrarType("NONE", func(map[string]string) (Registrar, error) { + return None{}, nil + }) + RegisterDomainServiceProviderType("NONE", func(map[string]string, json.RawMessage) (DNSServiceProvider, error) { + return None{}, nil + }) + +} diff --git a/providers/route53/route53Provider.go b/providers/route53/route53Provider.go new file mode 100644 index 000000000..11cd1b2ef --- /dev/null +++ b/providers/route53/route53Provider.go @@ -0,0 +1,269 @@ +package route53 + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + r53 "github.com/aws/aws-sdk-go/service/route53" + "github.com/StackExchange/dnscontrol/models" + "github.com/StackExchange/dnscontrol/providers" + "github.com/StackExchange/dnscontrol/providers/diff" +) + +type route53Provider struct { + client *r53.Route53 + zones map[string]*r53.HostedZone +} + +func newRoute53(m map[string]string, metadata json.RawMessage) (providers.DNSServiceProvider, error) { + keyId, secretKey := m["KeyId"], m["SecretKey"] + if keyId == "" || secretKey == "" { + return nil, fmt.Errorf("Route53 KeyId and SecretKey must be provided.") + } + sess := session.New(&aws.Config{ + Region: aws.String("us-west-2"), + Credentials: credentials.NewStaticCredentials(keyId, secretKey, ""), + }) + + api := &route53Provider{client: r53.New(sess)} + return api, nil +} + +func init() { + providers.RegisterDomainServiceProviderType("ROUTE53", newRoute53) +} +func sPtr(s string) *string { + return &s +} + +func (r *route53Provider) getZones() error { + var nextMarker *string + r.zones = make(map[string]*r53.HostedZone) + for { + inp := &r53.ListHostedZonesInput{MaxItems: sPtr("1"), Marker: nextMarker} + out, err := r.client.ListHostedZones(inp) + if err != nil { + return err + } + for _, z := range out.HostedZones { + domain := strings.TrimSuffix(*z.Name, ".") + r.zones[domain] = 
z + } + if out.NextMarker != nil { + nextMarker = out.NextMarker + } else { + break + } + } + return nil +} + +//map key for grouping records +type key struct { + Name, Type string +} + +func getKey(r diff.Record) key { + return key{r.GetName(), r.GetType()} +} + +func (r *route53Provider) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) { + if r.zones == nil { + if err := r.getZones(); err != nil { + return nil, err + } + } + var corrections = []*models.Correction{} + zone, ok := r.zones[dc.Name] + // add zone if it doesn't exist + if !ok { + //add correction to add zone + corrections = append(corrections, + &models.Correction{ + Msg: "Add zone to aws", + F: func() error { + in := &r53.CreateHostedZoneInput{ + Name: &dc.Name, + CallerReference: sPtr(fmt.Sprint(time.Now().UnixNano())), + } + out, err := r.client.CreateHostedZone(in) + zone = out.HostedZone + return err + }, + }) + //fake zone + zone = &r53.HostedZone{ + Id: sPtr(""), + } + } + + records, err := r.fetchRecordSets(zone.Id) + if err != nil { + return nil, err + } + + //convert to dnscontrol RecordConfig format + dc.Nameservers = nil + var existingRecords = []*models.RecordConfig{} + for _, set := range records { + for _, rec := range set.ResourceRecords { + if *set.Type == "SOA" { + continue + } else if *set.Type == "NS" && strings.TrimSuffix(*set.Name, ".") == dc.Name { + dc.Nameservers = append(dc.Nameservers, &models.Nameserver{Name: strings.TrimSuffix(*rec.Value, ".")}) + continue + } + r := &models.RecordConfig{ + NameFQDN: unescape(set.Name), + Type: *set.Type, + Target: *rec.Value, + TTL: uint32(*set.TTL), + } + existingRecords = append(existingRecords, r) + } + } + e, w := []diff.Record{}, []diff.Record{} + for _, ex := range existingRecords { + e = append(e, ex) + } + for _, want := range dc.Records { + if want.TTL == 0 { + want.TTL = 300 + } + if want.Type == "MX" { + want.Target = fmt.Sprintf("%d %s", want.Priority, want.Target) + want.Priority = 0 + } else if want.Type == "TXT" { + want.Target = fmt.Sprintf(`"%s"`, want.Target) //FIXME: better escaping/quoting + } + w = append(w, want) + } + + //diff + changeDesc := "" + _, create, delete, modify := diff.IncrementalDiff(e, w) + + namesToUpdate := map[key]bool{} + for _, c := range create { + namesToUpdate[getKey(c.Desired)] = true + changeDesc += fmt.Sprintln(c) + } + for _, d := range delete { + namesToUpdate[getKey(d.Existing)] = true + changeDesc += fmt.Sprintln(d) + } + for _, m := range modify { + namesToUpdate[getKey(m.Desired)] = true + changeDesc += fmt.Sprintln(m) + } + + if len(namesToUpdate) == 0 { + return nil, nil + } + + updates := map[key][]*models.RecordConfig{} + //for each name we need to update, collect relevant records from dc + for k := range namesToUpdate { + updates[k] = nil + for _, rc := range dc.Records { + if getKey(rc) == k { + updates[k] = append(updates[k], rc) + } + } + } + + changes := []*r53.Change{} + for k, recs := range updates { + chg := &r53.Change{} + changes = append(changes, chg) + var rrset *r53.ResourceRecordSet + if len(recs) == 0 { + chg.Action = sPtr("DELETE") + // on delete just submit the original resource set we got from r53. + for _, r := range records { + if *r.Name == k.Name+"." 
&& *r.Type == k.Type { + rrset = r + break + } + } + } else { + //on change or create, just build a new record set from our desired state + chg.Action = sPtr("UPSERT") + rrset = &r53.ResourceRecordSet{ + Name: sPtr(k.Name), + Type: sPtr(k.Type), + ResourceRecords: []*r53.ResourceRecord{}, + } + for _, r := range recs { + val := r.Target + rr := &r53.ResourceRecord{ + Value: &val, + } + rrset.ResourceRecords = append(rrset.ResourceRecords, rr) + i := int64(r.TTL) + rrset.TTL = &i //TODO: make sure that ttls are consistent within a set + } + } + chg.ResourceRecordSet = rrset + } + + changeReq := &r53.ChangeResourceRecordSetsInput{ + ChangeBatch: &r53.ChangeBatch{Changes: changes}, + } + + corrections = append(corrections, + &models.Correction{ + Msg: changeDesc, + F: func() error { + changeReq.HostedZoneId = zone.Id + _, err := r.client.ChangeResourceRecordSets(changeReq) + return err + }, + }) + return corrections, nil + +} + +func (r *route53Provider) fetchRecordSets(zoneID *string) ([]*r53.ResourceRecordSet, error) { + if zoneID == nil || *zoneID == "" { + return nil, nil + } + var next *string + var nextType *string + var records []*r53.ResourceRecordSet + for { + listInput := &r53.ListResourceRecordSetsInput{ + HostedZoneId: zoneID, + StartRecordName: next, + StartRecordType: nextType, + MaxItems: sPtr("100"), + } + list, err := r.client.ListResourceRecordSets(listInput) + if err != nil { + return nil, err + } + records = append(records, list.ResourceRecordSets...) + if list.NextRecordName != nil { + next = list.NextRecordName + nextType = list.NextRecordType + } else { + break + } + } + return records, nil +} + +//we have to process names from route53 to match what we expect and to remove their odd octal encoding +func unescape(s *string) string { + if s == nil { + return "" + } + name := strings.TrimSuffix(*s, ".") + name = strings.Replace(name, `\052`, "*", -1) //TODO: escape all octal sequences + return name +} diff --git a/providers/route53/route53Provider_test.go b/providers/route53/route53Provider_test.go new file mode 100644 index 000000000..bab443ea9 --- /dev/null +++ b/providers/route53/route53Provider_test.go @@ -0,0 +1,24 @@ +package route53 + +import "testing" + +func TestUnescape(t *testing.T) { + var tests = []struct { + experiment, expected string + }{ + {"foo", "foo"}, + {"foo.", "foo"}, + {"foo..", "foo."}, + {"foo...", "foo.."}, + {`\052`, "*"}, + {`\052.foo..`, "*.foo."}, + // {`\053.foo`, "+.foo"}, // Not implemented yet. + } + + for i, test := range tests { + actual := unescape(&test.experiment) + if test.expected != actual { + t.Errorf("%d: Expected %s, got %s", i, test.expected, actual) + } + } +} diff --git a/transform/transform.go b/transform/transform.go new file mode 100644 index 000000000..bcbcf6a61 --- /dev/null +++ b/transform/transform.go @@ -0,0 +1,113 @@ +package transform + +import ( + "fmt" + "net" + "strings" +) + +type IpConversion struct { + Low, High, NewBase net.IP + NewIPs []net.IP +} + +func ipToUint(i net.IP) (uint32, error) { + parts := i.To4() + if parts == nil || len(parts) != 4 { + return 0, fmt.Errorf("%s is not an ipv4 address", parts.String()) + } + r := uint32(parts[0])<<24 | uint32(parts[1])<<16 | uint32(parts[2])<<8 | uint32(parts[3]) + return r, nil +} + +func UintToIP(u uint32) net.IP { + return net.IPv4( + byte((u>>24)&255), + byte((u>>16)&255), + byte((u>>8)&255), + byte((u)&255)) +} + +// DecodeTransformTable turns a string-encoded table into a list of conversions. 
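Before the parser itself, a short usage sketch of the format DecodeTransformTable accepts, based on the code and tests in this patch: rows are separated by ";", each row has four "~"-separated fields (low IP, high IP, new base, new IPs), and a row may set a new base or a list of replacement IPs but not both. The import path is assumed from the repository layout:

```go
package main

import (
	"fmt"
	"net"

	"github.com/StackExchange/dnscontrol/transform"
)

func main() {
	// Row 1 rebases 11.11.11.0-11.11.11.20 onto 99.99.99.0;
	// row 2 maps everything in 22.22.22.0-22.22.22.40 to the single IP 2.2.2.2.
	table := "11.11.11.0 ~ 11.11.11.20 ~ 99.99.99.0 ~ ; 22.22.22.0 ~ 22.22.22.40 ~ ~ 2.2.2.2"
	convs, err := transform.DecodeTransformTable(table)
	if err != nil {
		panic(err)
	}
	ip, _ := transform.TransformIP(net.ParseIP("11.11.11.5"), convs)
	fmt.Println(ip) // 99.99.99.5 (offset 5 from the low end, rebased)
	ip, _ = transform.TransformIP(net.ParseIP("22.22.22.7"), convs)
	fmt.Println(ip) // 2.2.2.2 (replaced outright)
}
```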
+func DecodeTransformTable(transforms string) ([]IpConversion, error) { + result := []IpConversion{} + rows := strings.Split(transforms, ";") + for ri, row := range rows { + items := strings.Split(row, "~") + if len(items) != 4 { + return nil, fmt.Errorf("transform_table rows should have 4 elements. (%v) found in row (%v) of %#v\n", len(items), ri, transforms) + } + for i, item := range items { + items[i] = strings.TrimSpace(item) + } + + con := IpConversion{ + Low: net.ParseIP(items[0]), + High: net.ParseIP(items[1]), + NewBase: net.ParseIP(items[2]), + } + for _, ip := range strings.Split(items[3], ",") { + if ip == "" { + continue + } + addr := net.ParseIP(ip) + if addr == nil { + return nil, fmt.Errorf("%s is not a valid ip address", ip) + } + con.NewIPs = append(con.NewIPs, addr) + } + + low, _ := ipToUint(con.Low) + high, _ := ipToUint(con.High) + if low > high { + return nil, fmt.Errorf("transform_table Low should be less than High. row (%v) %v>%v (%v)\n", ri, con.Low, con.High, transforms) + } + if con.NewBase != nil && con.NewIPs != nil { + return nil, fmt.Errorf("transform_table_rows should only specify one of NewBase or NewIP. Not both.") + } + result = append(result, con) + } + + return result, nil +} + +// TransformIP transforms a single ip address. If the transform results in multiple new targets, an error will be returned. +func TransformIP(address net.IP, transforms []IpConversion) (net.IP, error) { + ips, err := TransformIPToList(address, transforms) + if err != nil { + return nil, err + } + if len(ips) != 1 { + return nil, fmt.Errorf("Expect exactly one ip for TransformIP result. Got: %s", ips) + } + return ips[0], err +} + +// TransformIPToList manipulates an net.IP based on a list of IpConversions. It can potentially expand one ip address into multiple addresses. +func TransformIPToList(address net.IP, transforms []IpConversion) ([]net.IP, error) { + thisIP, err := ipToUint(address) + if err != nil { + return nil, err + } + for _, conv := range transforms { + min, err := ipToUint(conv.Low) + if err != nil { + return nil, err + } + max, err := ipToUint(conv.High) + if err != nil { + return nil, err + } + if (thisIP >= min) && (thisIP <= max) { + if conv.NewIPs != nil { + return conv.NewIPs, nil + } + newbase, err := ipToUint(conv.NewBase) + if err != nil { + return nil, err + } + return []net.IP{UintToIP(newbase + (thisIP - min))}, nil + } + } + return []net.IP{address}, nil +} diff --git a/transform/transform_test.go b/transform/transform_test.go new file mode 100644 index 000000000..391c5e12d --- /dev/null +++ b/transform/transform_test.go @@ -0,0 +1,213 @@ +package transform + +import ( + "net" + "testing" +) + +func TestIPToUint(t *testing.T) { + ip := net.ParseIP("1.2.3.4") + u, err := ipToUint(ip) + if err != nil { + t.Fatal(err) + } + if u != 16909060 { + t.Fatalf("I to uint conversion failed. Should be 16909060. Got %d", u) + } + ip2 := UintToIP(u) + if !ip.Equal(ip2) { + t.Fatalf("IPs should be equal. 
%s is not %s", ip2, ip) + } +} + +func Test_DecodeTransformTable_failures(t *testing.T) { + result, err := DecodeTransformTable("1.2.3.4 ~ 3.4.5.6") + if result != nil { + t.Errorf("expected nil, got (%v)\n", result) + } + if err == nil { + t.Error("expect error, got none") + } +} + +func test_ip(t *testing.T, test string, expected string, actual net.IP) { + if !net.ParseIP(expected).Equal(actual) { + t.Errorf("Test %v: expected Low (%v), got (%v)\n", test, actual, expected) + } +} + +func Test_DecodeTransformTable_0(t *testing.T) { + result, err := DecodeTransformTable("1.2.3.4 ~ 2.3.4.5 ~ 3.4.5.6 ~ ") + if err != nil { + t.Fatal(err) + } + if len(result) != 1 { + t.Errorf("Test %v: expected col length (%v), got (%v)\n", 1, 1, len(result)) + } + test_ip(t, "low", "1.2.3.4", result[0].Low) + test_ip(t, "high", "2.3.4.5", result[0].High) + test_ip(t, "newBase", "3.4.5.6", result[0].NewBase) + //test_ip(t, "newIP", "", result[0].NewIPs) +} + +func Test_DecodeTransformTable_1(t *testing.T) { + result, err := DecodeTransformTable("1.2.3.4~2.3.4.5~3.4.5.6 ~;8.7.6.5 ~ 9.8.7.6 ~ 7.6.5.4 ~ ") + if err != nil { + t.Fatal(err) + } + if len(result) != 2 { + t.Errorf("Test %v: expected col length (%v), got (%v)\n", 1, 2, len(result)) + } + test_ip(t, "Low[0]", "1.2.3.4", result[0].Low) + test_ip(t, "High[0]", "2.3.4.5", result[0].High) + test_ip(t, "NewBase[0]", "3.4.5.6", result[0].NewBase) + //test_ip(t, "newIP[0]", "", result[0].NewIP) + test_ip(t, "Low[1]", "8.7.6.5", result[1].Low) + test_ip(t, "High[1]", "9.8.7.6", result[1].High) + test_ip(t, "NewBase[1]", "7.6.5.4", result[1].NewBase) + //test_ip(t, "newIP[1]", "", result[0].NewIP) +} +func Test_DecodeTransformTable_NewIP(t *testing.T) { + result, err := DecodeTransformTable("1.2.3.4 ~ 2.3.4.5 ~ ~ 3.4.5.6 ") + if err != nil { + t.Fatal(err) + } + if len(result) != 1 { + t.Errorf("Test %v: expected col length (%v), got (%v)\n", 1, 1, len(result)) + } + test_ip(t, "low", "1.2.3.4", result[0].Low) + test_ip(t, "high", "2.3.4.5", result[0].High) + //test_ip(t, "newIP", "3.4.5.6", result[0].NewIP) + test_ip(t, "newBase", "", result[0].NewBase) +} + +func Test_DecodeTransformTable_order(t *testing.T) { + raw := "9.8.7.6 ~ 8.7.6.5 ~ 7.6.5.4 ~" + result, err := DecodeTransformTable(raw) + if result != nil { + t.Errorf("Invalid range not detected: (%v)\n", raw) + } + if err == nil { + t.Error("expect error, got none") + } +} + +func Test_DecodeTransformTable_Base_and_IP(t *testing.T) { + raw := "1.1.1.1~ 8.7.6.5 ~ 7.6.5.4 ~ 4.4.4.4" + result, err := DecodeTransformTable(raw) + if result != nil { + t.Errorf("NewBase and NewIP should not both be specified: (%v)\n", raw) + } + if err == nil { + t.Error("expect error, got none") + } +} + +func Test_TransformIP(t *testing.T) { + + var transforms1 = []IpConversion{{ + Low: net.ParseIP("11.11.11.0"), + High: net.ParseIP("11.11.11.20"), + NewBase: net.ParseIP("99.99.99.0"), + }, { + Low: net.ParseIP("22.22.22.0"), + High: net.ParseIP("22.22.22.40"), + NewBase: net.ParseIP("99.99.99.100"), + }, { + Low: net.ParseIP("33.33.33.20"), + High: net.ParseIP("33.33.35.40"), + NewBase: net.ParseIP("100.100.100.0"), + }, { + Low: net.ParseIP("44.44.44.20"), + High: net.ParseIP("44.44.44.40"), + NewBase: net.ParseIP("100.100.100.40"), + }} + + var tests = []struct { + experiment string + expected string + }{ + {"11.11.11.0", "99.99.99.0"}, + {"11.11.11.1", "99.99.99.1"}, + {"11.11.11.11", "99.99.99.11"}, + {"11.11.11.19", "99.99.99.19"}, + {"11.11.11.20", "99.99.99.20"}, + {"11.11.11.21", "11.11.11.21"}, + {"22.22.22.22", 
"99.99.99.122"}, + {"22.22.22.255", "22.22.22.255"}, + {"33.33.33.0", "33.33.33.0"}, + {"33.33.33.19", "33.33.33.19"}, + {"33.33.33.20", "100.100.100.0"}, + {"33.33.33.21", "100.100.100.1"}, + {"33.33.33.33", "100.100.100.13"}, + {"33.33.35.39", "100.100.102.19"}, + {"33.33.35.40", "100.100.102.20"}, + {"33.33.35.41", "33.33.35.41"}, + {"44.44.44.24", "100.100.100.44"}, + {"44.44.44.44", "44.44.44.44"}, + } + + for _, test := range tests { + experiment := net.ParseIP(test.experiment) + expected := net.ParseIP(test.expected) + actual, err := TransformIP(experiment, transforms1) + if err != nil { + t.Errorf("%v: got an err: %v\n", experiment, err) + } + if !expected.Equal(actual) { + t.Errorf("%v: expected (%v) got (%v)\n", experiment, expected, actual) + } + } +} + +func Test_TransformIP_NewIP(t *testing.T) { + + var transforms1 = []IpConversion{{ + Low: net.ParseIP("11.11.11.0"), + High: net.ParseIP("11.11.11.20"), + NewIPs: []net.IP{net.ParseIP("1.1.1.1")}, + }, { + Low: net.ParseIP("22.22.22.0"), + High: net.ParseIP("22.22.22.40"), + NewIPs: []net.IP{net.ParseIP("2.2.2.2")}, + }, { + Low: net.ParseIP("33.33.33.20"), + High: net.ParseIP("33.33.35.40"), + NewIPs: []net.IP{net.ParseIP("3.3.3.3")}, + }, + } + + var tests = []struct { + experiment string + expected string + }{ + {"11.11.11.0", "1.1.1.1"}, + {"11.11.11.1", "1.1.1.1"}, + {"11.11.11.11", "1.1.1.1"}, + {"11.11.11.19", "1.1.1.1"}, + {"11.11.11.20", "1.1.1.1"}, + {"11.11.11.21", "11.11.11.21"}, + {"22.22.22.22", "2.2.2.2"}, + {"22.22.22.255", "22.22.22.255"}, + {"33.33.33.0", "33.33.33.0"}, + {"33.33.33.19", "33.33.33.19"}, + {"33.33.33.20", "3.3.3.3"}, + {"33.33.33.21", "3.3.3.3"}, + {"33.33.33.33", "3.3.3.3"}, + {"33.33.35.39", "3.3.3.3"}, + {"33.33.35.40", "3.3.3.3"}, + {"33.33.35.41", "33.33.35.41"}, + } + + for _, test := range tests { + experiment := net.ParseIP(test.experiment) + expected := net.ParseIP(test.expected) + actual, err := TransformIP(experiment, transforms1) + if err != nil { + t.Errorf("%v: got an err: %v\n", experiment, err) + } + if !expected.Equal(actual) { + t.Errorf("%v: expected (%v) got (%v)\n", experiment, expected, actual) + } + } +} diff --git a/vendor/github.com/DisposaBoy/JsonConfigReader/AUTHORS.md b/vendor/github.com/DisposaBoy/JsonConfigReader/AUTHORS.md new file mode 100644 index 000000000..d495fb75e --- /dev/null +++ b/vendor/github.com/DisposaBoy/JsonConfigReader/AUTHORS.md @@ -0,0 +1,4 @@ +This is the official list of JsonConfigReader authors for copyright purposes. + +* DisposaBoy `https://github.com/DisposaBoy` +* Steven Osborn `https://github.com/steve918` diff --git a/vendor/github.com/DisposaBoy/JsonConfigReader/LICENSE.md b/vendor/github.com/DisposaBoy/JsonConfigReader/LICENSE.md new file mode 100644 index 000000000..3f4ba513f --- /dev/null +++ b/vendor/github.com/DisposaBoy/JsonConfigReader/LICENSE.md @@ -0,0 +1,7 @@ +Copyright (c) 2012 The JsonConfigReader Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/DisposaBoy/JsonConfigReader/README.md b/vendor/github.com/DisposaBoy/JsonConfigReader/README.md new file mode 100644 index 000000000..5df220753 --- /dev/null +++ b/vendor/github.com/DisposaBoy/JsonConfigReader/README.md @@ -0,0 +1,36 @@ +JsonConfigReader is a proxy for [golang's io.Reader](http://golang.org/pkg/io/#Reader) that strips line comments and trailing commas, allowing you to use json as a *reasonable* config format. + +Comments start with `//` and continue to the end of the line. + +If a trailing comma is in front of `]` or `}` it will be stripped as well. + + +Given `settings.json` + + { + "key": "value", // k:v + + // a list of numbers + "list": [1, 2, 3], + } + + +You can read it in as a *normal* json file: + + package main + + import ( + "encoding/json" + "fmt" + "github.com/DisposaBoy/JsonConfigReader" + "os" + ) + + func main() { + var v interface{} + f, _ := os.Open("settings.json") + // wrap our reader before passing it to the json decoder + r := JsonConfigReader.New(f) + json.NewDecoder(r).Decode(&v) + fmt.Println(v) + } diff --git a/vendor/github.com/DisposaBoy/JsonConfigReader/reader.go b/vendor/github.com/DisposaBoy/JsonConfigReader/reader.go new file mode 100644 index 000000000..06ae65d20 --- /dev/null +++ b/vendor/github.com/DisposaBoy/JsonConfigReader/reader.go @@ -0,0 +1,94 @@ +package JsonConfigReader + +import ( + "bytes" + "io" +) + +type state struct { + r io.Reader + br *bytes.Reader +} + +func isNL(c byte) bool { + return c == '\n' || c == '\r' +} + +func isWS(c byte) bool { + return c == ' ' || c == '\t' || isNL(c) +} + +func consumeComment(s []byte, i int) int { + if i < len(s) && s[i] == '/' { + s[i-1] = ' ' + for ; i < len(s) && !isNL(s[i]); i += 1 { + s[i] = ' ' + } + } + return i +} + +func prep(r io.Reader) (s []byte, err error) { + buf := &bytes.Buffer{} + _, err = io.Copy(buf, r) + s = buf.Bytes() + if err != nil { + return + } + + i := 0 + for i < len(s) { + switch s[i] { + case '"': + i += 1 + for i < len(s) { + if s[i] == '"' { + i += 1 + break + } else if s[i] == '\\' { + i += 1 + } + i += 1 + } + case '/': + i = consumeComment(s, i+1) + case ',': + j := i + for { + i += 1 + if i >= len(s) { + break + } else if s[i] == '}' || s[i] == ']' { + s[j] = ' ' + break + } else if s[i] == '/' { + i = consumeComment(s, i+1) + } else if !isWS(s[i]) { + break + } + } + default: + i += 1 + } + } + return +} + +// Read acts as a proxy for the underlying reader and cleans p +// of comments and trailing commas preceeding ] and } +// comments are delimitted by // up until the end the line +func (st *state) Read(p []byte) (n int, err error) { + if st.br == nil { + var s []byte + if s, err = prep(st.r); err != nil { + return + } + st.br = bytes.NewReader(s) + } + return st.br.Read(p) +} + +// New returns an io.Reader acting as proxy to r +func New(r io.Reader) io.Reader { + return &state{r: r} +} diff --git a/vendor/github.com/NYTimes/gziphandler/LICENSE.md b/vendor/github.com/NYTimes/gziphandler/LICENSE.md new file mode 100644 index 000000000..b7e2ecb63 --- /dev/null +++ 
b/vendor/github.com/NYTimes/gziphandler/LICENSE.md @@ -0,0 +1,13 @@ +Copyright (c) 2015 The New York Times Company + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this library except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/NYTimes/gziphandler/README.md b/vendor/github.com/NYTimes/gziphandler/README.md new file mode 100644 index 000000000..b1d55e26e --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/README.md @@ -0,0 +1,52 @@ +Gzip Handler +============ + +This is a tiny Go package which wraps HTTP handlers to transparently gzip the +response body, for clients which support it. Although it's usually simpler to +leave that to a reverse proxy (like nginx or Varnish), this package is useful +when that's undesirable. + + +## Usage + +Call `GzipHandler` with any handler (an object which implements the +`http.Handler` interface), and it'll return a new handler which gzips the +response. For example: + +```go +package main + +import ( + "io" + "net/http" + "github.com/NYTimes/gziphandler" +) + +func main() { + withoutGz := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + io.WriteString(w, "Hello, World") + }) + + withGz := gziphandler.GzipHandler(withoutGz) + + http.Handle("/", withGz) + http.ListenAndServe("0.0.0.0:8000", nil) +} +``` + + +## Documentation + +The docs can be found at [godoc.org] [docs], as usual. + + +## License + +[Apache 2.0] [license]. + + + + +[docs]: https://godoc.org/github.com/nytimes/gziphandler +[license]: https://github.com/nytimes/gziphandler/blob/master/LICENSE.md diff --git a/vendor/github.com/NYTimes/gziphandler/gzip.go b/vendor/github.com/NYTimes/gziphandler/gzip.go new file mode 100644 index 000000000..87a841f66 --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/gzip.go @@ -0,0 +1,200 @@ +package gziphandler + +import ( + "compress/gzip" + "fmt" + "net/http" + "strconv" + "strings" + "sync" +) + +const ( + vary = "Vary" + acceptEncoding = "Accept-Encoding" + contentEncoding = "Content-Encoding" +) + +type codings map[string]float64 + +// The default qvalue to assign to an encoding if no explicit qvalue is set. +// This is actually kind of ambiguous in RFC 2616, so hopefully it's correct. +// The examples seem to indicate that it is. +const DEFAULT_QVALUE = 1.0 + +// gzipWriterPools stores a sync.Pool for each compression level for re-uze of gzip.Writers. +// Use poolIndex to covert a compression level to an index into gzipWriterPools. +var gzipWriterPools [gzip.BestCompression - gzip.BestSpeed + 2]*sync.Pool + +func init() { + for i := gzip.BestSpeed; i <= gzip.BestCompression; i++ { + addLevelPool(i) + } + addLevelPool(gzip.DefaultCompression) +} + +// poolIndex maps a compression level to its index into gzipWriterPools. It assumes that +// level is a valid gzip compression level. +func poolIndex(level int) int { + // gzip.DefaultCompression == -1, so we need to treat it special. 
+ if level == gzip.DefaultCompression { + return gzip.BestCompression - gzip.BestSpeed + 1 + } + return level - gzip.BestSpeed +} + +func addLevelPool(level int) { + gzipWriterPools[poolIndex(level)] = &sync.Pool{ + New: func() interface{} { + // NewWriterLevel only returns error on a bad level, we are guaranteeing + // that this will be a valid level so it is okay to ignore the returned + // error. + w, _ := gzip.NewWriterLevel(nil, level) + return w + }, + } +} + +// GzipResponseWriter provides an http.ResponseWriter interface, which gzips +// bytes before writing them to the underlying response. This doesn't set the +// Content-Encoding header, nor close the writers, so don't forget to do that. +type GzipResponseWriter struct { + gw *gzip.Writer + http.ResponseWriter +} + +// Write appends data to the gzip writer. +func (w GzipResponseWriter) Write(b []byte) (int, error) { + if _, ok := w.Header()["Content-Type"]; !ok { + // If content type is not set, infer it from the uncompressed body. + w.Header().Set("Content-Type", http.DetectContentType(b)) + } + return w.gw.Write(b) +} + +// Flush flushes the underlying *gzip.Writer and then the underlying +// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter +// an http.Flusher. +func (w GzipResponseWriter) Flush() { + w.gw.Flush() + if fw, ok := w.ResponseWriter.(http.Flusher); ok { + fw.Flush() + } +} + +// MustNewGzipLevelHandler behaves just like NewGzipLevelHandler except that in an error case +// it panics rather than returning an error. +func MustNewGzipLevelHandler(level int) func(http.Handler) http.Handler { + wrap, err := NewGzipLevelHandler(level) + if err != nil { + panic(err) + } + return wrap +} + +// NewGzipLevelHandler returns a wrapper function (often known as middleware) +// which can be used to wrap an HTTP handler to transparently gzip the response +// body if the client supports it (via the Accept-Encoding header). Responses will +// be encoded at the given gzip compression level. An error will be returned only +// if an invalid gzip compression level is given, so if one can ensure the level +// is valid, the returned error can be safely ignored. +func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) { + if level != gzip.DefaultCompression && (level < gzip.BestSpeed || level > gzip.BestCompression) { + return nil, fmt.Errorf("invalid compression level requested: %d", level) + } + return func(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add(vary, acceptEncoding) + + if acceptsGzip(r) { + // Bytes written during ServeHTTP are redirected to this gzip writer + // before being written to the underlying response. + gzw := gzipWriterPools[poolIndex(level)].Get().(*gzip.Writer) + defer gzipWriterPools[poolIndex(level)].Put(gzw) + gzw.Reset(w) + defer gzw.Close() + + w.Header().Set(contentEncoding, "gzip") + h.ServeHTTP(GzipResponseWriter{gzw, w}, r) + } else { + h.ServeHTTP(w, r) + } + }) + }, nil +} + +// GzipHandler wraps an HTTP handler, to transparently gzip the response body if +// the client supports it (via the Accept-Encoding header). This will compress at +// the default compression level. +func GzipHandler(h http.Handler) http.Handler { + wrapper, _ := NewGzipLevelHandler(gzip.DefaultCompression) + return wrapper(h) +} + +// acceptsGzip returns true if the given HTTP request indicates that it will +// accept a gzippped response. 
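Beyond the plain GzipHandler wrapper shown in the package README, the constructors above allow choosing the compression level explicitly. A brief usage sketch, separate from the vendored file itself:

```go
package main

import (
	"compress/gzip"
	"io"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func main() {
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "Hello, World")
	})
	// MustNewGzipLevelHandler panics only if the level is invalid,
	// so a compile-time constant like gzip.BestSpeed is safe here.
	wrap := gziphandler.MustNewGzipLevelHandler(gzip.BestSpeed)
	http.Handle("/", wrap(hello))
	http.ListenAndServe("127.0.0.1:8000", nil)
}
```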
+func acceptsGzip(r *http.Request) bool { + acceptedEncodings, _ := parseEncodings(r.Header.Get(acceptEncoding)) + return acceptedEncodings["gzip"] > 0.0 +} + +// parseEncodings attempts to parse a list of codings, per RFC 2616, as might +// appear in an Accept-Encoding header. It returns a map of content-codings to +// quality values, and an error containing the errors encounted. It's probably +// safe to ignore those, because silently ignoring errors is how the internet +// works. +// +// See: http://tools.ietf.org/html/rfc2616#section-14.3 +func parseEncodings(s string) (codings, error) { + c := make(codings) + e := make([]string, 0) + + for _, ss := range strings.Split(s, ",") { + coding, qvalue, err := parseCoding(ss) + + if err != nil { + e = append(e, err.Error()) + + } else { + c[coding] = qvalue + } + } + + // TODO (adammck): Use a proper multi-error struct, so the individual errors + // can be extracted if anyone cares. + if len(e) > 0 { + return c, fmt.Errorf("errors while parsing encodings: %s", strings.Join(e, ", ")) + } + + return c, nil +} + +// parseCoding parses a single conding (content-coding with an optional qvalue), +// as might appear in an Accept-Encoding header. It attempts to forgive minor +// formatting errors. +func parseCoding(s string) (coding string, qvalue float64, err error) { + for n, part := range strings.Split(s, ";") { + part = strings.TrimSpace(part) + qvalue = DEFAULT_QVALUE + + if n == 0 { + coding = strings.ToLower(part) + + } else if strings.HasPrefix(part, "q=") { + qvalue, err = strconv.ParseFloat(strings.TrimPrefix(part, "q="), 64) + + if qvalue < 0.0 { + qvalue = 0.0 + + } else if qvalue > 1.0 { + qvalue = 1.0 + } + } + } + + if coding == "" { + err = fmt.Errorf("empty content-coding") + } + + return +} diff --git a/vendor/github.com/TomOnTime/utfutil/LICENSE b/vendor/github.com/TomOnTime/utfutil/LICENSE new file mode 100644 index 000000000..de5eead01 --- /dev/null +++ b/vendor/github.com/TomOnTime/utfutil/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2016, Tom Limoncelli +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of utfutil nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/TomOnTime/utfutil/README.md b/vendor/github.com/TomOnTime/utfutil/README.md new file mode 100644 index 000000000..b821c85c4 --- /dev/null +++ b/vendor/github.com/TomOnTime/utfutil/README.md @@ -0,0 +1,70 @@ +# utfutil + +Utilities to make it easier to read text encoded as UTF-16. + +## Dealing with UTF-16 files from Windows. + +Ever have code that worked for years until you received a file from a MS-Windows system that just didn't work at all? Looking at a hex dump you realize every other byte is \0. WTF? No, UTF. More specifically UTF-16LE with an optional BOM. + +What does all that mean? Well, first you should read ["The Absolute Minimum Every Software Developer Absolutely, Positively Must Know About Unicode and Character Sets (No Excuses!)"](http://www.joelonsoftware.com/articles/Unicode.html) by Joel Spolsky. + +Now you are an expert. You can spend an afternoon trying to figure out how the heck to put all that together and use `golang.org/x/text/encoding/unicode` to decode UTF-16LE. However I've already done that for you. Now you can take the easy way out change ioutil.ReadFile() to utfutil.ReadFile(). Everything will just work. + +### utfutil.ReadFile() is the equivalent of ioutil.ReadFile() + +OLD: Works with UTF8 and ASCII files: + +``` + data, err := ioutil.ReadFile(filename) +``` + +NEW: Works if someone gives you a Windows UTF-16LE file occasionally but normally you are processing UTF8 files: + +``` + data, err := utfutil.ReadFile(filename, utfutil.UTF8) +``` + +### utfutil.OpenFile() is the equivalent of os.Open(). + +OLD: Works with UTF8 and ASCII files: + +``` + data, err := os.Open(filename) +``` + +NEW: Works if someone gives you a file with a BOM: + +``` + data, err := utfutil.OpenFile(filename, utfutil.HTML5) +``` + +### utfutil.NewScanner() is for reading files line-by-line + +It works like os.Open(): + +``` + s, err := utfutil.NewScanner(filename, utfutil.HTML5) +``` + + +## Encoding hints: + +What's that second argument all about? + +Since it is impossible to guess 100% correctly if there is no BOM, +the functions take a 2nd parameter of type "EncodingHint" where you +specify the default encoding for BOM-less files. + +``` +UTF8 No BOM? Assume UTF-8 +UTF16LE No BOM? Assume UTF 16 Little Endian +UTF16BE No BOM? Assume UTF 16 Big Endian +WINDOWS = UTF16LE (i.e. a reasonable guess if file is from MS-Windows) +POSIX = UTF8 (i.e. a reasonable guess if file is from Unix or Unix-like systems) +HTML5 = UTF8 (i.e. a reasonable guess if file is from the web) +``` + +## Future Directions + +If someone writes a golang equivalent of uchatdet, I'll add a hint +called "AUTO" which uses it. That would be awesome. Volunteers? diff --git a/vendor/github.com/TomOnTime/utfutil/utfutil.go b/vendor/github.com/TomOnTime/utfutil/utfutil.go new file mode 100644 index 000000000..a7823abac --- /dev/null +++ b/vendor/github.com/TomOnTime/utfutil/utfutil.go @@ -0,0 +1,110 @@ +// Package utfutil provides methods that make it easy to read data in an UTF-encoding agnostic. +package utfutil + +// These functions autodetect UTF BOM and return UTF-8. If no +// BOM is found, a hint is provided as to which encoding to assume. +// You can use them as replacements for os.Open() and ioutil.ReadFile() +// when the encoding of the file is unknown. + +// utfutil.OpenFile() is a replacement for os.Open(). +// utfutil.ReadFile() is a replacement for ioutil.ReadFile(). +// utfutil.NewScanner() takes a filename and returns a Scanner. 
+// utfutil.NewReader() rewraps an existing scanner to make it UTF-encoding agnostic. +// utfutil.BytesReader() takes a []byte and decodes it to UTF-8. + +// Since it is impossible to guess 100% correctly if there is no BOM, +// the functions take a 2nd parameter of type "EncodingHint" where you +// specify the default encoding for BOM-less data. + +// If someone writes a golang equivalent of uchatdet, I'll add +// a hint called "AUTO" which uses it. + +// Inspiration: I wrote this after spending half a day trying +// to figure out how to use unicode.BOMOverride. +// Hopefully this will save other golang newbies from the same. +// (golang.org/x/text/encoding/unicode) + +import ( + "bufio" + "bytes" + "io" + "io/ioutil" + "os" + + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/unicode" + "golang.org/x/text/transform" +) + +// EncodingHint indicates the file's encoding if there is no BOM. +type EncodingHint int + +const ( + UTF8 EncodingHint = iota // UTF-8 + UTF16LE // UTF 16 Little Endian + UTF16BE // UTF 16 Big Endian + WINDOWS = UTF16LE // File came from a MS-Windows system + POSIX = UTF8 // File came from Unix or Unix-like systems + HTML5 = UTF8 // File came from the web +) + +// About utfutil.HTML5: +// This technique is recommended by the W3C for use in HTML 5: +// "For compatibility with deployed content, the byte order +// mark (also known as BOM) is considered more authoritative +// than anything else." http://www.w3.org/TR/encoding/#specification-hooks + +// OpenFile is the equivalent of os.Open(). +func OpenFile(name string, d EncodingHint) (io.Reader, error) { + f, err := os.Open(name) + if err != nil { + return nil, err + } + return NewReader(f, d), nil +} + +// ReadFile is the equivalent of ioutil.ReadFile() +func ReadFile(name string, d EncodingHint) ([]byte, error) { + file, err := OpenFile(name, d) + if err != nil { + return nil, err + } + return ioutil.ReadAll(file) +} + +// NewScanner is a convenience function that takes a filename and returns a scanner. +func NewScanner(name string, d EncodingHint) (*bufio.Scanner, error) { + f, err := OpenFile(name, d) + if err != nil { + return nil, err + } + return bufio.NewScanner(f), nil +} + +// NewReader wraps a Reader to decode Unicode to UTF-8 as it reads. +func NewReader(r io.Reader, d EncodingHint) io.Reader { + var decoder *encoding.Decoder + switch d { + case UTF8: + // Make a transformer that assumes UTF-8 but abides by the BOM. + decoder = unicode.UTF8.NewDecoder() + case UTF16LE: + // Make an tranformer that decodes MS-Windows (16LE) UTF files: + winutf := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM) + // Make a transformer that is like winutf, but abides by BOM if found: + decoder = winutf.NewDecoder() + case UTF16BE: + // Make an tranformer that decodes UTF-16BE files: + utf16be := unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM) + // Make a transformer that is like utf16be, but abides by BOM if found: + decoder = utf16be.NewDecoder() + } + + // Make a Reader that uses utf16bom: + return transform.NewReader(r, unicode.BOMOverride(decoder)) +} + +// BytesReader is a convenience function that takes a []byte and decodes them to UTF-8. 
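The same BOM-aware decoding also works for data that is already in memory. A short usage sketch, separate from the vendored file, decoding a UTF-16LE byte slice with BytesReader (declared just below):

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/TomOnTime/utfutil"
)

func main() {
	// "hi" encoded as UTF-16LE with a leading byte order mark.
	raw := []byte{0xff, 0xfe, 'h', 0x00, 'i', 0x00}
	// WINDOWS hints UTF-16LE for BOM-less input; the BOM wins when present.
	data, err := ioutil.ReadAll(utfutil.BytesReader(raw, utfutil.WINDOWS))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", data) // "hi"
}
```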
+func BytesReader(b []byte, d EncodingHint) io.Reader { + return NewReader(bytes.NewReader(b), d) +} diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 000000000..5f14d1162 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 000000000..56fdfc2bf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,145 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. +// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. 
+// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + var errs []error + if origErr != nil { + errs = append(errs, origErr) + } + return newBaseError(code, message, errs) +} + +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. +func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a new request error wrapper for the given Error +// provided. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 000000000..0202a008f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,194 @@ +package awserr + +import "fmt" + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. 
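+//
+// Editorial sketch of how callers normally reach this type through New (the
+// error code string and the origErr variable are illustrative):
+//
+//    err := awserr.New("ExampleCode", "something went wrong", origErr)
+//    fmt.Println(err.Code(), err.Message()) // "ExampleCode something went wrong"
+//    fmt.Println(err.OrigErr() == origErr)  // true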
+type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. +func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occurred", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string +} + +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. 
+// Alias for Error to satisfy the stringer interface. +func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += fmt.Sprintf("%s", e[i].Error()) + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 000000000..8429470b9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,100 @@ +package awsutil + +import ( + "io" + "reflect" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. 
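+//
+// The exported Copy above drives this recursion. Editorial sketch of a typical
+// call (the two struct types are invented for illustration):
+//
+//    type in struct{ Name *string }
+//    type out struct{ Name *string }
+//    src := &in{Name: aws.String("example")}
+//    dst := &out{}
+//    awsutil.Copy(dst, src) // dst.Name is a newly allocated *string holding "example"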
+func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + dst.Set(reflect.New(e)) + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. + if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 000000000..59fa4a558 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type the are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 000000000..4d2a01e8c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,222 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. 
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, value := range values { + value := reflect.Indirect(value) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
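+//
+// Editorial sketch (the response value and field name are illustrative; the SDK
+// uses this when extracting pagination and waiter tokens from response structs):
+//
+//    vals, err := awsutil.ValuesAtPath(resp, "NextMarker")
+//    // vals collects whatever sits at that path; a missing or nil value
+//    // comes back as (nil, nil) rather than as an error.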
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 000000000..fc38172fe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,107 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
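+//
+// Editorial sketch of the exported Prettify above (struct type invented for
+// illustration):
+//
+//    type widget struct{ Name *string }
+//    fmt.Println(awsutil.Prettify(widget{Name: aws.String("x")}))
+//    // {
+//    //   Name: "x"
+//    // }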
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + strtype := v.Type().String() + if strtype == "time.Time" { + fmt.Fprintf(buf, "%s", v.Interface()) + break + } else if strings.HasPrefix(strtype, "io.") { + buf.WriteString("") + break + } + + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + prettify(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + prettify(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + prettify(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + if !v.IsValid() { + fmt.Fprint(buf, "") + return + } + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + case io.ReadSeeker, io.Reader: + format = "buffer(%p)" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go new file mode 100644 index 000000000..b6432f1a1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -0,0 +1,89 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value. 
+func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go new file mode 100644 index 000000000..c8d0564d8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -0,0 +1,120 @@ +package client + +import ( + "fmt" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + Endpoint, SigningRegion string +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. 
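+//
+// Editorial sketch of how a service package wires this together (the metadata
+// values and the extra handler are illustrative):
+//
+//    c := client.New(cfg, metadata.ClientInfo{ServiceName: "route53"}, handlers)
+//    c.Handlers.Sign.PushBack(customSignHandler)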
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers, + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = 3 + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. +func (c *Client) AddDebugHandlers() { + if !c.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + c.Handlers.Send.PushFront(logRequest) + c.Handlers.Send.PushBack(logResponse) +} + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) + + if logBody { + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. + r.Body.Seek(r.BodyStart, 0) + r.HTTPRequest.Body = ioutil.NopCloser(r.Body) + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +func logResponse(r *request.Request) { + var msg = "no response data" + if r.HTTPResponse != nil { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) + msg = string(dumpedBody) + } else if r.Error != nil { + msg = r.Error.Error() + } + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go new file mode 100644 index 000000000..43a3676b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -0,0 +1,90 @@ +package client + +import ( + "math/rand" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, implement the +// request.Retryer interface or create a structure type that composes this +// struct and override the specific methods. 
For example, to override only +// the MaxRetries method: +// +// type retryer struct { +// service.DefaultRetryer +// } +// +// // This implementation always has 100 max retries +// func (d retryer) MaxRetries() uint { return 100 } +type DefaultRetryer struct { + NumMaxRetries int +} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API request. +func (d DefaultRetryer) MaxRetries() int { + return d.NumMaxRetries +} + +var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) + +// RetryRules returns the delay duration before retrying this request again +func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { + // Set the upper limit of delay in retrying at ~five minutes + minTime := 30 + throttle := d.shouldThrottle(r) + if throttle { + minTime = 500 + } + + retryCount := r.RetryCount + if retryCount > 13 { + retryCount = 13 + } else if throttle && retryCount > 8 { + retryCount = 8 + } + + delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime) + return time.Duration(delay) * time.Millisecond +} + +// ShouldRetry returns true if the request should be retried. +func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + if r.HTTPResponse.StatusCode >= 500 { + return true + } + return r.IsErrorRetryable() || d.shouldThrottle(r) +} + +// ShouldThrottle returns true if the request should be throttled. +func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { + if r.HTTPResponse.StatusCode == 502 || + r.HTTPResponse.StatusCode == 503 || + r.HTTPResponse.StatusCode == 504 { + return true + } + return r.IsErrorThrottle() +} + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 000000000..4778056dd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,12 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. +type ClientInfo struct { + ServiceName string + APIVersion string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go new file mode 100644 index 000000000..fca922584 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -0,0 +1,422 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +// UseServiceDefaultRetries instructs the config to use the service's own +// default number of retries. This will be the default action if +// Config.MaxRetries is nil also. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer +// interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. By default, +// all clients will use the defaults.DefaultConfig tructure. +// +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients. 
+// sess, err := session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// }) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to + // retrieve credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to a + // chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `""` to use the default generated endpoint. + // + // @note You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // @see http://docs.aws.amazon.com/general/latest/gr/rande.html + // AWS Regions and Endpoints + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service + // specific configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of + // recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the request.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. + // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for + // missing required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. + DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client + // will use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // @note This configuration option is specific to the Amazon S3 service. 
+ // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` + // header to PUT requests over 2MB of content. 100-Continue instructs the + // HTTP client not to send the body until the service responds with a + // `continue` status. This is useful to prevent sending the request body + // until after the request is authenticated, and validated. + // + // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + // + // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s + // `ExpectContinueTimeout` for information on adjusting the continue wait + // timeout. https://golang.org/pkg/net/http/#Transport + // + // You should use this flag to disble 100-Continue if you experience issues + // with proxies or third party S3 compatible services. + S3Disable100Continue *bool + + // Set this to `true` to enable S3 Accelerate feature. For all operations + // compatible with S3 Accelerate will use the accelerate endpoint for + // requests. Requests not compatible will fall back to normal S3 requests. + // + // The bucket must be enable for accelerate to be used with S3 client with + // accelerate enabled. If the bucket is not enabled for accelerate an error + // will be returned. The bucket name must be DNS compatible to also work + // with accelerate. + // + // Not compatible with UseDualStack requests will fail if both flags are + // specified. + S3UseAccelerate *bool + + // Set this to `true` to disable the EC2Metadata client from overriding the + // default http.Client's Timeout. This is helpful if you do not want the + // EC2Metadata client to create a new http.Client. This options is only + // meaningful if you're not already using a custom HTTP client with the + // SDK. Enabled by default. + // + // Must be set and provided to the session.NewSession() in order to disable + // the EC2Metadata overriding the timeout for default credentials chain. + // + // Example: + // sess, err := session.NewSession(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true)) + // + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + // Instructs the endpiont to be generated for a service client to + // be the dual stack endpoint. The dual stack endpoint will support + // both IPv4 and IPv6 addressing. + // + // Setting this for a service which does not support dual stack will fail + // to make requets. It is not recommended to set this value on the session + // as it will apply to all service clients created with the session. Even + // services which don't support dual stack endpoints. + // + // If the Endpoint config value is also provided the UseDualStack flag + // will be ignored. + // + // Only supported with. + // + // sess, err := session.NewSession() + // + // svc := s3.New(sess, &aws.Config{ + // UseDualStack: aws.Bool(true), + // }) + UseDualStack *bool + + // SleepDelay is an override for the func the SDK will call when sleeping + // during the lifecycle of a request. Specifically this will be used for + // request delays. This value should only be used for testing. To adjust + // the delay of a request see the aws/client.DefaultRetryer and + // aws/request.Retryer. + SleepDelay func(time.Duration) +} + +// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers. 
+// +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients. +// sess, err := session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// ) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). +// WithRegion("us-west-2"), +// ) +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c +} + +// WithUseDualStack sets a config UseDualStack value returning a Config +// pointer for chaining. 
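+//
+// Editorial sketch of the chaining style all of these setters share (the
+// region string is illustrative):
+//
+//    cfg := aws.NewConfig().
+//        WithRegion("us-east-1").
+//        WithMaxRetries(5).
+//        WithUseDualStack(true)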
+func (c *Config) WithUseDualStack(enable bool) *Config { + c.UseDualStack = &enable + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// MergeIn merges the passed in configs into the existing config object. +func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. +func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 000000000..3b73a7da7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,369 @@ +package aws + +import "time" + +// String returns a pointer to the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. 
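+//
+// Editorial sketch of the usual round trip:
+//
+//    p := aws.String("example.com") // *string for an SDK field
+//    _ = aws.StringValue(p)         // "example.com"
+//    _ = aws.StringValue(nil)       // "" instead of a nil-pointer panic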
+func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. 
+func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. 
+func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. +func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". +// The result is undefined if the Unix time cannot be represented by an int64. +// Which includes calling TimeUnixMilli on a zero Time is undefined. +// +// This utility is useful for service API's such as CloudWatch Logs which require +// their unix time values to be in milliseconds. +// +// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. 
+func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 000000000..8456e29b5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,152 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "runtime" + "strconv" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. +// +// The Content-Length will only be aded to the request if the length of the body +// is greater than 0. If the body is empty or the current `Content-Length` +// header is <= 0, the header will also be stripped. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + var length int64 + + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ = strconv.ParseInt(slength, 10, 64) + } else { + switch body := r.Body.(type) { + case nil: + length = 0 + case lener: + length = int64(body.Len()) + case io.Seeker: + r.BodyStart, _ = body.Seek(0, 1) + end, _ := body.Seek(0, 2) + body.Seek(r.BodyStart, 0) // make sure to seek back to original location + length = end - r.BodyStart + default: + panic("Cannot get length of body, must provide `ContentLength`") + } + } + + if length > 0 { + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + } else { + r.HTTPRequest.ContentLength = 0 + r.HTTPRequest.Header.Del("Content-Length") + } +}} + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. 
+var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) { + var err error + r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest) + if err != nil { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other url redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + } +}} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + r.Config.SleepDelay(r.RetryDelay) + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } +}} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. 
+var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
+	if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
+		r.Error = aws.ErrMissingRegion
+	} else if r.ClientInfo.Endpoint == "" {
+		r.Error = aws.ErrMissingEndpoint
+	}
+}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
new file mode 100644
index 000000000..7d50b1557
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
@@ -0,0 +1,17 @@
+package corehandlers
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+// ValidateParametersHandler is a request handler to validate the input parameters.
+// Validating parameters only has meaning if done prior to the request being sent.
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
+	if !r.ParamsFilled() {
+		return
+	}
+
+	if v, ok := r.Params.(request.Validator); ok {
+		if err := v.Validate(); err != nil {
+			r.Error = err
+		}
+	}
+}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 000000000..857311f64
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,100 @@
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var (
+	// ErrNoValidProvidersFoundInChain is returned when there are no valid
+	// providers in the ChainProvider.
+	//
+	// This has been deprecated. For verbose error messaging set
+	// aws.Config.CredentialsChainVerboseErrors to true
+	//
+	// @readonly
+	ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+		`no valid providers in chain. Deprecated.
+	For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+		nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value, ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain.
+//
+//     creds := NewChainCredentials(
+//         []Provider{
+//             &EnvProvider{},
+//             &EC2RoleProvider{
+//                 Client: ec2metadata.New(sess),
+//             },
+//         })
+//
+//     // Usage of ChainCredentials with aws.Config
+//     svc := ec2.New(&aws.Config{Credentials: creds})
+//
+type ChainProvider struct {
+	Providers     []Provider
+	curr          Provider
+	VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. +// +// If a provider is found it will be cached and any calls to IsExpired() +// will return the expired state of the cached provider. +func (c *ChainProvider) Retrieve() (Value, error) { + var errs []error + for _, p := range c.Providers { + creds, err := p.Retrieve() + if err == nil { + c.curr = p + return creds, nil + } + errs = append(errs, err) + } + c.curr = nil + + var err error + err = ErrNoValidProvidersFoundInChain + if c.VerboseErrors { + err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) + } + return Value{}, err +} + +// IsExpired will returned the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. +func (c *ChainProvider) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go new file mode 100644 index 000000000..7b8ebf5f9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -0,0 +1,223 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials Value have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := NewEnvCredentials() +// +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := NewCredentials(&EC2RoleProvider{}) +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials + +import ( + "sync" + "time" +) + +// AnonymousCredentials is an empty Credential object that can be used as +// dummy placeholder credentials for requests that do not need signed. 
+// +// This Credentials can be used to configure a service to not sign requests +// when making service API calls. For example, when accessing public +// s3 buckets. +// +// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials}) +// // Access public S3 buckets. +// +// @readonly +var AnonymousCredentials = NewStaticCredentials("", "", "") + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Provider used to get credentials + ProviderName string +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. +type Provider interface { + // Refresh returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// A Credentials provides synchronous safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + creds Value + forceRefresh bool + m sync.Mutex + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. 
+func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + c.m.Lock() + defer c.m.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.Lock() + defer c.m.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 000000000..aa9d689a0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,178 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "path" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. 
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a ConfigProvider to create an EC2Metadata client.
+// The ConfigProvider is satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+	p := &EC2RoleProvider{
+		Client: ec2metadata.New(c),
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes an EC2Metadata client to use when connecting to the
+// EC2 metadata service.
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+	p := &EC2RoleProvider{
+		Client: client,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service. An error will be
+// returned if the request fails, or the desired credentials cannot be
+// extracted from the response.
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
+	credsList, err := requestCredList(m.Client)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	if len(credsList) == 0 {
+		return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+	}
+	credsName := credsList[0]
+
+	roleCreds, err := requestCred(m.Client, credsName)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+	return credentials.Value{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+		ProviderName:    ProviderName,
+	}, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshalling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+	// Success State
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+
+	// Error state
+	Code    string
+	Message string
+}
+
+const iamSecurityCredsPath = "/iam/security-credentials"
+
+// requestCredList requests a list of credentials from the EC2 service. An
+// error is returned if the request fails or the response cannot be read.
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
+	resp, err := client.GetMetadata(iamSecurityCredsPath)
+	if err != nil {
+		return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+	}
+
+	credsList := []string{}
+	s := bufio.NewScanner(strings.NewReader(resp))
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
+	}
+
+	return credsList, nil
+}
+
+// requestCred requests the credentials for a specific role from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response,
+// an error will be returned.
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New("SerializationError", + fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go new file mode 100644 index 000000000..a4cec5c55 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -0,0 +1,191 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. The format of the refreshable credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// { +// "code": "ErrorCode", +// "message": "Helpful error message." +// } +package endpointcreds + +import ( + "encoding/json" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ProviderName is the name of the credentials provider. +const ProviderName = `CredentialsEndpointProvider` + +// Provider satisfies the credentials.Provider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + staticCreds bool + credentials.Expiry + + // Requires a AWS Client to make HTTP requests to the endpoint with. + // the Endpoint the request will be made to is provided by the aws.Config's + // Endpoint value. + Client *client.Client + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. 
+	ExpiryWindow time.Duration
+}
+
+// NewProviderClient returns a credentials Provider for retrieving AWS credentials
+// from an arbitrary endpoint.
+func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
+	p := &Provider{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName: "CredentialsEndpoint",
+				Endpoint:    endpoint,
+			},
+			handlers,
+		),
+	}
+
+	p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
+	p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
+	p.Client.Handlers.Validate.Clear()
+	p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return p
+}
+
+// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
+// from an arbitrary endpoint concurrently. The client will request the
+// credentials from the endpoint whenever they need to be refreshed.
+func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
+	return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *Provider) IsExpired() bool {
+	if p.staticCreds {
+		return false
+	}
+	return p.Expiry.IsExpired()
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
+func (p *Provider) Retrieve() (credentials.Value, error) {
+	resp, err := p.getCredentials()
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName},
+			awserr.New("CredentialsEndpointError", "failed to load credentials", err)
+	}
+
+	if resp.Expiration != nil {
+		p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+	} else {
+		p.staticCreds = true
+	}
+
+	return credentials.Value{
+		AccessKeyID:     resp.AccessKeyID,
+		SecretAccessKey: resp.SecretAccessKey,
+		SessionToken:    resp.Token,
+		ProviderName:    ProviderName,
+	}, nil
+}
+
+type getCredentialsOutput struct {
+	Expiration      *time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+}
+
+type errorOutput struct {
+	Code    string `json:"code"`
+	Message string `json:"message"`
+}
+
+func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
+	op := &request.Operation{
+		Name:       "GetCredentials",
+		HTTPMethod: "GET",
+	}
+
+	out := &getCredentialsOutput{}
+	req := p.Client.NewRequest(op, nil, out)
+	req.HTTPRequest.Header.Set("Accept", "application/json")
+
+	return out, req.Send()
+}
+
+func validateEndpointHandler(r *request.Request) {
+	if len(r.ClientInfo.Endpoint) == 0 {
+		r.Error = aws.ErrMissingEndpoint
+	}
+}
+
+func unmarshalHandler(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	out := r.Data.(*getCredentialsOutput)
+	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
+		r.Error = awserr.New("SerializationError",
+			"failed to decode endpoint credentials",
+			err,
+		)
+	}
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	var errOut errorOutput
+	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
+		r.Error = awserr.New("SerializationError",
+			"failed to decode endpoint credentials",
+			err,
+		)
+	}
+
+	// Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error + r.Error = awserr.New(errOut.Code, errOut.Message, nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 000000000..96655bc46 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,77 @@ +package credentials + +import ( + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. + // + // @readonly + ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + // + // @readonly + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. 
+func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini new file mode 100644 index 000000000..7fc91d9d2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 000000000..7fb7cbf0d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,151 @@ +package credentials + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/go-ini/ini" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + // + // @readonly + ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. 
+func loadProfile(filename, profile string) (Value, error) { + config, err := ini.Load(filename) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) + } + + id, err := iniProfile.GetKey("aws_access_key_id") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + err) + } + + secret, err := iniProfile.GetKey("aws_secret_access_key") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.Key("aws_session_token") + + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + ProviderName: SharedCredsProviderName, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. +func (p *SharedCredentialsProvider) filename() (string, error) { + if p.Filename == "" { + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" { + return p.Filename, nil + } + + homeDir := os.Getenv("HOME") // *nix + if homeDir == "" { // Windows + homeDir = os.Getenv("USERPROFILE") + } + if homeDir == "" { + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = filepath.Join(homeDir, ".aws", "credentials") + } + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". +func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go new file mode 100644 index 000000000..4f5dab3fc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -0,0 +1,57 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// StaticProviderName provides a name of Static provider +const StaticProviderName = "StaticProvider" + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + // + // @readonly + ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set programmatically, +// and will never expire. +type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. +func NewStaticCredentials(id, secret, token string) *Credentials { + return NewCredentials(&StaticProvider{Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + }}) +} + +// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object +// wrapping the static credentials value provide. 
Same as NewStaticCredentials +// but takes the creds Value instead of individual fields +func NewStaticCredentialsFromCreds(creds Value) *Credentials { + return NewCredentials(&StaticProvider{Value: creds}) +} + +// Retrieve returns the credentials or error if the credentials are invalid. +func (s *StaticProvider) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty + } + + if len(s.Value.ProviderName) == 0 { + s.Value.ProviderName = StaticProviderName + } + return s.Value, nil +} + +// IsExpired returns if the credentials are expired. +// +// For StaticProvider, the credentials never expired. +func (s *StaticProvider) IsExpired() bool { + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go new file mode 100644 index 000000000..30c847ae2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -0,0 +1,161 @@ +// Package stscreds are credential Providers to retrieve STS AWS credentials. +// +// STS provides multiple ways to retrieve credentials which can be used when making +// future AWS service API operation calls. +package stscreds + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sts" +) + +// ProviderName provides a name of AssumeRole provider +const ProviderName = "AssumeRoleProvider" + +// AssumeRoler represents the minimal subset of the STS client API used by this provider. +type AssumeRoler interface { + AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) +} + +// DefaultDuration is the default amount of time in minutes that the credentials +// will be valid for. +var DefaultDuration = time.Duration(15) * time.Minute + +// AssumeRoleProvider retrieves temporary credentials from the STS service, and +// keeps track of their expiration time. This provider must be used explicitly, +// as it is not included in the credentials chain. +type AssumeRoleProvider struct { + credentials.Expiry + + // STS client to make assume role request with. + Client AssumeRoler + + // Role to be assumed. + RoleARN string + + // Session name, if you wish to reuse the credentials elsewhere. + RoleSessionName string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). 
+ SerialNumber *string + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + TokenCode *string + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes a Config provider to create the STS client. The ConfigProvider is +// satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: sts.New(c), + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes an AssumeRoler which can be satisfiede by the STS client. +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. + p.Duration = DefaultDuration + } + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64(p.Duration / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil && p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } + roleOutput, err := p.Client.AssumeRole(input) + + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. 
+ p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: *roleOutput.Credentials.AccessKeyId, + SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, + SessionToken: *roleOutput.Credentials.SessionToken, + ProviderName: ProviderName, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go new file mode 100644 index 000000000..10b7d8649 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -0,0 +1,129 @@ +// Package defaults is a collection of helpers to retrieve the SDK's default +// configuration and handlers. +// +// Generally this package shouldn't be used directly, but session.Session +// instead. This package is useful when you need to reset the defaults +// of a session or service client to the SDK defaults before setting +// additional parameters. +package defaults + +import ( + "fmt" + "net/http" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/endpoints" +) + +// A Defaults provides a collection of default values for SDK clients. +type Defaults struct { + Config *aws.Config + Handlers request.Handlers +} + +// Get returns the SDK's default values with Config and handlers pre-configured. +func Get() Defaults { + cfg := Config() + handlers := Handlers() + cfg.Credentials = CredChain(cfg, handlers) + + return Defaults{ + Config: cfg, + Handlers: handlers, + } +} + +// Config returns the default configuration without credentials. +// To retrieve a config with credentials also included use +// `defaults.Get().Config` instead. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the configuration of an +// existing service client or session. +func Config() *aws.Config { + return aws.NewConfig(). + WithCredentials(credentials.AnonymousCredentials). + WithRegion(os.Getenv("AWS_REGION")). + WithHTTPClient(http.DefaultClient). + WithMaxRetries(aws.UseServiceDefaultRetries). + WithLogger(aws.NewDefaultLogger()). + WithLogLevel(aws.LogOff). + WithSleepDelay(time.Sleep) +} + +// Handlers returns the default request handlers. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the request handlers of an +// existing service client or session. +func Handlers() request.Handlers { + var handlers request.Handlers + + handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + handlers.Validate.AfterEachFn = request.HandlerListStopOnError + handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.AfterEachFn = request.HandlerListStopOnError + handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + handlers.Send.PushBackNamed(corehandlers.SendHandler) + handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) + + return handlers +} + +// CredChain returns the default credential chain. 
+// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the credentials of an +// existing service client or session's Config. +func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { + return credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + RemoteCredProvider(*cfg, handlers), + }, + }) +} + +// RemoteCredProvider returns a credenitials provider for the default remote +// endpoints such as EC2 or ECS Roles. +func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") + + if len(ecsCredURI) > 0 { + return ecsCredProvider(cfg, handlers, ecsCredURI) + } + + return ec2RoleProvider(cfg, handlers) +} + +func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) credentials.Provider { + const host = `169.254.170.2` + + return endpointcreds.NewProviderClient(cfg, handlers, + fmt.Sprintf("http://%s%s", host, uri), + func(p *endpointcreds.Provider) { + p.ExpiryWindow = 5 * time.Minute + }, + ) +} + +func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, + aws.StringValue(cfg.Region), true, false) + + return &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(cfg, handlers, endpoint, signingRegion), + ExpiryWindow: 5 * time.Minute, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go new file mode 100644 index 000000000..669c813a0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -0,0 +1,140 @@ +package ec2metadata + +import ( + "encoding/json" + "fmt" + "path" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// GetMetadata uses the path provided to request information from the EC2 +// instance metdata service. The content will be returned as a string, or +// error if the request failed. +func (c *EC2Metadata) GetMetadata(p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "meta-data", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + + return output.Content, req.Send() +} + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + op := &request.Operation{ + Name: "GetDynamicData", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "dynamic", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + + return output.Content, req.Send() +} + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. 
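+//
+// A minimal usage sketch (illustrative only; mySession is assumed to be an
+// existing *session.Session, and error handling is abbreviated):
+//
+//    svc := ec2metadata.New(mySession)
+//    doc, err := svc.GetInstanceIdentityDocument()
+//    if err == nil {
+//        fmt.Println(doc.Region, doc.InstanceID)
+//    }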
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicData("instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("SerializationError", + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// IAMInfo retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { + resp, err := c.GetMetadata("iam/info") + if err != nil { + return EC2IAMInfo{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 IAM info", err) + } + + info := EC2IAMInfo{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { + return EC2IAMInfo{}, + awserr.New("SerializationError", + "failed to decode EC2 IAM info", err) + } + + if info.Code != "Success" { + errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) + return EC2IAMInfo{}, + awserr.New("EC2MetadataError", errMsg, nil) + } + + return info, nil +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + resp, err := c.GetMetadata("placement/availability-zone") + if err != nil { + return "", err + } + + // returns region without the suffix. Eg: us-west-2a becomes us-west-2 + return resp[:len(resp)-1], nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) Available() bool { + if _, err := c.GetMetadata("instance-id"); err != nil { + return false + } + + return true +} + +// An EC2IAMInfo provides the shape for unmarshalling +// an IAM info from the metadata API +type EC2IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} + +// An EC2InstanceIdentityDocument provides the shape for unmarshalling +// an instance identity document +type EC2InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go new file mode 100644 index 000000000..5b4379dbd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -0,0 +1,124 @@ +// Package ec2metadata provides the client for making API calls to the +// EC2 Metadata service. 
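+//
+// A short illustrative sketch of typical use (mySession is an assumed,
+// pre-existing *session.Session; it is not defined in this package):
+//
+//    svc := ec2metadata.New(mySession)
+//    if svc.Available() {
+//        region, _ := svc.Region()
+//        // region is the instance's region, e.g. "us-west-2"
+//    }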
+package ec2metadata + +import ( + "bytes" + "errors" + "io" + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ServiceName is the name of the service. +const ServiceName = "ec2metadata" + +// A EC2Metadata is an EC2 Metadata service Client. +type EC2Metadata struct { + *client.Client +} + +// New creates a new instance of the EC2Metadata client with a session. +// This client is safe to use across multiple goroutines. +// +// +// Example: +// // Create a EC2Metadata client from just a session. +// svc := ec2metadata.New(mySession) +// +// // Create a EC2Metadata client with additional configuration +// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { + c := p.ClientConfig(ServiceName, cfgs...) + return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// NewClient returns a new EC2Metadata client. Should be used to create +// a client when not using a session. Generally using just New with a session +// is preferred. +// +// If an unmodified HTTP client is provided from the stdlib default, or no client +// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. +// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. +func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { + if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { + // If the http client is unmodified and this feature is not disabled + // set custom timeouts for EC2Metadata requests. + cfg.HTTPClient = &http.Client{ + // use a shorter timeout than default because the metadata + // service is local if it is running, and to fail faster + // if not running on an ec2 instance. + Timeout: 5 * time.Second, + } + } + + svc := &EC2Metadata{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: endpoint, + APIVersion: "latest", + }, + handlers, + ), + } + + svc.Handlers.Unmarshal.PushBack(unmarshalHandler) + svc.Handlers.UnmarshalError.PushBack(unmarshalError) + svc.Handlers.Validate.Clear() + svc.Handlers.Validate.PushBack(validateEndpointHandler) + + // Add additional options to the service config + for _, option := range opts { + option(svc.Client) + } + + return svc +} + +func httpClientZero(c *http.Client) bool { + return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) +} + +type metadataOutput struct { + Content string +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + b := &bytes.Buffer{} + if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) + return + } + + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + b := &bytes.Buffer{} + if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) + return + } + + // Response body format is not consistent between metadata endpoints. 
+ // Grab the error message as a string and include that as the source error + r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())) +} + +func validateEndpointHandler(r *request.Request) { + if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 000000000..576636168 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,17 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + // + // @readonly + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + // + // @readonly + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go new file mode 100644 index 000000000..db87188e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -0,0 +1,112 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. +func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. Safe to use on nil value LogLevelTypes. +func (l *LogLevelType) Value() LogLevelType { + if l != nil { + return *l + } + return LogOff +} + +// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be +// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If +// LogLevel is nill, will default to LogOff comparison. +func (l *LogLevelType) Matches(v LogLevelType) bool { + c := l.Value() + return c&v == v +} + +// AtLeast returns true if this LogLevel is at least high enough to satisfies v. +// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default +// to LogOff comparison. +func (l *LogLevelType) AtLeast(v LogLevelType) bool { + c := l.Value() + return c >= v +} + +const ( + // LogOff states that no logging should be performed by the SDK. This is the + // default state of the SDK, and should be use to disable all logging. + LogOff LogLevelType = iota * 0x1000 + + // LogDebug state that debug output should be logged by the SDK. This should + // be used to inspect request made and responses received. + LogDebug +) + +// Debug Logging Sub Levels +const ( + // LogDebugWithSigning states that the SDK should log request signing and + // presigning events. This should be used to log the signing details of + // requests for debugging. Will also enable LogDebug. + LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) + + // LogDebugWithHTTPBody states the SDK should log HTTP request and response + // HTTP bodys in addition to the headers and path. This should be used to + // see the body content of requests and responses made while using the SDK + // Will also enable LogDebug. 
+ LogDebugWithHTTPBody + + // LogDebugWithRequestRetries states the SDK should log when service requests will + // be retried. This should be used to log when you want to log when service + // requests are being retried. Will also enable LogDebug. + LogDebugWithRequestRetries + + // LogDebugWithRequestErrors states the SDK should log when service requests fail + // to build, send, validate, or unmarshal. + LogDebugWithRequestErrors +) + +// A Logger is a minimalistic interface for the SDK to log messages to. Should +// be used to provide custom logging writers for the SDK to use. +type Logger interface { + Log(...interface{}) +} + +// A LoggerFunc is a convenience type to convert a function taking a variadic +// list of arguments and wrap it so the Logger interface can be used. +// +// Example: +// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { +// fmt.Fprintln(os.Stdout, args...) +// })}) +type LoggerFunc func(...interface{}) + +// Log calls the wrapped function with the arguments provided +func (f LoggerFunc) Log(args ...interface{}) { + f(args...) +} + +// NewDefaultLogger returns a Logger which will write log messages to stdout, and +// use same formatting runes as the stdlib log.Logger +func NewDefaultLogger() Logger { + return &defaultLogger{ + logger: log.New(os.Stdout, "", log.LstdFlags), + } +} + +// A defaultLogger provides a minimalistic logger satisfying the Logger interface. +type defaultLogger struct { + logger *log.Logger +} + +// Log logs the parameters to the stdlib logger. See log.Println. +func (l defaultLogger) Log(args ...interface{}) { + l.logger.Println(args...) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go new file mode 100644 index 000000000..5279c19c0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -0,0 +1,187 @@ +package request + +import ( + "fmt" + "strings" +) + +// A Handlers provides a collection of request handlers for various +// stages of handling requests. +type Handlers struct { + Validate HandlerList + Build HandlerList + Sign HandlerList + Send HandlerList + ValidateResponse HandlerList + Unmarshal HandlerList + UnmarshalMeta HandlerList + UnmarshalError HandlerList + Retry HandlerList + AfterRetry HandlerList +} + +// Copy returns of this handler's lists. +func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + } +} + +// Clear removes callback functions for all handlers +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() +} + +// A HandlerListRunItem represents an entry in the HandlerList which +// is being run. +type HandlerListRunItem struct { + Index int + Handler NamedHandler + Request *Request +} + +// A HandlerList manages zero or more handlers in a list. +type HandlerList struct { + list []NamedHandler + + // Called after each request handler in the list is called. If set + // and the func returns true the HandlerList will continue to iterate + // over the request handlers. 
If false is returned the HandlerList + // will stop iterating. + // + // Should be used if extra logic to be performed between each handler + // in the list. This can be used to terminate a list's iteration + // based on a condition such as error like, HandlerListStopOnError. + // Or for logging like HandlerListLogItem. + AfterEachFn func(item HandlerListRunItem) bool +} + +// A NamedHandler is a struct that contains a name and function callback. +type NamedHandler struct { + Name string + Fn func(*Request) +} + +// copy creates a copy of the handler list. +func (l *HandlerList) copy() HandlerList { + n := HandlerList{ + AfterEachFn: l.AfterEachFn, + } + n.list = append([]NamedHandler{}, l.list...) + return n +} + +// Clear clears the handler list. +func (l *HandlerList) Clear() { + l.list = []NamedHandler{} +} + +// Len returns the number of handlers in the list. +func (l *HandlerList) Len() int { + return len(l.list) +} + +// PushBack pushes handler f to the back of the handler list. +func (l *HandlerList) PushBack(f func(*Request)) { + l.list = append(l.list, NamedHandler{"__anonymous", f}) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...) +} + +// PushBackNamed pushes named handler f to the back of the handler list. +func (l *HandlerList) PushBackNamed(n NamedHandler) { + l.list = append(l.list, n) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + l.list = append([]NamedHandler{n}, l.list...) +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + newlist := []NamedHandler{} + for _, m := range l.list { + if m.Name != n.Name { + newlist = append(newlist, m) + } + } + l.list = newlist +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. +func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. +func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. 
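+//
+// For example (an illustrative sketch; the handler name, product name, and
+// version shown here are placeholders, not values used by the SDK):
+//
+//    handlers.Build.PushBackNamed(NamedHandler{
+//        Name: "example.custom.UserAgent",
+//        Fn:   MakeAddToUserAgentHandler("myapp", "1.0", "linux"),
+//    })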
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. +func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 000000000..a4087f20e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,33 @@ +// +build go1.5 + +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := &http.Request{ + URL: &url.URL{}, + Header: http.Header{}, + Close: r.Close, + Body: body, + Host: r.Host, + Method: r.Method, + Proto: r.Proto, + ContentLength: r.ContentLength, + // Cancel will be deprecated in 1.7 and will be replaced with Context + Cancel: r.Cancel, + } + + *req.URL = *r.URL + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go new file mode 100644 index 000000000..75da021ef --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go @@ -0,0 +1,31 @@ +// +build !go1.5 + +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := &http.Request{ + URL: &url.URL{}, + Header: http.Header{}, + Close: r.Close, + Body: body, + Host: r.Host, + Method: r.Method, + Proto: r.Proto, + ContentLength: r.ContentLength, + } + + *req.URL = *r.URL + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 000000000..da6396d2d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,49 @@ +package request + +import ( + "io" + "sync" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.RWMutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { + reader := &offsetReader{} + buf.Seek(offset, 0) + + reader.buf = buf + return reader +} + +// Close is a thread-safe close. Uses the write lock. +func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read using a read lock. +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.RLock() + defer o.lock.RUnlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. 
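+//
+// Send's retry loop uses this to obtain a fresh body reader positioned at the
+// request's original offset before re-sending, roughly (a sketch of the
+// pattern used in request.go):
+//
+//    if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok {
+//        body = reader.CloseAndCopy(r.BodyStart)
+//    }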
+func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { + o.Close() + return newOffsetReader(o.buf, offset) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 000000000..2832aaa43 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,326 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client/metadata" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + Time time.Time + ExpireTime time.Duration + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + + built bool +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator +} + +// Paginator keeps track of pagination configuration for an API operation. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// New returns a new Request pointer for the service API +// operation and parameters. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. +func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, + retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + + httpReq, _ := http.NewRequest(method, "", nil) + + var err error + httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) + if err != nil { + httpReq.URL = &url.URL{} + err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) + } + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + Handlers: handlers.Copy(), + + Retryer: retryer, + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: err, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +// WillRetry returns if the request's can be retried. +func (r *Request) WillRetry() bool { + return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() +} + +// ParamsFilled returns if the request's parameters have been populated +// and the parameters are valid. False is returned if no parameters are +// provided or invalid. +func (r *Request) ParamsFilled() bool { + return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() +} + +// DataFilled returns true if the request's data for response deserialization +// target has been set and is a valid. False is returned if data is not +// set, or is invalid. +func (r *Request) DataFilled() bool { + return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() +} + +// SetBufferBody will set the request's body bytes that will be sent to +// the service API. 
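+//
+// For example, to replace a request's payload with raw bytes before it is
+// sent (an illustrative sketch; req is an assumed, already-built *Request):
+//
+//    req.SetBufferBody([]byte(`{"example": true}`))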
+func (r *Request) SetBufferBody(buf []byte) { + r.SetReaderBody(bytes.NewReader(buf)) +} + +// SetStringBody sets the body of the request to be backed by a string. +func (r *Request) SetStringBody(s string) { + r.SetReaderBody(strings.NewReader(s)) +} + +// SetReaderBody will set the request's body reader. +func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.HTTPRequest.Body = newOffsetReader(reader, 0) + r.Body = reader +} + +// Presign returns the request's signed URL. Error will be returned +// if the signing fails. +func (r *Request) Presign(expireTime time.Duration) (string, error) { + r.ExpireTime = expireTime + r.NotHoist = false + r.Sign() + if r.Error != nil { + return "", r.Error + } + return r.HTTPRequest.URL.String(), nil +} + +// PresignRequest behaves just like presign, but hoists all headers and signs them. +// Also returns the signed hash back to the user +func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { + r.ExpireTime = expireTime + r.NotHoist = true + r.Sign() + if r.Error != nil { + return "", nil, r.Error + } + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + +func debugLogReqError(r *Request, stage string, retrying bool, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + retryStr := "not retrying" + if retrying { + retryStr = "will retry" + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. +// Anny additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. +func (r *Request) Build() error { + if !r.built { + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", false, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } + r.built = true + } + + return r.Error +} + +// Sign will sign the request returning error if errors are encountered. +// +// Send will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } + + r.Handlers.Sign.Run(r) + return r.Error +} + +// Send will send the request returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +// +// Canceling a request is non-deterministic. If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. 
+// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go +func (r *Request) Send() error { + for { + if aws.BoolValue(r.Retryable) { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + var body io.ReadCloser + if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok { + body = reader.CloseAndCopy(r.BodyStart) + } else { + if r.Config.Logger != nil { + r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions") + } + r.Body.Seek(r.BodyStart, 0) + body = ioutil.NopCloser(r.Body) + } + + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body) + if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + // Closing response body. Since we are setting a new request to send off, this + // response will get squashed and leaked. + r.HTTPResponse.Body.Close() + } + } + + r.Sign() + if r.Error != nil { + return r.Error + } + + r.Retryable = nil + + r.Handlers.Send.Run(r) + if r.Error != nil { + if strings.Contains(r.Error.Error(), "net/http: request canceled") { + return r.Error + } + + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", false, r.Error) + return r.Error + } + debugLogReqError(r, "Send Request", true, err) + continue + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.UnmarshalError.Run(r) + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Validate Response", true, err) + continue + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Unmarshal Response", true, err) + continue + } + + break + } + + return nil +} + +// AddToUserAgent adds the string to the end of the request's current user agent. +func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 000000000..2939ec473 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,104 @@ +package request + +import ( + "reflect" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +//type Paginater interface { +// HasNextPage() bool +// NextPage() *Request +// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error +//} + +// HasNextPage returns true if this request has more pages of data available. +func (r *Request) HasNextPage() bool { + return len(r.nextPageTokens()) > 0 +} + +// nextPageTokens returns the tokens to use when asking for the next page of +// data. 
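+//
+// These tokens are what drive the manual pagination pattern built on
+// HasNextPage and NextPage, roughly (a sketch; req and the handling of
+// page.Data are assumptions for illustration):
+//
+//    for page := req; page != nil; page = page.NextPage() {
+//        if err := page.Send(); err != nil {
+//            break
+//        }
+//        // consume page.Data here
+//    }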
+func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if v == false { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + v, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(v) > 0 { + tokens = append(tokens, v[0]) + tokenAdded = true + } else { + tokens = append(tokens, nil) + } + } + if !tokenAdded { + return nil + } + + return tokens +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. +func (r *Request) NextPage() *Request { + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. +func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 000000000..8cc8b015a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,101 @@ +package request + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer is an interface to control retry logic for a given service. +// The default implementation used by most services is the service.DefaultRetryer +// structure, which contains basic retry logic using exponential backoff. +type Retryer interface { + RetryRules(*Request) time.Duration + ShouldRetry(*Request) bool + MaxRetries() int +} + +// WithRetryer sets a config Retryer value to the given Config returning it +// for chaining. +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + cfg.Retryer = retryer + return cfg +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. 
+var retryableCodes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, +} + +var throttleCodes = map[string]struct{}{ + "ProvisionedThroughputExceededException": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once + "TooManyRequestsException": {}, // Lambda functions +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +func (r *Request) IsErrorRetryable() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeRetryable(err.Code()) + } + } + return false +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if the request has no Error set +func (r *Request) IsErrorThrottle() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeThrottle(err.Code()) + } + } + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +func (r *Request) IsErrorExpired() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeExpiredCreds(err.Code()) + } + } + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go new file mode 100644 index 000000000..2520286b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -0,0 +1,234 @@ +package request + +import ( + "bytes" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // InvalidParameterErrCode is the error code for invalid parameters errors + InvalidParameterErrCode = "InvalidParameter" + // ParamRequiredErrCode is the error code for required parameter errors + ParamRequiredErrCode = "ParamRequiredError" + // ParamMinValueErrCode is the error code for fields with too low of a + // number value. + ParamMinValueErrCode = "ParamMinValueError" + // ParamMinLenErrCode is the error code for fields without enough elements. + ParamMinLenErrCode = "ParamMinLenError" +) + +// Validator provides a way for types to perform validation logic on their +// input values that external code can use to determine if a type's values +// are valid. +type Validator interface { + Validate() error +} + +// An ErrInvalidParams provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type ErrInvalidParams struct { + // Context is the base context of the invalid parameter group. + Context string + errs []ErrInvalidParam +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. 
The context of the invalid parameter will be updated to reflect +// this collection. +func (e *ErrInvalidParams) Add(err ErrInvalidParam) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another ErrInvalidParams +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. +func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e ErrInvalidParams) Len() int { + return len(e.errs) +} + +// Code returns the code of the error +func (e ErrInvalidParams) Code() string { + return InvalidParameterErrCode +} + +// Message returns the message of the error +func (e ErrInvalidParams) Message() string { + return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) +} + +// Error returns the string formatted form of the invalid parameters. +func (e ErrInvalidParams) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Message()) + } + + return w.String() +} + +// OrigErr returns the invalid parameters as a awserr.BatchedErrors value +func (e ErrInvalidParams) OrigErr() error { + return awserr.NewBatchError( + InvalidParameterErrCode, e.Message(), e.OrigErrs()) +} + +// OrigErrs returns a slice of the invalid parameters +func (e ErrInvalidParams) OrigErrs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An ErrInvalidParam represents an invalid parameter error type. +type ErrInvalidParam interface { + awserr.Error + + // Field name the error occurred on. + Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type errInvalidParam struct { + context string + nestedContext string + field string + code string + msg string +} + +// Code returns the error code for the type of invalid parameter. +func (e *errInvalidParam) Code() string { + return e.code +} + +// Message returns the reason the parameter was invalid, and its context. +func (e *errInvalidParam) Message() string { + return fmt.Sprintf("%s, %s.", e.msg, e.Field()) +} + +// Error returns the string version of the invalid parameter error. +func (e *errInvalidParam) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.Message()) +} + +// OrigErr returns nil, Implemented for awserr.Error interface. +func (e *errInvalidParam) OrigErr() error { + return nil +} + +// Field Returns the field and context the error occurred. +func (e *errInvalidParam) Field() string { + field := e.context + if len(field) > 0 { + field += "." + } + if len(e.nestedContext) > 0 { + field += fmt.Sprintf("%s.", e.nestedContext) + } + field += e.field + + return field +} + +// SetContext updates the base context of the error. +func (e *errInvalidParam) SetContext(ctx string) { + e.context = ctx +} + +// AddNestedContext prepends a context to the field's path. 
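+//
+// A minimal sketch of how the collection types fit together (the context and
+// field names below are placeholders for illustration):
+//
+//    errs := ErrInvalidParams{Context: "ExampleInput"}
+//    errs.Add(NewErrParamRequired("Name"))
+//
+//    nested := ErrInvalidParams{Context: "Child"}
+//    nested.Add(NewErrParamMinLen("Items", 1))
+//    errs.AddNested("Child", nested)
+//    // errs.Error() now reports both problems with full field paths.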
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+    if len(e.nestedContext) == 0 {
+        e.nestedContext = ctx
+    } else {
+        e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+    }
+}
+
+// An ErrParamRequired represents a required parameter error.
+type ErrParamRequired struct {
+    errInvalidParam
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ErrParamRequired {
+    return &ErrParamRequired{
+        errInvalidParam{
+            code:  ParamRequiredErrCode,
+            field: field,
+            msg:   fmt.Sprintf("missing required field"),
+        },
+    }
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+    errInvalidParam
+    min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+    return &ErrParamMinValue{
+        errInvalidParam: errInvalidParam{
+            code:  ParamMinValueErrCode,
+            field: field,
+            msg:   fmt.Sprintf("minimum field value of %v", min),
+        },
+        min: min,
+    }
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+    return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
+type ErrParamMinLen struct {
+    errInvalidParam
+    min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+    return &ErrParamMinLen{
+        errInvalidParam: errInvalidParam{
+            code:  ParamMinLenErrCode,
+            field: field,
+            msg:   fmt.Sprintf("minimum field size of %v", min),
+        },
+        min: min,
+    }
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+    return e.min
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
new file mode 100644
index 000000000..4ad78d7f3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -0,0 +1,229 @@
+/*
+Package session provides configuration for the SDK's service clients.
+
+Sessions can be shared across all service clients that share the same base
+configuration. The Session is built from the SDK's default configuration and
+request handlers.
+
+Sessions should be cached when possible, because creating a new Session will
+load all configuration values from the environment and config files each time
+the Session is created. Sharing the Session value across all of your service
+clients will ensure the configuration is loaded the fewest number of times possible.
+
+Concurrency
+
+Sessions are safe to use concurrently as long as the Session is not being
+modified. The SDK will not modify the Session once the Session has been created.
+Creating service clients concurrently from a shared Session is safe.
+
+Sessions from Shared Config
+
+Sessions can be created with NewSession, which will only load the additional
+shared config if the AWS_SDK_LOAD_CONFIG environment variable is set.
+Alternatively you can explicitly create a Session with shared config enabled.
+To do this you can use NewSessionWithOptions to configure how the Session will
+be created. Using NewSessionWithOptions with SharedConfigState set to
+SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG
+environment variable was set.
+
+Creating Sessions
+
+When creating Sessions optional aws.Config values can be passed in that will
+override the default, or loaded config values the Session is being created
+with. This allows you to provide additional, or case based, configuration
+as needed.
+
+By default NewSession will only load credentials from the shared credentials
+file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
+set to a truthy value the Session will be created from the configuration
+values from the shared config (~/.aws/config) and shared credentials
+(~/.aws/credentials) files. See the section Sessions from Shared Config for
+more information.
+
+Create a Session with the default config and request handlers. With credentials,
+region, and profile loaded from the environment and shared config automatically.
+Requires the AWS_PROFILE to be set, or "default" is used.
+
+    // Create Session
+    sess, err := session.NewSession()
+
+    // Create a Session with a custom region
+    sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
+
+    // Create an S3 client instance from a session
+    sess, err := session.NewSession()
+    if err != nil {
+        // Handle Session creation error
+    }
+    svc := s3.New(sess)
+
+Create Session With Option Overrides
+
+In addition to NewSession, Sessions can be created using NewSessionWithOptions.
+This func allows you to control and override how the Session will be created
+through code instead of being driven by environment variables only.
+
+Use NewSessionWithOptions when you want to provide the config profile, or
+override the shared config state (AWS_SDK_LOAD_CONFIG).
+
+    // Equivalent to session.New
+    sess, err := session.NewSessionWithOptions(session.Options{})
+
+    // Specify profile to load for the session's config
+    sess, err := session.NewSessionWithOptions(session.Options{
+        Profile: "profile_name",
+    })
+
+    // Specify profile for config and region for requests
+    sess, err := session.NewSessionWithOptions(session.Options{
+        Config:  aws.Config{Region: aws.String("us-east-1")},
+        Profile: "profile_name",
+    })
+
+    // Force enable Shared Config support
+    sess, err := session.NewSessionWithOptions(session.Options{
+        SharedConfigState: SharedConfigEnable,
+    })
+
+Deprecated "New" function
+
+The New session function has been deprecated because it does not provide a good
+way to return errors that occur when loading the configuration files and values.
+Because of this, NewSession was created so errors can be retrieved when
+creating a session fails.
+
+Adding Handlers
+
+You can add handlers to a session for processing HTTP requests. All service
+clients that use the session inherit the handlers. For example, the following
+handler logs every request and its payload made by a service client:
+
+    // Create a session, and add additional handlers for all service
+    // clients created with the Session to inherit. Adds logging handler.
+    sess, err := session.NewSession()
+    sess.Handlers.Send.PushFront(func(r *request.Request) {
+        // Log every request made and its payload
+        logger.Println("Request: %s/%s, Payload: %s",
+            r.ClientInfo.ServiceName, r.Operation, r.Params)
+    })
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's (~/.aws/credentials)
+credentials values, and all other config is provided by the environment variables,
+SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or the SharedConfigEnable
+option is used to create the Session the full shared config values will be
+loaded. This includes credentials, region, and support for assume role. In
+addition the Session will load its configuration from both the shared config
+file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
+files have the same format.
+
+If both config files are present the configuration from both files will be
+read. The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config
+file (~/.aws/config).
+
+Credentials are the values the SDK should use for authenticating requests with
+AWS Services. Credentials loaded from a configuration file must include both
+aws_access_key_id and aws_secret_access_key, provided together in the same
+file, to be considered valid. The values will be ignored if not a complete
+group. aws_session_token is an optional field that can be provided if both of
+the other two fields are also provided.
+
+    aws_access_key_id = AKID
+    aws_secret_access_key = SECRET
+    aws_session_token = TOKEN
+
+Assume Role values allow you to configure the SDK to assume an IAM role using
+a set of credentials provided in a config file via the source_profile field.
+Both "role_arn" and "source_profile" are required. The SDK does not support
+assuming a role with an MFA token via the Session's constructor. You can use
+the stscreds.AssumeRoleProvider credentials provider to specify custom
+configuration and support for MFA.
+
+    role_arn = arn:aws:iam:::role/
+    source_profile = profile_with_creds
+    external_id = 1234
+    mfa_serial = not supported!
+    role_session_name = session_name
+
+Region is the region the SDK should use for looking up AWS service endpoints
+and signing requests.
+
+    region = us-east-1
+
+Environment Variables
+
+When a Session is created several environment variables can be set to adjust
+how the SDK functions, and what configuration data it loads when creating
+Sessions. All environment values are optional, but some values like credentials
+require multiple of the values to be set or the partial values will be ignored.
+All environment variable values are strings unless otherwise noted.
+
+Environment configuration values. If set, both Access Key ID and Secret Access
+Key must be provided. A Session Token can optionally also be provided, but is
+not required.
+
+    # Access Key ID
+    AWS_ACCESS_KEY_ID=AKID
+    AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+
+    # Secret Access Key
+    AWS_SECRET_ACCESS_KEY=SECRET
+    AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+
+    # Session Token
+    AWS_SESSION_TOKEN=TOKEN
+
+Region value will instruct the SDK where to make service API requests to. If it
+is not provided in the environment the region must be provided before a service
+client request is made.
+
+    AWS_REGION=us-east-1
+
+    # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+    # and AWS_REGION is not also set.
+    AWS_DEFAULT_REGION=us-east-1
+
+Profile name the SDK should use when loading shared config from the
+configuration files. If not provided "default" will be used as the profile name.
+ + AWS_PROFILE=my_profile + + # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_PROFILE is not also set. + AWS_DEFAULT_PROFILE=my_profile + +SDK load config instructs the SDK to load the shared config in addition to +shared credentials. This also expands the configuration loaded so the shared +credentials will have parity with the shared config file. This also enables +Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE +env values as well. + + AWS_SDK_LOAD_CONFIG=1 + +Shared credentials file path can be set to instruct the SDK to use an alternative +file for the shared credentials. If not set the file will be loaded from +$HOME/.aws/credentials on Linux/Unix based systems, and +%USERPROFILE%\.aws\credentials on Windows. + + AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + +Shared config file path can be set to instruct the SDK to use an alternative +file for the shared config. If not set the file will be loaded from +$HOME/.aws/config on Linux/Unix based systems, and +%USERPROFILE%\.aws\config on Windows. + + AWS_CONFIG_FILE=$HOME/my_shared_config + + +*/ +package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go new file mode 100644 index 000000000..d2f0c8448 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -0,0 +1,188 @@ +package session + +import ( + "os" + "path/filepath" + "strconv" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +// envConfig is a collection of environment values the SDK will read +// setup config from. All environment values are optional. But some values +// such as credentials require multiple values to be complete or the values +// will be ignored. +type envConfig struct { + // Environment configuration values. If set both Access Key ID and Secret Access + // Key must be provided. Session Token and optionally also be provided, but is + // not required. + // + // # Access Key ID + // AWS_ACCESS_KEY_ID=AKID + // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + // + // # Secret Access Key + // AWS_SECRET_ACCESS_KEY=SECRET + // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + // + // # Session Token + // AWS_SESSION_TOKEN=TOKEN + Creds credentials.Value + + // Region value will instruct the SDK where to make service API requests to. If is + // not provided in the environment the region must be provided before a service + // client request is made. + // + // AWS_REGION=us-east-1 + // + // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_REGION is not also set. + // AWS_DEFAULT_REGION=us-east-1 + Region string + + // Profile name the SDK should load use when loading shared configuration from the + // shared configuration files. If not provided "default" will be used as the + // profile name. + // + // AWS_PROFILE=my_profile + // + // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_PROFILE is not also set. + // AWS_DEFAULT_PROFILE=my_profile + Profile string + + // SDK load config instructs the SDK to load the shared config in addition to + // shared credentials. This also expands the configuration loaded from the shared + // credentials to have parity with the shared config file. This also enables + // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE + // env values as well. 
+ // + // AWS_SDK_LOAD_CONFIG=1 + EnableSharedConfig bool + + // Shared credentials file path can be set to instruct the SDK to use an alternate + // file for the shared credentials. If not set the file will be loaded from + // $HOME/.aws/credentials on Linux/Unix based systems, and + // %USERPROFILE%\.aws\credentials on Windows. + // + // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + SharedCredentialsFile string + + // Shared config file path can be set to instruct the SDK to use an alternate + // file for the shared config. If not set the file will be loaded from + // $HOME/.aws/config on Linux/Unix based systems, and + // %USERPROFILE%\.aws\config on Windows. + // + // AWS_CONFIG_FILE=$HOME/my_shared_config + SharedConfigFile string +} + +var ( + credAccessEnvKey = []string{ + "AWS_ACCESS_KEY_ID", + "AWS_ACCESS_KEY", + } + credSecretEnvKey = []string{ + "AWS_SECRET_ACCESS_KEY", + "AWS_SECRET_KEY", + } + credSessionEnvKey = []string{ + "AWS_SESSION_TOKEN", + } + + regionEnvKeys = []string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + profileEnvKeys = []string{ + "AWS_PROFILE", + "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set + } +) + +// loadEnvConfig retrieves the SDK's environment configuration. +// See `envConfig` for the values that will be retrieved. +// +// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value +// the shared SDK config will be loaded in addition to the SDK's specific +// configuration values. +func loadEnvConfig() envConfig { + enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) + return envConfigLoad(enableSharedConfig) +} + +// loadEnvSharedConfig retrieves the SDK's environment configuration, and the +// SDK shared config. See `envConfig` for the values that will be retrieved. +// +// Loads the shared configuration in addition to the SDK's specific configuration. +// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` +// environment variable is set. 
+func loadSharedEnvConfig() envConfig { + return envConfigLoad(true) +} + +func envConfigLoad(enableSharedConfig bool) envConfig { + cfg := envConfig{} + + cfg.EnableSharedConfig = enableSharedConfig + + setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + + // Require logical grouping of credentials + if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { + cfg.Creds = credentials.Value{} + } else { + cfg.Creds.ProviderName = "EnvConfigCredentials" + } + + regionKeys := regionEnvKeys + profileKeys := profileEnvKeys + if !cfg.EnableSharedConfig { + regionKeys = regionKeys[:1] + profileKeys = profileKeys[:1] + } + + setFromEnvVal(&cfg.Region, regionKeys) + setFromEnvVal(&cfg.Profile, profileKeys) + + cfg.SharedCredentialsFile = sharedCredentialsFilename() + cfg.SharedConfigFile = sharedConfigFilename() + + return cfg +} + +func setFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) > 0 { + *dst = v + break + } + } +} + +func sharedCredentialsFilename() string { + if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 { + return name + } + + return filepath.Join(userHomeDir(), ".aws", "credentials") +} + +func sharedConfigFilename() string { + if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 { + return name + } + + return filepath.Join(userHomeDir(), ".aws", "config") +} + +func userHomeDir() string { + homeDir := os.Getenv("HOME") // *nix + if len(homeDir) == 0 { // windows + homeDir = os.Getenv("USERPROFILE") + } + + return homeDir +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 000000000..2374b1f27 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,388 @@ +package session + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/endpoints" +) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the Session concurrently. +// +// The Session satisfies the service client's client.ClientConfigProvider. +type Session struct { + Config *aws.Config + Handlers request.Handlers +} + +// New creates a new instance of the handlers merging in the provided configs +// on top of the SDK's default configurations. Once the Session is created it +// can be mutated to modify the Config or Handlers. The Session is safe to be +// read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New +// method could now encounter an error when loading the configuration. When +// The environment variable is set, and an error occurs, New will return a +// session that will fail all requests reporting the error that occured while +// loading the session. Use NewSession to get the error when creating the +// session. 
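+//
+// For example (illustrative), to surface such an error at creation time
+// instead of on the first request:
+//
+//	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
+//	if err != nil {
+//		// handle the error returned while loading the session
+//	}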
+// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded, in addition to +// the shared credentials file (~/.aws/config). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. +// +// Deprecated: Use NewSession functiions to create sessions instead. NewSession +// has the same functionality as New except an error can be returned when the +// func is called instead of waiting to receive an error until a request is made. +func New(cfgs ...*aws.Config) *Session { + // load initial config from environment + envCfg := loadEnvConfig() + + if envCfg.EnableSharedConfig { + s, err := newSession(envCfg, cfgs...) + if err != nil { + // Old session.New expected all errors to be discovered when + // a request is made, and would report the errors then. This + // needs to be replicated if an error occurs while creating + // the session. + msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + + "Use session.NewSession to handle errors occuring during session creation." + + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s = &Session{Config: defaults.Config()} + s.Config.MergeIn(cfgs...) + s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) + } + return s + } + + return oldNewSession(cfgs...) +} + +// NewSession returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. Once the Session is created +// it can be mutated to modify the Config or Handlers. The Session is safe to +// be read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/config). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// See the NewSessionWithOptions func for information on how to override or +// control through code how the Session will be created. Such as specifing the +// config profile, and controlling if shared config is enabled or not. +func NewSession(cfgs ...*aws.Config) (*Session, error) { + envCfg := loadEnvConfig() + + return newSession(envCfg, cfgs...) +} + +// SharedConfigState provides the ability to optionally override the state +// of the session's creation based on the shared config being enabled or +// disabled. +type SharedConfigState int + +const ( + // SharedConfigStateFromEnv does not override any state of the + // AWS_SDK_LOAD_CONFIG env var. It is the default value of the + // SharedConfigState type. + SharedConfigStateFromEnv SharedConfigState = iota + + // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value + // and disables the shared config functionality. + SharedConfigDisable + + // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value + // and enables the shared config functionality. + SharedConfigEnable +) + +// Options provides the means to control how a Session is created and what +// configuration values will be loaded. 
+// +type Options struct { + // Provides config values for the SDK to use when creating service clients + // and making API requests to services. Any value set in with this field + // will override the associated value provided by the SDK defaults, + // environment or config files where relevent. + // + // If not set, configuration values from from SDK defaults, environment, + // config will be used. + Config aws.Config + + // Overrides the config profile the Session should be created from. If not + // set the value of the environment variable will be loaded (AWS_PROFILE, + // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). + // + // If not set and environment variables are not set the "default" + // (DefaultSharedConfigProfile) will be used as the profile to load the + // session config from. + Profile string + + // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG + // environment variable. By default a Session will be created using the + // value provided by the AWS_SDK_LOAD_CONFIG environment variable. + // + // Setting this value to SharedConfigEnable or SharedConfigDisable + // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable + // and enable or disable the shared config functionality. + SharedConfigState SharedConfigState +} + +// NewSessionWithOptions returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. This func uses the Options +// values to configure how the Session is created. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/config). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// // Equivalent to session.New +// sess, err := session.NewSessionWithOptions(session.Options{}) +// +// // Specify profile to load for the session's config +// sess, err := session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// }) +// +// // Specify profile for config and region for requests +// sess, err := session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// }) +// +// // Force enable Shared Config support +// sess, err := session.NewSessionWithOptions(session.Options{ +// SharedConfigState: SharedConfigEnable, +// }) +func NewSessionWithOptions(opts Options) (*Session, error) { + envCfg := loadEnvConfig() + + if len(opts.Profile) > 0 { + envCfg.Profile = opts.Profile + } + + switch opts.SharedConfigState { + case SharedConfigDisable: + envCfg.EnableSharedConfig = false + case SharedConfigEnable: + envCfg.EnableSharedConfig = true + } + + return newSession(envCfg, &opts.Config) +} + +// Must is a helper function to ensure the Session is valid and there was no +// error when calling a NewSession function. +// +// This helper is intended to be used in variable initialization to load the +// Session and configuration at startup. 
Such as: +// +// var sess = session.Must(session.NewSession()) +func Must(sess *Session, err error) *Session { + if err != nil { + panic(err) + } + + return sess +} + +func oldNewSession(cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + return s +} + +func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Get a merged version of the user provided config to determine if + // credentials were. + userCfg := &aws.Config{} + userCfg.MergeIn(cfgs...) + + // Order config files will be loaded in with later files overwriting + // previous config file values. + cfgFiles := []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} + if !envCfg.EnableSharedConfig { + // The shared config file (~/.aws/config) is only loaded if instructed + // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). + cfgFiles = cfgFiles[1:] + } + + // Load additional config from file(s) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles) + if err != nil { + return nil, err + } + + mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + return s, nil +} + +func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers) { + // Merge in user provided configuration + cfg.MergeIn(userCfg) + + // Region if not already set by user + if len(aws.StringValue(cfg.Region)) == 0 { + if len(envCfg.Region) > 0 { + cfg.WithRegion(envCfg.Region) + } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { + cfg.WithRegion(sharedCfg.Region) + } + } + + // Configure credentials if not already set + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + if len(envCfg.Creds.AccessKeyID) > 0 { + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + envCfg.Creds, + ) + } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil { + cfgCp := *cfg + cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( + sharedCfg.AssumeRoleSource.Creds, + ) + cfg.Credentials = stscreds.NewCredentials( + &Session{ + Config: &cfgCp, + Handlers: handlers.Copy(), + }, + sharedCfg.AssumeRole.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName + + if len(sharedCfg.AssumeRole.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) + } + + // MFA not supported + }, + ) + } else if len(sharedCfg.Creds.AccessKeyID) > 0 { + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ) + } else { + // Fallback to default credentials provider, include mock errors + // for the credential chain so user can identify why credentials + // failed to be retrieved. 
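+ // The providers below mirror the default lookup order. The first two are
+ // placeholders that only explain why the earlier environment and shared
+ // credentials lookups produced nothing; the remote (EC2 role / endpoint)
+ // provider is the only one that can still supply credentials here.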
+ cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)}, + &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)}, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }) + } + } +} + +type credProviderError struct { + Err error +} + +var emptyCreds = credentials.Value{} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current Session, coping the config +// and handlers. If any additional configs are provided they will be merged +// on top of the Session's copied config. +// +// // Create a copy of the current Session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + endpoint, signingRegion := endpoints.NormalizeEndpoint( + aws.StringValue(s.Config.Endpoint), + serviceName, + aws.StringValue(s.Config.Region), + aws.BoolValue(s.Config.DisableSSL), + aws.BoolValue(s.Config.UseDualStack), + ) + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: endpoint, + SigningRegion: signingRegion, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go new file mode 100644 index 000000000..0147eedeb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -0,0 +1,294 @@ +package session + +import ( + "fmt" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/go-ini/ini" +) + +const ( + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + + // Additional Config fields + regionKey = `region` + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. 
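+ //
+ // For illustration, a minimal ~/.aws/config using the default profile and
+ // an assume role profile could look like this (profile names and the role
+ // ARN are examples only):
+ //
+ //	[default]
+ //	region = us-east-1
+ //
+ //	[profile other_account]
+ //	role_arn = arn:aws:iam::123456789012:role/example
+ //	source_profile = default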
+ DefaultSharedConfigProfile = `default` +) + +type assumeRoleConfig struct { + RoleARN string + SourceProfile string + ExternalID string + MFASerial string + RoleSessionName string +} + +// sharedConfig represents the configuration fields of the SDK config files. +type sharedConfig struct { + // Credentials values from the config file. Both aws_access_key_id + // and aws_secret_access_key must be provided together in the same file + // to be considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of the + // other two fields are also provided. + // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Creds credentials.Value + + AssumeRole assumeRoleConfig + AssumeRoleSource *sharedConfig + + // Region is the region the SDK should use for looking up AWS service endpoints + // and signing requests. + // + // region + Region string +} + +type sharedConfigFile struct { + Filename string + IniData *ini.File +} + +// loadSharedConfig retrieves the configuration from the list of files +// using the profile provided. The order the files are listed will determine +// precedence. Values in subsequent files will overwrite values defined in +// earlier files. +// +// For example, given two files A and B. Both define credentials. If the order +// of the files are A then B, B's credential values will be used instead of A's. +// +// See sharedConfig.setFromFile for information how the config files +// will be loaded. +func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) { + if len(profile) == 0 { + profile = DefaultSharedConfigProfile + } + + files, err := loadSharedConfigIniFiles(filenames) + if err != nil { + return sharedConfig{}, err + } + + cfg := sharedConfig{} + if err = cfg.setFromIniFiles(profile, files); err != nil { + return sharedConfig{}, err + } + + if len(cfg.AssumeRole.SourceProfile) > 0 { + if err := cfg.setAssumeRoleSource(profile, files); err != nil { + return sharedConfig{}, err + } + } + + return cfg, nil +} + +func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { + files := make([]sharedConfigFile, 0, len(filenames)) + + for _, filename := range filenames { + if _, err := os.Stat(filename); os.IsNotExist(err) { + // Trim files from the list that don't exist. + continue + } + + f, err := ini.Load(filename) + if err != nil { + return nil, SharedConfigLoadError{Filename: filename} + } + + files = append(files, sharedConfigFile{ + Filename: filename, IniData: f, + }) + } + + return files, nil +} + +func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { + var assumeRoleSrc sharedConfig + + // Multiple level assume role chains are not support + if cfg.AssumeRole.SourceProfile == origProfile { + assumeRoleSrc = *cfg + assumeRoleSrc.AssumeRole = assumeRoleConfig{} + } else { + err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files) + if err != nil { + return err + } + } + + if len(assumeRoleSrc.Creds.AccessKeyID) == 0 { + return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN} + } + + cfg.AssumeRoleSource = &assumeRoleSrc + + return nil +} + +func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error { + // Trim files from the list that don't exist. 
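+ // Apply the named profile from each file in order; values from later files
+ // override earlier ones, and files that do not contain the profile are
+ // skipped.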
+ for _, f := range files { + if err := cfg.setFromIniFile(profile, f); err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + // Ignore proviles missings + continue + } + return err + } + } + + return nil +} + +// setFromFile loads the configuration from the file using +// the profile provided. A sharedConfig pointer type value is used so that +// multiple config file loadings can be chained. +// +// Only loads complete logically grouped values, and will not set fields in cfg +// for incomplete grouped values in the config. Such as credentials. For example +// if a config file only includes aws_access_key_id but no aws_secret_access_key +// the aws_access_key_id will be ignored. +func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error { + section, err := file.IniData.GetSection(profile) + if err != nil { + // Fallback to to alternate profile name: profile + section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) + if err != nil { + return SharedConfigProfileNotExistsError{Profile: profile, Err: err} + } + } + + // Shared Credentials + akid := section.Key(accessKeyIDKey).String() + secret := section.Key(secretAccessKey).String() + if len(akid) > 0 && len(secret) > 0 { + cfg.Creds = credentials.Value{ + AccessKeyID: akid, + SecretAccessKey: secret, + SessionToken: section.Key(sessionTokenKey).String(), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + } + + // Assume Role + roleArn := section.Key(roleArnKey).String() + srcProfile := section.Key(sourceProfileKey).String() + if len(roleArn) > 0 && len(srcProfile) > 0 { + cfg.AssumeRole = assumeRoleConfig{ + RoleARN: roleArn, + SourceProfile: srcProfile, + ExternalID: section.Key(externalIDKey).String(), + MFASerial: section.Key(mfaSerialKey).String(), + RoleSessionName: section.Key(roleSessionNameKey).String(), + } + } + + // Region + if v := section.Key(regionKey).String(); len(v) > 0 { + cfg.Region = v + } + + return nil +} + +// SharedConfigLoadError is an error for the shared config file failed to load. +type SharedConfigLoadError struct { + Filename string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigLoadError) Code() string { + return "SharedConfigLoadError" +} + +// Message is the description of the error +func (e SharedConfigLoadError) Message() string { + return fmt.Sprintf("failed to load config file, %s", e.Filename) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigLoadError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigLoadError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigProfileNotExistsError is an error for the shared config when +// the profile was not find in the config file. +type SharedConfigProfileNotExistsError struct { + Profile string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigProfileNotExistsError) Code() string { + return "SharedConfigProfileNotExistsError" +} + +// Message is the description of the error +func (e SharedConfigProfileNotExistsError) Message() string { + return fmt.Sprintf("failed to get profile, %s", e.Profile) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigProfileNotExistsError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. 
+func (e SharedConfigProfileNotExistsError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigAssumeRoleError is an error for the shared config when the +// profile contains assume role information, but that information is invalid +// or not complete. +type SharedConfigAssumeRoleError struct { + RoleARN string +} + +// Code is the short id of the error. +func (e SharedConfigAssumeRoleError) Code() string { + return "SharedConfigAssumeRoleError" +} + +// Message is the description of the error +func (e SharedConfigAssumeRoleError) Message() string { + return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials", + e.RoleARN) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigAssumeRoleError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e SharedConfigAssumeRoleError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go new file mode 100644 index 000000000..244c86da0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go @@ -0,0 +1,82 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// validator houses a set of rule needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool +} + +// IsValid will iterate through all rules and see if any rules +// apply to the value and supports nested rules +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for the map rule satisfies whether it exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// whitelist is a generic rule for whitelisting +type whitelist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (w whitelist) IsValid(value string) bool { + return w.rule.IsValid(value) +} + +// blacklist is a generic rule for blacklisting +type blacklist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (b blacklist) IsValid(value string) bool { + return !b.rule.IsValid(value) +} + +type patterns []string + +// IsValid for patterns checks each pattern and returns if a match has +// been found +func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + return true + } + } + return false +} + +// inclusiveRules rules allow for rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go new file mode 100644 index 000000000..7d99f54d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -0,0 +1,644 @@ +// Package v4 implements signing for AWS V4 signer +// +// Provides request signing for request that 
need to be signed with +// AWS V4 Signatures. +package v4 + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" + + // emptyStringSHA256 is a SHA256 of an empty string + emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + "Authorization": struct{}{}, + "User-Agent": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. +var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +// Signer applies AWS v4 signing to given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials + + // Sets the log level the signer should use when reporting information to + // the logger. If the logger is nil nothing will be logged. See + // aws.LogLevelType for more information on available logging levels + // + // By default nothing will be logged. + Debug aws.LogLevelType + + // The logger loging information will be written to. If there the logger + // is nil, nothing will be logged. 
+ Logger aws.Logger + + // Disables the Signer's moving HTTP header key/value pairs from the HTTP + // request header to the request's query string. This is most commonly used + // with pre-signed requests preventing headers from being added to the + // request's query string. + DisableHeaderHoisting bool + + // currentTimeFn returns the time value which represents the current time. + // This value should only be used for testing. If it is nil the default + // time.Now will be used. + currentTimeFn func() time.Time +} + +// NewSigner returns a Signer pointer configured with the credentials and optional +// option values provided. If not options are provided the Signer will use its +// default configuration. +func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { + v4 := &Signer{ + Credentials: credentials, + } + + for _, option := range options { + option(v4) + } + + return v4 +} + +type signingCtx struct { + ServiceName string + Region string + Request *http.Request + Body io.ReadSeeker + Query url.Values + Time time.Time + ExpireTime time.Duration + SignedHeaderVals http.Header + + credValues credentials.Value + isPresign bool + formattedTime string + formattedShortTime string + + bodyDigest string + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string +} + +// Sign signs AWS v4 requests with the provided body, service name, region the +// request is made to, and time the request is signed at. The signTime allows +// you to specify that a request is signed for the future, and cannot be +// used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. Generally for signed requests this value +// is not needed as the full request context will be captured by the http.Request +// value. It is included for reference though. +// +// Sign differs from Presign in that it will sign the request using HTTP +// header values. This type of signing is intended for http.Request values that +// will not be shared, or are shared in a way the header values on the request +// will not be lost. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, 0, signTime) +} + +// Presign signs AWS v4 requests with the provided body, service name, region +// the request is made to, and time the request is signed at. The signTime +// allows you to specify that a request is signed for the future, and cannot +// be used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. For presigned requests these headers +// and their values must be included on the HTTP request when it is made. This +// is helpful to know what header values need to be shared with the party the +// presigned request will be distributed to. +// +// Presign differs from Sign in that it will sign the request using query string +// instead of header values. 
This allows you to share the Presigned Request's +// URL with third parties, or distribute it throughout your system with minimal +// dependencies. +// +// Presign also takes an exp value which is the duration the +// signed request will be valid after the signing time. This is allows you to +// set when the request will expire. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +// +// Presigning a S3 request will not compute the body's SHA256 hash by default. +// This is done due to the general use case for S3 presigned URLs is to share +// PUT/GET capabilities. If you would like to include the body's SHA256 in the +// presigned request's signature you can set the "X-Amz-Content-Sha256" +// HTTP header and that will be included in the request's signature. +func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, exp, signTime) +} + +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + currentTimeFn := v4.currentTimeFn + if currentTimeFn == nil { + currentTimeFn = time.Now + } + + ctx := &signingCtx{ + Request: r, + Body: body, + Query: r.URL.Query(), + Time: signTime, + ExpireTime: exp, + isPresign: exp != 0, + ServiceName: service, + Region: region, + } + + if ctx.isRequestSigned() { + if !v4.Credentials.IsExpired() && currentTimeFn().Before(ctx.Time.Add(10*time.Minute)) { + // If the request is already signed, and the credentials have not + // expired, and the request is not too old ignore the signing request. + return ctx.SignedHeaderVals, nil + } + ctx.Time = currentTimeFn() + ctx.handlePresignRemoval() + } + + var err error + ctx.credValues, err = v4.Credentials.Get() + if err != nil { + return http.Header{}, err + } + + ctx.assignAmzQueryValues() + ctx.build(v4.DisableHeaderHoisting) + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo(ctx) + } + + return ctx.SignedHeaderVals, nil +} + +func (ctx *signingCtx) handlePresignRemoval() { + if !ctx.isPresign { + return + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + ctx.removePresign() + + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. + ctx.Request.URL.RawQuery = ctx.Query.Encode() +} + +func (ctx *signingCtx) assignAmzQueryValues() { + if ctx.isPresign { + ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if ctx.credValues.SessionToken != "" { + ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } else { + ctx.Query.Del("X-Amz-Security-Token") + } + + return + } + + if ctx.credValues.SessionToken != "" { + ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } +} + +// SignRequestHandler is a named request handler the SDK will use to sign +// service client request with using the V4 signature. +var SignRequestHandler = request.NamedHandler{ + Name: "v4.SignRequestHandler", Fn: SignSDKRequest, +} + +// SignSDKRequest signs an AWS request with the V4 signature. 
This +// request handler is bested used only with the SDK's built in service client's +// API operation requests. +// +// This function should not be used on its on its own, but in conjunction with +// an AWS service client's API operation call. To sign a standalone request +// not created by a service client's API operation method use the "Sign" or +// "Presign" functions of the "Signer" type. +// +// If the credentials of the request's config are set to +// credentials.AnonymousCredentials the request will not be signed. +func SignSDKRequest(req *request.Request) { + signSDKRequestWithCurrTime(req, time.Now) +} +func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { + v4.Debug = req.Config.LogLevel.Value() + v4.Logger = req.Config.Logger + v4.DisableHeaderHoisting = req.NotHoist + v4.currentTimeFn = curTimeFn + }) + + signingTime := req.Time + if !req.LastSignedAt.IsZero() { + signingTime = req.LastSignedAt + } + + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.Body, name, region, req.ExpireTime, signingTime) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTimeFn() +} + +const logSignInfoMsg = `DEBUG: Request Signiture: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildBodyDigest() + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + ctx.signedHeaders, + "Signature=" + ctx.signature, + } + ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } +} + +func (ctx *signingCtx) buildTime() { + ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) + ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) + + if ctx.isPresign { + 
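+ // Presigned requests carry the signing time and expiry in the query
+ // string; header signed requests set the X-Amz-Date header instead.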
duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", ctx.formattedTime) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = strings.Join([]string{ + ctx.formattedShortTime, + ctx.Region, + ctx.ServiceName, + "aws4_request", + }, "/") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) + continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + ctx.Request.URL.Host + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + + ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + uri := ctx.Request.URL.Opaque + if uri != "" { + uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") + } else { + uri = ctx.Request.URL.Path + } + if uri == "" { + uri = "/" + } + + if ctx.ServiceName != "s3" { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + ctx.formattedTime, + ctx.credentialString, + hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + secret := ctx.credValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) + region := makeHmac(date, []byte(ctx.Region)) + service := makeHmac(region, []byte(ctx.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if ctx.isPresign && ctx.ServiceName == "s3" { + hash = "UNSIGNED-PAYLOAD" + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + hash = 
hex.EncodeToString(makeSha256Reader(ctx.Body)) + } + if ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. +func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} + +const doubleSpaces = " " + +var doubleSpaceBytes = []byte(doubleSpaces) + +func stripExcessSpaces(headerVals []string) []string { + vals := make([]string, len(headerVals)) + for i, str := range headerVals { + // Trim leading and trailing spaces + trimmed := strings.TrimSpace(str) + + idx := strings.Index(trimmed, doubleSpaces) + var buf []byte + for idx > -1 { + // Multiple adjacent spaces found + if buf == nil { + // first time create the buffer + buf = []byte(trimmed) + } + + stripToIdx := -1 + for j := idx + 1; j < len(buf); j++ { + if buf[j] != ' ' { + buf = append(buf[:idx+1], buf[j:]...) + stripToIdx = j + break + } + } + + if stripToIdx >= 0 { + idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) + if idx >= 0 { + idx += stripToIdx + } + } else { + idx = -1 + } + } + + if buf != nil { + vals[i] = string(buf) + } else { + vals[i] = trimmed + } + } + return vals +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go new file mode 100644 index 000000000..fa014b49e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -0,0 +1,106 @@ +package aws + +import ( + "io" + "sync" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be returned. +// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. 
+// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex + + // GrowthCoeff defines the growth rate of the internal buffer. By + // default, the growth rate is 1, where expanding the internal + // buffer will allocate only enough capacity to fit the new expected + // length. + GrowthCoeff float64 +} + +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer +// provided by buf. +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { + return &WriteAtBuffer{buf: buf} +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + pLen := len(p) + expLen := pos + int64(pLen) + b.m.Lock() + defer b.m.Unlock() + if int64(len(b.buf)) < expLen { + if int64(cap(b.buf)) < expLen { + if b.GrowthCoeff < 1 { + b.GrowthCoeff = 1 + } + newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) + copy(newBuf, b.buf) + b.buf = newBuf + } + b.buf = b.buf[:expLen] + } + copy(b.buf[pos:], p) + return pLen, nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf[:len(b.buf):len(b.buf)] +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go new file mode 100644 index 000000000..1fa57165f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "1.4.1" diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go new file mode 100644 index 000000000..b4ad7405c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go @@ -0,0 +1,70 @@ +// Package endpoints validates regional endpoints for services. +package endpoints + +//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go +//go:generate gofmt -s -w endpoints_map.go + +import ( + "fmt" + "regexp" + "strings" +) + +// NormalizeEndpoint takes and endpoint and service API information to return a +// normalized endpoint and signing region. If the endpoint is not an empty string +// the service name and region will be used to look up the service's API endpoint. +// If the endpoint is provided the scheme will be added if it is not present. 
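+//
+// For example (illustrative; the exact values depend on the generated
+// endpoints map):
+//
+//	endpoint, signingRegion := NormalizeEndpoint("", "route53", "us-east-1", false, false)
+//	// endpoint == "https://route53.amazonaws.com", signingRegion == "us-east-1"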
+func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL, useDualStack bool) (normEndpoint, signingRegion string) { + if endpoint == "" { + return EndpointForRegion(serviceName, region, disableSSL, useDualStack) + } + + return AddScheme(endpoint, disableSSL), "" +} + +// EndpointForRegion returns an endpoint and its signing region for a service and region. +// if the service and region pair are not found endpoint and signingRegion will be empty. +func EndpointForRegion(svcName, region string, disableSSL, useDualStack bool) (endpoint, signingRegion string) { + dualStackField := "" + if useDualStack { + dualStackField = "/dualstack" + } + + derivedKeys := []string{ + region + "/" + svcName + dualStackField, + region + "/*" + dualStackField, + "*/" + svcName + dualStackField, + "*/*" + dualStackField, + } + + for _, key := range derivedKeys { + if val, ok := endpointsMap.Endpoints[key]; ok { + ep := val.Endpoint + ep = strings.Replace(ep, "{region}", region, -1) + ep = strings.Replace(ep, "{service}", svcName, -1) + + endpoint = ep + signingRegion = val.SigningRegion + break + } + } + + return AddScheme(endpoint, disableSSL), signingRegion +} + +// Regular expression to determine if the endpoint string is prefixed with a scheme. +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no +// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS. +func AddScheme(endpoint string, disableSSL bool) string { + if endpoint != "" && !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json new file mode 100644 index 000000000..c5bf3c7c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json @@ -0,0 +1,78 @@ +{ + "version": 2, + "endpoints": { + "*/*": { + "endpoint": "{service}.{region}.amazonaws.com" + }, + "cn-north-1/*": { + "endpoint": "{service}.{region}.amazonaws.com.cn", + "signatureVersion": "v4" + }, + "cn-north-1/ec2metadata": { + "endpoint": "http://169.254.169.254/latest" + }, + "us-gov-west-1/iam": { + "endpoint": "iam.us-gov.amazonaws.com" + }, + "us-gov-west-1/sts": { + "endpoint": "sts.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "us-gov-west-1/ec2metadata": { + "endpoint": "http://169.254.169.254/latest" + }, + "*/cloudfront": { + "endpoint": "cloudfront.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/cloudsearchdomain": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/data.iot": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/ec2metadata": { + "endpoint": "http://169.254.169.254/latest" + }, + "*/iam": { + "endpoint": "iam.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/importexport": { + "endpoint": "importexport.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/route53": { + "endpoint": "route53.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/sts": { + "endpoint": "sts.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/waf": { + "endpoint": "waf.amazonaws.com", + "signingRegion": "us-east-1" + }, + "us-east-1/sdb": { + "endpoint": "sdb.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + 
"*/s3/dualstack": { + "endpoint": "s3.dualstack.{region}.amazonaws.com" + }, + "us-east-1/s3": { + "endpoint": "s3.amazonaws.com" + }, + "eu-central-1/s3": { + "endpoint": "{service}.{region}.amazonaws.com" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go new file mode 100644 index 000000000..a81d158c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go @@ -0,0 +1,91 @@ +package endpoints + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +type endpointStruct struct { + Version int + Endpoints map[string]endpointEntry +} + +type endpointEntry struct { + Endpoint string + SigningRegion string +} + +var endpointsMap = endpointStruct{ + Version: 2, + Endpoints: map[string]endpointEntry{ + "*/*": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "*/cloudfront": { + Endpoint: "cloudfront.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/cloudsearchdomain": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/data.iot": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + }, + "*/iam": { + Endpoint: "iam.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/importexport": { + Endpoint: "importexport.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/route53": { + Endpoint: "route53.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "*/s3/dualstack": { + Endpoint: "s3.dualstack.{region}.amazonaws.com", + }, + "*/sts": { + Endpoint: "sts.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/waf": { + Endpoint: "waf.amazonaws.com", + SigningRegion: "us-east-1", + }, + "cn-north-1/*": { + Endpoint: "{service}.{region}.amazonaws.com.cn", + }, + "cn-north-1/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + }, + "eu-central-1/s3": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "us-east-1/s3": { + Endpoint: "s3.amazonaws.com", + }, + "us-east-1/sdb": { + Endpoint: "sdb.amazonaws.com", + SigningRegion: "us-east-1", + }, + "us-gov-west-1/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + }, + "us-gov-west-1/iam": { + Endpoint: "iam.us-gov.amazonaws.com", + }, + "us-gov-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-gov-west-1/sts": { + Endpoint: "sts.us-gov-west-1.amazonaws.com", + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go new file mode 100644 index 000000000..53831dff9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go @@ -0,0 +1,75 @@ +package protocol + +import ( + "crypto/rand" + "fmt" + "reflect" +) + +// RandReader is the random reader the protocol package will use to read +// random bytes from. This is exported for testing, and should not be used. +var RandReader = rand.Reader + +const idempotencyTokenFillTag = `idempotencyToken` + +// CanSetIdempotencyToken returns true if the struct field should be +// automatically populated with a Idempotency token. +// +// Only *string and string type fields that are tagged with idempotencyToken +// which are not already set can be auto filled. 
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { + switch u := v.Interface().(type) { + // To auto fill an Idempotency token the field must be a string, + // tagged for auto fill, and have a zero value. + case *string: + return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + case string: + return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + } + + return false +} + +// GetIdempotencyToken returns a randomly generated idempotency token. +func GetIdempotencyToken() string { + b := make([]byte, 16) + RandReader.Read(b) + + return UUIDVersion4(b) +} + +// SetIdempotencyToken will set the value provided with a Idempotency Token. +// Given that the value can be set. Will panic if value is not setable. +func SetIdempotencyToken(v reflect.Value) { + if v.Kind() == reflect.Ptr { + if v.IsNil() && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = reflect.Indirect(v) + + if !v.CanSet() { + panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) + } + + b := make([]byte, 16) + _, err := rand.Read(b) + if err != nil { + // TODO handle error + return + } + + v.Set(reflect.ValueOf(UUIDVersion4(b))) +} + +// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided +func UUIDVersion4(u []byte) string { + // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 + // 13th character is "4" + u[6] = (u[6] | 0x40) & 0x4F + // 17th character is "8", "9", "a", or "b" + u[8] = (u[8] | 0x80) & 0xBF + + return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 000000000..c705481c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialization of AWS query requests, and responses. +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. 
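The idempotency helpers above are what generated service code relies on to auto-fill token fields that the caller leaves unset. A brief sketch of the two exported entry points:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	// GetIdempotencyToken returns a freshly generated version-4 UUID string.
	fmt.Println(protocol.GetIdempotencyToken())

	// UUIDVersion4 only formats the 16 bytes it is given; with all-zero input
	// the version and variant bits are still forced, yielding a stable value.
	fmt.Println(protocol.UUIDVersion4(make([]byte, 16))) // 00000000-0000-4000-8000-000000000000
}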
+func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + return + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 000000000..60ea0bd1e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,230 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. +func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".member" + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. + mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + v.Set(name, value.UTC().Format(ISO8601UTC)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 000000000..a3ea40955 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,35 @@ +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. 
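queryutil.Parse above drives both the plain Query and EC2 Query serializers. A sketch with a hypothetical input shape (the struct tags are the ones the parser reads; the type itself is illustrative, not a real API shape) shows the key naming it produces:

package main

import (
	"fmt"
	"net/url"

	"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
)

// listTagsInput is a hypothetical shape using the tags parsed above.
type listTagsInput struct {
	ResourceId *string   `locationName:"ResourceId" type:"string"`
	TagKeys    []*string `locationName:"TagKeys" type:"list"`
}

func main() {
	id, k1, k2 := "hz-123", "env", "team"
	in := &listTagsInput{ResourceId: &id, TagKeys: []*string{&k1, &k2}}

	body := url.Values{}
	if err := queryutil.Parse(body, in, false); err != nil {
		panic(err)
	}
	// Scalars keep their location name; non-flattened lists get ".member.N":
	// ResourceId=hz-123&TagKeys.member.1=env&TagKeys.member.2=team
	fmt.Println(body.Encode())
}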
+func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go new file mode 100644 index 000000000..f21429617 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -0,0 +1,66 @@ +package query + +import ( + "encoding/xml" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +type xmlServiceUnavailableResponse struct { + XMLName xml.Name `xml:"ServiceUnavailableException"` +} + +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + +// UnmarshalError unmarshals an error response for an AWS Query service. +func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err) + return + } + + // First check for specific error + resp := xmlErrorResponse{} + decodeErr := xml.Unmarshal(bodyBytes, &resp) + if decodeErr == nil { + reqID := resp.RequestID + if reqID == "" { + reqID = r.RequestID + } + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) + return + } + + // Check for unhandled error + servUnavailResp := xmlServiceUnavailableResponse{} + unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) + if unavailErr == nil { + r.Error = awserr.NewRequestFailure( + awserr.New("ServiceUnavailableException", "service is unavailable", nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Failed to retrieve any error message from the response body + r.Error = awserr.New("SerializationError", + "failed to decode query XML error response", decodeErr) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go new file mode 100644 index 000000000..5f412516d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -0,0 +1,256 @@ +// Package rest provides RESTful serialization of AWS requests and responses. +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RFC822 returns an RFC822 formatted timestamp for AWS protocols +const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' 
|| + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. +func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value) { + query := r.HTTPRequest.URL.Query() + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if m.Kind() == reflect.Ptr { + m = m.Elem() + } + if !m.IsValid() { + continue + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name) + case "querystring": + err = buildQueryString(query, m, name) + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New("SerializationError", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string) error { + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key)) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + + } + + header.Add(prefix+key.String(), str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string) error { + value, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + uri := u.Path + uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1) + uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1) + u.Path = uri + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + 
query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func updatePath(url *url.URL, urlPath string) { + scheme, query := url.Scheme, url.RawQuery + + hasSlash := strings.HasSuffix(urlPath, "/") + + // clean up path + urlPath = path.Clean(urlPath) + if hasSlash && !strings.HasSuffix(urlPath, "/") { + urlPath += "/" + } + + // get formatted URL minus scheme so we can build this into Opaque + url.Scheme, url.Path, url.RawQuery = "", "", "" + s := url.String() + url.Scheme = scheme + url.RawQuery = query + + // build opaque URI + url.Opaque = s + urlPath +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value) (string, error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + var str string + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + str = value.UTC().Format(RFC822) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 000000000..4366de2e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. +func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". 
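Of the REST build helpers above, EscapePath has the behavior most worth pinning down: plain "{name}" URI placeholders escape the path separator, while greedy "{name+}" placeholders keep it. A short sketch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

func main() {
	// encodeSep=true is used when filling plain "{name}" URI placeholders.
	fmt.Println(rest.EscapePath("/hostedzone/Z123 45", true)) // %2Fhostedzone%2FZ123%2045

	// encodeSep=false is used for greedy "{name+}" placeholders, preserving "/".
	fmt.Println(rest.EscapePath("/hostedzone/Z123 45", false)) // /hostedzone/Z123%2045
}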
+func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 000000000..2cba1d9aa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,198 @@ +package rest + +import ( + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadSeeker": + payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) + case "aws.ReadSeekCloser", "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + defer r.HTTPResponse.Body.Close() + r.Error = awserr.New("SerializationError", + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := 
field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + r.Set(reflect.ValueOf(out)) + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string) error { + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + t, err := time.Parse(RFC822, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go new file mode 100644 index 000000000..c74b97e17 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -0,0 +1,69 @@ +// Package restxml provides RESTful XML serialization of AWS +// requests and responses. 
+package restxml + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go + +import ( + "bytes" + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// BuildHandler is a named request handler for building restxml protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} + +// Build builds a request payload for the REST XML protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + var buf bytes.Buffer + err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err) + return + } + r.SetBufferBody(buf.Bytes()) + } +} + +// Unmarshal unmarshals a payload response for the REST XML protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + defer r.HTTPResponse.Body.Close() + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err) + return + } + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST XML protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST XML protocol. +func UnmarshalError(r *request.Request) { + query.UnmarshalError(r) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go new file mode 100644 index 000000000..da1a68111 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -0,0 +1,21 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. 
+func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 000000000..221029baf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,293 @@ +// Package xmlutil provides XML serialization of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// BuildXML will serialize params into an xml.Encoder. +// Error will be returned if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, false) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided reflect will be used to determine the value's type. +func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("_"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested +// types are converted to XMLNodes also. 
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + fieldAdded := false + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + + mTag := field.Tag + if mTag.Get("location") != "" { // skip non-body members + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + + fieldAdded = true + } + + if fieldAdded { // only append this child if we have one ore more valid members + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also. +func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. 
+// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. +func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + str = converted.UTC().Format(ISO8601UTC) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 000000000..49f291a85 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,260 @@ +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. 
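To see the builder above end to end, a sketch that feeds a hypothetical tagged struct through BuildXML (the tags mirror the ones buildStruct and buildScalar read; a single field keeps the output deterministic, since sibling elements come from an unsorted child map):

package main

import (
	"bytes"
	"encoding/xml"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

// person is a hypothetical shape; the "_" field carries the root element name.
type person struct {
	_    struct{} `locationName:"Person" type:"structure"`
	Name *string  `locationName:"Name" type:"string"`
}

func main() {
	name := "Jane"
	var buf bytes.Buffer
	if err := xmlutil.BuildXML(&person{Name: &name}, xml.NewEncoder(&buf)); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // <Person><Name>Jane</Name></Person>
}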
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, _ := XMLToStruct(d, nil) + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err := parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. +func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + for _, a := range node.Attr { + if name == a.Name.Local { + // turn this into a text node for de-serializing + elems = []*XMLNode{{Text: a.Value}} + } + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. 
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + + if tag.Get("flattened") == "" { // look at all item entries + mname := "member" + if name := tag.Get("locationNameList"); name != "" { + mname = name + } + + if Children, ok := node.Children[mname]; ok { + if r.IsNil() { + r.Set(reflect.MakeSlice(t, len(Children), len(Children))) + } + + for i, c := range Children { + err := parse(r.Index(i), c, "") + if err != nil { + return err + } + } + } + } else { // flattened list means this is a single element + if r.IsNil() { + r.Set(reflect.MakeSlice(t, 0, 0)) + } + + childR := reflect.Zero(t.Elem()) + r.Set(reflect.Append(r, childR)) + err := parse(r.Index(r.Len()-1), node, "") + if err != nil { + return err + } + } + + return nil +} + +// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode +// will also be deserialized as map entries. +func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + if r.IsNil() { + r.Set(reflect.MakeMap(r.Type())) + } + + if tag.Get("flattened") == "" { // look at all child entries + for _, entry := range node.Children["entry"] { + parseMapEntry(r, entry, tag) + } + } else { // this element is itself an entry + parseMapEntry(r, node, tag) + } + + return nil +} + +// parseMapEntry deserializes a map entry from a XML node. +func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + keys, ok := node.Children[kname] + values := node.Children[vname] + if ok { + for i, key := range keys { + keyR := reflect.ValueOf(key.Text) + value := values[i] + valueR := reflect.New(r.Type().Elem()).Elem() + + parse(valueR, value, "") + r.SetMapIndex(keyR, valueR) + } + } + return nil +} + +// parseScaller deserializes an XMLNode value into a concrete type based on the +// interface type of r. +// +// Error is returned if the deserialization fails due to invalid type conversion, +// or unsupported interface type. 
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + t, err := time.Parse(ISO8601UTC, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 000000000..72c198a9d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,105 @@ +package xmlutil + +import ( + "encoding/xml" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. +func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if tok == nil || err == io.EOF { + break + } + if err != nil { + return out, err + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + if e != nil { + return out, e + } + node.Name = typed.Name + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + } + } + return out, nil +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. 
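The decode path above is the inverse of the builder: XMLToStruct (defined with XMLNode above) assembles the node tree and parse walks it into the target value. A hedged round-trip sketch with the same kind of hypothetical shape:

package main

import (
	"encoding/xml"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

// person is a hypothetical shape whose tags match the XML below.
type person struct {
	_    struct{} `type:"structure"`
	Name *string  `locationName:"Name" type:"string"`
	Age  *int64   `locationName:"Age" type:"integer"`
}

func main() {
	doc := `<Person><Name>Jane</Name><Age>40</Age></Person>`

	var out person
	dec := xml.NewDecoder(strings.NewReader(doc))
	// No wrapper element here; the Query unmarshaler passes Operation.Name+"Result".
	if err := xmlutil.UnmarshalXML(&out, dec, ""); err != nil {
		panic(err)
	}
	fmt.Println(*out.Name, *out.Age) // Jane 40
}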
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go new file mode 100644 index 000000000..b51e9449c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go @@ -0,0 +1,134 @@ +package waiter + +import ( + "fmt" + "reflect" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides a collection of configuration values to setup a generated +// waiter code with. +type Config struct { + Name string + Delay int + MaxAttempts int + Operation string + Acceptors []WaitAcceptor +} + +// A WaitAcceptor provides the information needed to wait for an API operation +// to complete. +type WaitAcceptor struct { + Expected interface{} + Matcher string + State string + Argument string +} + +// A Waiter provides waiting for an operation to complete. +type Waiter struct { + Config + Client interface{} + Input interface{} +} + +// Wait waits for an operation to complete, expire max attempts, or fail. Error +// is returned if the operation fails. +func (w *Waiter) Wait() error { + client := reflect.ValueOf(w.Client) + in := reflect.ValueOf(w.Input) + method := client.MethodByName(w.Config.Operation + "Request") + + for i := 0; i < w.MaxAttempts; i++ { + res := method.Call([]reflect.Value{in}) + req := res[0].Interface().(*request.Request) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter")) + + err := req.Send() + for _, a := range w.Acceptors { + result := false + var vals []interface{} + switch a.Matcher { + case "pathAll", "path": + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case "pathAny": + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case "status": + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case "error": + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + case "pathList": + // ignored matcher + default: + logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s", + w.Config.Operation, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. 
+ continue + } + + switch a.State { + case "success": + // waiter completed + return nil + case "failure": + // Waiter failure state triggered + return awserr.New("ResourceNotReady", + fmt.Sprintf("failed waiting for successful resource state"), err) + case "retry": + // clear the error and retry the operation + err = nil + default: + logf(client, "WARNING: Waiter for %s encountered unexpected state: %s", + w.Config.Operation, a.State) + } + } + if err != nil { + return err + } + + time.Sleep(time.Second * time.Duration(w.Delay)) + } + + return awserr.New("ResourceNotReady", + fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil) +} + +func logf(client reflect.Value, msg string, args ...interface{}) { + cfgVal := client.FieldByName("Config") + if !cfgVal.IsValid() { + return + } + if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil { + cfg.Logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/api.go b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go new file mode 100644 index 000000000..5a07be987 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go @@ -0,0 +1,7830 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package route53 provides a client for Amazon Route 53. +package route53 + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssociateVPCWithHostedZone = "AssociateVPCWithHostedZone" + +// AssociateVPCWithHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the AssociateVPCWithHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssociateVPCWithHostedZone method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssociateVPCWithHostedZoneRequest method. +// req, resp := client.AssociateVPCWithHostedZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) AssociateVPCWithHostedZoneRequest(input *AssociateVPCWithHostedZoneInput) (req *request.Request, output *AssociateVPCWithHostedZoneOutput) { + op := &request.Operation{ + Name: opAssociateVPCWithHostedZone, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone/{Id}/associatevpc", + } + + if input == nil { + input = &AssociateVPCWithHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &AssociateVPCWithHostedZoneOutput{} + req.Data = output + return +} + +// This action associates a VPC with an hosted zone. +// +// To associate a VPC with an hosted zone, send a POST request to the /Route +// 53 API version/hostedzone/hosted zone ID/associatevpc resource. The request +// body must include a document with a AssociateVPCWithHostedZoneRequest element. +// The response returns the AssociateVPCWithHostedZoneResponse element that +// contains ChangeInfo for you to track the progress of the AssociateVPCWithHostedZoneRequest +// you made. 
See GetChange operation for how to track the progress of your change. +func (c *Route53) AssociateVPCWithHostedZone(input *AssociateVPCWithHostedZoneInput) (*AssociateVPCWithHostedZoneOutput, error) { + req, out := c.AssociateVPCWithHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opChangeResourceRecordSets = "ChangeResourceRecordSets" + +// ChangeResourceRecordSetsRequest generates a "aws/request.Request" representing the +// client's request for the ChangeResourceRecordSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ChangeResourceRecordSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ChangeResourceRecordSetsRequest method. +// req, resp := client.ChangeResourceRecordSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ChangeResourceRecordSetsRequest(input *ChangeResourceRecordSetsInput) (req *request.Request, output *ChangeResourceRecordSetsOutput) { + op := &request.Operation{ + Name: opChangeResourceRecordSets, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone/{Id}/rrset/", + } + + if input == nil { + input = &ChangeResourceRecordSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ChangeResourceRecordSetsOutput{} + req.Data = output + return +} + +// Use this action to create or change your authoritative DNS information. To +// use this action, send a POST request to the /Route 53 API version/hostedzone/hosted +// Zone ID/rrset resource. The request body must include a document with a ChangeResourceRecordSetsRequest +// element. +// +// Changes are a list of change items and are considered transactional. For +// more information on transactional changes, also known as change batches, +// see POST ChangeResourceRecordSets (http://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html) +// in the Amazon Route 53 API Reference. +// +// Due to the nature of transactional changes, you cannot delete the same resource +// record set more than once in a single change batch. If you attempt to delete +// the same change batch more than once, Amazon Route 53 returns an InvalidChangeBatch +// error. In response to a ChangeResourceRecordSets request, your DNS data is +// changed on all Amazon Route 53 DNS servers. Initially, the status of a change +// is PENDING. This means the change has not yet propagated to all the authoritative +// Amazon Route 53 DNS servers. When the change is propagated to all hosts, +// the change returns a status of INSYNC. +// +// Note the following limitations on a ChangeResourceRecordSets request: +// +// A request cannot contain more than 100 Change elements. A request cannot +// contain more than 1000 ResourceRecord elements. The sum of the number of +// characters (including spaces) in all Value elements in a request cannot exceed +// 32,000 characters. 
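The limits above leave ample headroom for a single UPSERT, the smallest useful change batch. A minimal sketch of such a call through this client, assuming credentials come from the default provider chain and using a hypothetical hosted zone ID, record name, and address:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	sess, err := session.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	svc := route53.New(sess)

	// One Change element with one ResourceRecord: well inside the batch limits.
	out, err := svc.ChangeResourceRecordSets(&route53.ChangeResourceRecordSetsInput{
		HostedZoneId: aws.String("Z1EXAMPLE"), // hypothetical zone ID
		ChangeBatch: &route53.ChangeBatch{
			Comment: aws.String("upsert www A record"),
			Changes: []*route53.Change{{
				Action: aws.String("UPSERT"),
				ResourceRecordSet: &route53.ResourceRecordSet{
					Name: aws.String("www.example.com."),
					Type: aws.String("A"),
					TTL:  aws.Int64(300),
					ResourceRecords: []*route53.ResourceRecord{
						{Value: aws.String("192.0.2.1")}, // hypothetical address
					},
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// The change starts out PENDING; poll GetChange until it reports INSYNC.
	fmt.Println(aws.StringValue(out.ChangeInfo.Status))
}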
+func (c *Route53) ChangeResourceRecordSets(input *ChangeResourceRecordSetsInput) (*ChangeResourceRecordSetsOutput, error) { + req, out := c.ChangeResourceRecordSetsRequest(input) + err := req.Send() + return out, err +} + +const opChangeTagsForResource = "ChangeTagsForResource" + +// ChangeTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ChangeTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ChangeTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ChangeTagsForResourceRequest method. +// req, resp := client.ChangeTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ChangeTagsForResourceRequest(input *ChangeTagsForResourceInput) (req *request.Request, output *ChangeTagsForResourceOutput) { + op := &request.Operation{ + Name: opChangeTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/tags/{ResourceType}/{ResourceId}", + } + + if input == nil { + input = &ChangeTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ChangeTagsForResourceOutput{} + req.Data = output + return +} + +func (c *Route53) ChangeTagsForResource(input *ChangeTagsForResourceInput) (*ChangeTagsForResourceOutput, error) { + req, out := c.ChangeTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opCreateHealthCheck = "CreateHealthCheck" + +// CreateHealthCheckRequest generates a "aws/request.Request" representing the +// client's request for the CreateHealthCheck operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateHealthCheck method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateHealthCheckRequest method. +// req, resp := client.CreateHealthCheckRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) CreateHealthCheckRequest(input *CreateHealthCheckInput) (req *request.Request, output *CreateHealthCheckOutput) { + op := &request.Operation{ + Name: opCreateHealthCheck, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/healthcheck", + } + + if input == nil { + input = &CreateHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHealthCheckOutput{} + req.Data = output + return +} + +// This action creates a new health check. 
+// +// To create a new health check, send a POST request to the /Route 53 API version/healthcheck +// resource. The request body must include a document with a CreateHealthCheckRequest +// element. The response returns the CreateHealthCheckResponse element that +// contains metadata about the health check. +func (c *Route53) CreateHealthCheck(input *CreateHealthCheckInput) (*CreateHealthCheckOutput, error) { + req, out := c.CreateHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opCreateHostedZone = "CreateHostedZone" + +// CreateHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the CreateHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateHostedZone method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateHostedZoneRequest method. +// req, resp := client.CreateHostedZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) CreateHostedZoneRequest(input *CreateHostedZoneInput) (req *request.Request, output *CreateHostedZoneOutput) { + op := &request.Operation{ + Name: opCreateHostedZone, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone", + } + + if input == nil { + input = &CreateHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHostedZoneOutput{} + req.Data = output + return +} + +// This action creates a new hosted zone. +// +// To create a new hosted zone, send a POST request to the /Route 53 API version/hostedzone +// resource. The request body must include a document with a CreateHostedZoneRequest +// element. The response returns the CreateHostedZoneResponse element that contains +// metadata about the hosted zone. +// +// Amazon Route 53 automatically creates a default SOA record and four NS records +// for the zone. The NS records in the hosted zone are the name servers you +// give your registrar to delegate your domain to. For more information about +// SOA and NS records, see NS and SOA Records that Amazon Route 53 Creates for +// a Hosted Zone (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html) +// in the Amazon Route 53 Developer Guide. +// +// When you create a zone, its initial status is PENDING. This means that it +// is not yet available on all DNS servers. The status of the zone changes to +// INSYNC when the NS and SOA records are available on all Amazon Route 53 DNS +// servers. +// +// When trying to create a hosted zone using a reusable delegation set, you +// could specify an optional DelegationSetId, and Route53 would assign those +// 4 NS records for the zone, instead of alloting a new one. 
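A minimal sketch of creating a zone through this client and reading back the assigned delegation set, assuming a hypothetical domain name; the CallerReference here is just a timestamp chosen to satisfy the per-request uniqueness requirement:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	sess, err := session.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	svc := route53.New(sess)

	// CallerReference must be unique across CreateHostedZone requests.
	out, err := svc.CreateHostedZone(&route53.CreateHostedZoneInput{
		Name:            aws.String("example.com."), // hypothetical domain
		CallerReference: aws.String(fmt.Sprintf("example-%d", time.Now().UnixNano())),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The four assigned name servers to hand to the registrar.
	for _, ns := range out.DelegationSet.NameServers {
		fmt.Println(aws.StringValue(ns))
	}
}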
+func (c *Route53) CreateHostedZone(input *CreateHostedZoneInput) (*CreateHostedZoneOutput, error) { + req, out := c.CreateHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opCreateReusableDelegationSet = "CreateReusableDelegationSet" + +// CreateReusableDelegationSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateReusableDelegationSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateReusableDelegationSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateReusableDelegationSetRequest method. +// req, resp := client.CreateReusableDelegationSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) CreateReusableDelegationSetRequest(input *CreateReusableDelegationSetInput) (req *request.Request, output *CreateReusableDelegationSetOutput) { + op := &request.Operation{ + Name: opCreateReusableDelegationSet, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/delegationset", + } + + if input == nil { + input = &CreateReusableDelegationSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReusableDelegationSetOutput{} + req.Data = output + return +} + +// This action creates a reusable delegationSet. +// +// To create a new reusable delegationSet, send a POST request to the /Route +// 53 API version/delegationset resource. The request body must include a document +// with a CreateReusableDelegationSetRequest element. The response returns the +// CreateReusableDelegationSetResponse element that contains metadata about +// the delegationSet. +// +// If the optional parameter HostedZoneId is specified, it marks the delegationSet +// associated with that particular hosted zone as reusable. +func (c *Route53) CreateReusableDelegationSet(input *CreateReusableDelegationSetInput) (*CreateReusableDelegationSetOutput, error) { + req, out := c.CreateReusableDelegationSetRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrafficPolicy = "CreateTrafficPolicy" + +// CreateTrafficPolicyRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrafficPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTrafficPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTrafficPolicyRequest method. 
+// req, resp := client.CreateTrafficPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) CreateTrafficPolicyRequest(input *CreateTrafficPolicyInput) (req *request.Request, output *CreateTrafficPolicyOutput) { + op := &request.Operation{ + Name: opCreateTrafficPolicy, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicy", + } + + if input == nil { + input = &CreateTrafficPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrafficPolicyOutput{} + req.Data = output + return +} + +// Creates a traffic policy, which you use to create multiple DNS resource record +// sets for one domain name (such as example.com) or one subdomain name (such +// as www.example.com). +// +// To create a traffic policy, send a POST request to the /Route 53 API version/trafficpolicy +// resource. The request body must include a document with a CreateTrafficPolicyRequest +// element. The response includes the CreateTrafficPolicyResponse element, which +// contains information about the new traffic policy. +func (c *Route53) CreateTrafficPolicy(input *CreateTrafficPolicyInput) (*CreateTrafficPolicyOutput, error) { + req, out := c.CreateTrafficPolicyRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrafficPolicyInstance = "CreateTrafficPolicyInstance" + +// CreateTrafficPolicyInstanceRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrafficPolicyInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTrafficPolicyInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTrafficPolicyInstanceRequest method. +// req, resp := client.CreateTrafficPolicyInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) CreateTrafficPolicyInstanceRequest(input *CreateTrafficPolicyInstanceInput) (req *request.Request, output *CreateTrafficPolicyInstanceOutput) { + op := &request.Operation{ + Name: opCreateTrafficPolicyInstance, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicyinstance", + } + + if input == nil { + input = &CreateTrafficPolicyInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrafficPolicyInstanceOutput{} + req.Data = output + return +} + +// Creates resource record sets in a specified hosted zone based on the settings +// in a specified traffic policy version. In addition, CreateTrafficPolicyInstance +// associates the resource record sets with a specified domain name (such as +// example.com) or subdomain name (such as www.example.com). Amazon Route 53 +// responds to DNS queries for the domain or subdomain name by using the resource +// record sets that CreateTrafficPolicyInstance created. +// +// To create a traffic policy instance, send a POST request to the /Route 53 +// API version/trafficpolicyinstance resource. 
The request body must include +// a document with a CreateTrafficPolicyRequest element. The response returns +// the CreateTrafficPolicyInstanceResponse element, which contains information +// about the traffic policy instance. +func (c *Route53) CreateTrafficPolicyInstance(input *CreateTrafficPolicyInstanceInput) (*CreateTrafficPolicyInstanceOutput, error) { + req, out := c.CreateTrafficPolicyInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrafficPolicyVersion = "CreateTrafficPolicyVersion" + +// CreateTrafficPolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrafficPolicyVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTrafficPolicyVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTrafficPolicyVersionRequest method. +// req, resp := client.CreateTrafficPolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) CreateTrafficPolicyVersionRequest(input *CreateTrafficPolicyVersionInput) (req *request.Request, output *CreateTrafficPolicyVersionOutput) { + op := &request.Operation{ + Name: opCreateTrafficPolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicy/{Id}", + } + + if input == nil { + input = &CreateTrafficPolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrafficPolicyVersionOutput{} + req.Data = output + return +} + +// Creates a new version of an existing traffic policy. When you create a new +// version of a traffic policy, you specify the ID of the traffic policy that +// you want to update and a JSON-formatted document that describes the new version. +// +// You use traffic policies to create multiple DNS resource record sets for +// one domain name (such as example.com) or one subdomain name (such as www.example.com). +// +// To create a new version, send a POST request to the /Route 53 API version/trafficpolicy/ +// resource. The request body includes a document with a CreateTrafficPolicyVersionRequest +// element. The response returns the CreateTrafficPolicyVersionResponse element, +// which contains information about the new version of the traffic policy. +func (c *Route53) CreateTrafficPolicyVersion(input *CreateTrafficPolicyVersionInput) (*CreateTrafficPolicyVersionOutput, error) { + req, out := c.CreateTrafficPolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHealthCheck = "DeleteHealthCheck" + +// DeleteHealthCheckRequest generates a "aws/request.Request" representing the +// client's request for the DeleteHealthCheck operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteHealthCheck method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteHealthCheckRequest method. +// req, resp := client.DeleteHealthCheckRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) DeleteHealthCheckRequest(input *DeleteHealthCheckInput) (req *request.Request, output *DeleteHealthCheckOutput) { + op := &request.Operation{ + Name: opDeleteHealthCheck, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}", + } + + if input == nil { + input = &DeleteHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteHealthCheckOutput{} + req.Data = output + return +} + +// This action deletes a health check. To delete a health check, send a DELETE +// request to the /Route 53 API version/healthcheck/health check ID resource. +// +// You can delete a health check only if there are no resource record sets +// associated with this health check. If resource record sets are associated +// with this health check, you must disassociate them before you can delete +// your health check. If you try to delete a health check that is associated +// with resource record sets, Amazon Route 53 will deny your request with a +// HealthCheckInUse error. For information about disassociating the records +// from your health check, see ChangeResourceRecordSets. +func (c *Route53) DeleteHealthCheck(input *DeleteHealthCheckInput) (*DeleteHealthCheckOutput, error) { + req, out := c.DeleteHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHostedZone = "DeleteHostedZone" + +// DeleteHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the DeleteHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteHostedZone method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteHostedZoneRequest method. +// req, resp := client.DeleteHostedZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) DeleteHostedZoneRequest(input *DeleteHostedZoneInput) (req *request.Request, output *DeleteHostedZoneOutput) { + op := &request.Operation{ + Name: opDeleteHostedZone, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/hostedzone/{Id}", + } + + if input == nil { + input = &DeleteHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteHostedZoneOutput{} + req.Data = output + return +} + +// This action deletes a hosted zone. 
To delete a hosted zone, send a DELETE +// request to the /Route 53 API version/hostedzone/hosted zone ID resource. +// +// You can delete a hosted zone only if there are no resource record sets other +// than the default SOA record and NS resource record sets. If your hosted zone +// contains other resource record sets, you must delete them before you can +// delete your hosted zone. If you try to delete a hosted zone that contains +// other resource record sets, Amazon Route 53 will deny your request with a +// HostedZoneNotEmpty error. For information about deleting records from your +// hosted zone, see ChangeResourceRecordSets. +func (c *Route53) DeleteHostedZone(input *DeleteHostedZoneInput) (*DeleteHostedZoneOutput, error) { + req, out := c.DeleteHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReusableDelegationSet = "DeleteReusableDelegationSet" + +// DeleteReusableDelegationSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReusableDelegationSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteReusableDelegationSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteReusableDelegationSetRequest method. +// req, resp := client.DeleteReusableDelegationSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) DeleteReusableDelegationSetRequest(input *DeleteReusableDelegationSetInput) (req *request.Request, output *DeleteReusableDelegationSetOutput) { + op := &request.Operation{ + Name: opDeleteReusableDelegationSet, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/delegationset/{Id}", + } + + if input == nil { + input = &DeleteReusableDelegationSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReusableDelegationSetOutput{} + req.Data = output + return +} + +// This action deletes a reusable delegation set. To delete a reusable delegation +// set, send a DELETE request to the /Route 53 API version/delegationset/delegation +// set ID resource. +// +// You can delete a reusable delegation set only if there are no associated +// hosted zones. If your reusable delegation set contains associated hosted +// zones, you must delete them before you can delete your reusable delegation +// set. If you try to delete a reusable delegation set that contains associated +// hosted zones, Amazon Route 53 will deny your request with a DelegationSetInUse +// error. +func (c *Route53) DeleteReusableDelegationSet(input *DeleteReusableDelegationSetInput) (*DeleteReusableDelegationSetOutput, error) { + req, out := c.DeleteReusableDelegationSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTrafficPolicy = "DeleteTrafficPolicy" + +// DeleteTrafficPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrafficPolicy operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTrafficPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTrafficPolicyRequest method. +// req, resp := client.DeleteTrafficPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) DeleteTrafficPolicyRequest(input *DeleteTrafficPolicyInput) (req *request.Request, output *DeleteTrafficPolicyOutput) { + op := &request.Operation{ + Name: opDeleteTrafficPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/trafficpolicy/{Id}/{Version}", + } + + if input == nil { + input = &DeleteTrafficPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTrafficPolicyOutput{} + req.Data = output + return +} + +// Deletes a traffic policy. To delete a traffic policy, send a DELETE request +// to the /Route 53 API version/trafficpolicy resource. +func (c *Route53) DeleteTrafficPolicy(input *DeleteTrafficPolicyInput) (*DeleteTrafficPolicyOutput, error) { + req, out := c.DeleteTrafficPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTrafficPolicyInstance = "DeleteTrafficPolicyInstance" + +// DeleteTrafficPolicyInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrafficPolicyInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTrafficPolicyInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTrafficPolicyInstanceRequest method. +// req, resp := client.DeleteTrafficPolicyInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) DeleteTrafficPolicyInstanceRequest(input *DeleteTrafficPolicyInstanceInput) (req *request.Request, output *DeleteTrafficPolicyInstanceOutput) { + op := &request.Operation{ + Name: opDeleteTrafficPolicyInstance, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/trafficpolicyinstance/{Id}", + } + + if input == nil { + input = &DeleteTrafficPolicyInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTrafficPolicyInstanceOutput{} + req.Data = output + return +} + +// Deletes a traffic policy instance and all of the resource record sets that +// Amazon Route 53 created when you created the instance. +// +// To delete a traffic policy instance, send a DELETE request to the /Route +// 53 API version/trafficpolicy/traffic policy instance ID resource. 
+// +// When you delete a traffic policy instance, Amazon Route 53 also deletes +// all of the resource record sets that were created when you created the traffic +// policy instance. +func (c *Route53) DeleteTrafficPolicyInstance(input *DeleteTrafficPolicyInstanceInput) (*DeleteTrafficPolicyInstanceOutput, error) { + req, out := c.DeleteTrafficPolicyInstanceRequest(input) + err := req.Send() + return out, err +} + +const opDisassociateVPCFromHostedZone = "DisassociateVPCFromHostedZone" + +// DisassociateVPCFromHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateVPCFromHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisassociateVPCFromHostedZone method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisassociateVPCFromHostedZoneRequest method. +// req, resp := client.DisassociateVPCFromHostedZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) DisassociateVPCFromHostedZoneRequest(input *DisassociateVPCFromHostedZoneInput) (req *request.Request, output *DisassociateVPCFromHostedZoneOutput) { + op := &request.Operation{ + Name: opDisassociateVPCFromHostedZone, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone/{Id}/disassociatevpc", + } + + if input == nil { + input = &DisassociateVPCFromHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &DisassociateVPCFromHostedZoneOutput{} + req.Data = output + return +} + +// This action disassociates a VPC from an hosted zone. +// +// To disassociate a VPC to a hosted zone, send a POST request to the /Route +// 53 API version/hostedzone/hosted zone ID/disassociatevpc resource. The request +// body must include a document with a DisassociateVPCFromHostedZoneRequest +// element. The response returns the DisassociateVPCFromHostedZoneResponse element +// that contains ChangeInfo for you to track the progress of the DisassociateVPCFromHostedZoneRequest +// you made. See GetChange operation for how to track the progress of your change. +func (c *Route53) DisassociateVPCFromHostedZone(input *DisassociateVPCFromHostedZoneInput) (*DisassociateVPCFromHostedZoneOutput, error) { + req, out := c.DisassociateVPCFromHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opGetChange = "GetChange" + +// GetChangeRequest generates a "aws/request.Request" representing the +// client's request for the GetChange operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetChange method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetChangeRequest method. +// req, resp := client.GetChangeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetChangeRequest(input *GetChangeInput) (req *request.Request, output *GetChangeOutput) { + op := &request.Operation{ + Name: opGetChange, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/change/{Id}", + } + + if input == nil { + input = &GetChangeInput{} + } + + req = c.newRequest(op, input, output) + output = &GetChangeOutput{} + req.Data = output + return +} + +// This action returns the current status of a change batch request. The status +// is one of the following values: +// +// - PENDING indicates that the changes in this request have not replicated +// to all Amazon Route 53 DNS servers. This is the initial status of all change +// batch requests. +// +// - INSYNC indicates that the changes have replicated to all Amazon Route +// 53 DNS servers. +func (c *Route53) GetChange(input *GetChangeInput) (*GetChangeOutput, error) { + req, out := c.GetChangeRequest(input) + err := req.Send() + return out, err +} + +const opGetChangeDetails = "GetChangeDetails" + +// GetChangeDetailsRequest generates a "aws/request.Request" representing the +// client's request for the GetChangeDetails operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetChangeDetails method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetChangeDetailsRequest method. +// req, resp := client.GetChangeDetailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetChangeDetailsRequest(input *GetChangeDetailsInput) (req *request.Request, output *GetChangeDetailsOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetChangeDetails, has been deprecated") + } + op := &request.Operation{ + Name: opGetChangeDetails, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/changedetails/{Id}", + } + + if input == nil { + input = &GetChangeDetailsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetChangeDetailsOutput{} + req.Data = output + return +} + +// This action returns the status and changes of a change batch request. +func (c *Route53) GetChangeDetails(input *GetChangeDetailsInput) (*GetChangeDetailsOutput, error) { + req, out := c.GetChangeDetailsRequest(input) + err := req.Send() + return out, err +} + +const opGetCheckerIpRanges = "GetCheckerIpRanges" + +// GetCheckerIpRangesRequest generates a "aws/request.Request" representing the +// client's request for the GetCheckerIpRanges operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCheckerIpRanges method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCheckerIpRangesRequest method. +// req, resp := client.GetCheckerIpRangesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetCheckerIpRangesRequest(input *GetCheckerIpRangesInput) (req *request.Request, output *GetCheckerIpRangesOutput) { + op := &request.Operation{ + Name: opGetCheckerIpRanges, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/checkeripranges", + } + + if input == nil { + input = &GetCheckerIpRangesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCheckerIpRangesOutput{} + req.Data = output + return +} + +// To retrieve a list of the IP ranges used by Amazon Route 53 health checkers +// to check the health of your resources, send a GET request to the /Route 53 +// API version/checkeripranges resource. You can use these IP addresses to configure +// router and firewall rules to allow health checkers to check the health of +// your resources. +func (c *Route53) GetCheckerIpRanges(input *GetCheckerIpRangesInput) (*GetCheckerIpRangesOutput, error) { + req, out := c.GetCheckerIpRangesRequest(input) + err := req.Send() + return out, err +} + +const opGetGeoLocation = "GetGeoLocation" + +// GetGeoLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetGeoLocation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetGeoLocation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetGeoLocationRequest method. +// req, resp := client.GetGeoLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetGeoLocationRequest(input *GetGeoLocationInput) (req *request.Request, output *GetGeoLocationOutput) { + op := &request.Operation{ + Name: opGetGeoLocation, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/geolocation", + } + + if input == nil { + input = &GetGeoLocationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetGeoLocationOutput{} + req.Data = output + return +} + +// To retrieve a single geo location, send a GET request to the /Route 53 API +// version/geolocation resource with one of these options: continentcode | countrycode +// | countrycode and subdivisioncode. 
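A minimal sketch of looking up one geo location through this client, assuming a hypothetical country and subdivision pair (a continent code alone would be the other option the comment above describes):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	sess, err := session.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	svc := route53.New(sess)

	// Country code plus subdivision code selects a single geo location.
	loc, err := svc.GetGeoLocation(&route53.GetGeoLocationInput{
		CountryCode:     aws.String("US"),
		SubdivisionCode: aws.String("CO"), // hypothetical subdivision
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(loc.GeoLocationDetails.SubdivisionName))
}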
+func (c *Route53) GetGeoLocation(input *GetGeoLocationInput) (*GetGeoLocationOutput, error) { + req, out := c.GetGeoLocationRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheck = "GetHealthCheck" + +// GetHealthCheckRequest generates a "aws/request.Request" representing the +// client's request for the GetHealthCheck operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHealthCheck method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHealthCheckRequest method. +// req, resp := client.GetHealthCheckRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetHealthCheckRequest(input *GetHealthCheckInput) (req *request.Request, output *GetHealthCheckOutput) { + op := &request.Operation{ + Name: opGetHealthCheck, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}", + } + + if input == nil { + input = &GetHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckOutput{} + req.Data = output + return +} + +// To retrieve the health check, send a GET request to the /Route 53 API version/healthcheck/health +// check ID resource. +func (c *Route53) GetHealthCheck(input *GetHealthCheckInput) (*GetHealthCheckOutput, error) { + req, out := c.GetHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheckCount = "GetHealthCheckCount" + +// GetHealthCheckCountRequest generates a "aws/request.Request" representing the +// client's request for the GetHealthCheckCount operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHealthCheckCount method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHealthCheckCountRequest method. +// req, resp := client.GetHealthCheckCountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetHealthCheckCountRequest(input *GetHealthCheckCountInput) (req *request.Request, output *GetHealthCheckCountOutput) { + op := &request.Operation{ + Name: opGetHealthCheckCount, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheckcount", + } + + if input == nil { + input = &GetHealthCheckCountInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckCountOutput{} + req.Data = output + return +} + +// To retrieve a count of all your health checks, send a GET request to the +// /Route 53 API version/healthcheckcount resource. 
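A minimal sketch of fetching the health-check count through this client; the input struct carries no parameters, and the count comes back as a *int64:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	sess, err := session.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	svc := route53.New(sess)

	// No request parameters; the account-wide count is the only response field.
	out, err := svc.GetHealthCheckCount(&route53.GetHealthCheckCountInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.Int64Value(out.HealthCheckCount))
}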
+func (c *Route53) GetHealthCheckCount(input *GetHealthCheckCountInput) (*GetHealthCheckCountOutput, error) { + req, out := c.GetHealthCheckCountRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheckLastFailureReason = "GetHealthCheckLastFailureReason" + +// GetHealthCheckLastFailureReasonRequest generates a "aws/request.Request" representing the +// client's request for the GetHealthCheckLastFailureReason operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHealthCheckLastFailureReason method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHealthCheckLastFailureReasonRequest method. +// req, resp := client.GetHealthCheckLastFailureReasonRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetHealthCheckLastFailureReasonRequest(input *GetHealthCheckLastFailureReasonInput) (req *request.Request, output *GetHealthCheckLastFailureReasonOutput) { + op := &request.Operation{ + Name: opGetHealthCheckLastFailureReason, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}/lastfailurereason", + } + + if input == nil { + input = &GetHealthCheckLastFailureReasonInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckLastFailureReasonOutput{} + req.Data = output + return +} + +// If you want to learn why a health check is currently failing or why it failed +// most recently (if at all), you can get the failure reason for the most recent +// failure. Send a GET request to the /Route 53 API version/healthcheck/health +// check ID/lastfailurereason resource. +func (c *Route53) GetHealthCheckLastFailureReason(input *GetHealthCheckLastFailureReasonInput) (*GetHealthCheckLastFailureReasonOutput, error) { + req, out := c.GetHealthCheckLastFailureReasonRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheckStatus = "GetHealthCheckStatus" + +// GetHealthCheckStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetHealthCheckStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHealthCheckStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHealthCheckStatusRequest method. 
+// req, resp := client.GetHealthCheckStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetHealthCheckStatusRequest(input *GetHealthCheckStatusInput) (req *request.Request, output *GetHealthCheckStatusOutput) { + op := &request.Operation{ + Name: opGetHealthCheckStatus, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}/status", + } + + if input == nil { + input = &GetHealthCheckStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckStatusOutput{} + req.Data = output + return +} + +// To retrieve the health check status, send a GET request to the /Route 53 +// API version/healthcheck/health check ID/status resource. You can use this +// call to get a health check's current status. +func (c *Route53) GetHealthCheckStatus(input *GetHealthCheckStatusInput) (*GetHealthCheckStatusOutput, error) { + req, out := c.GetHealthCheckStatusRequest(input) + err := req.Send() + return out, err +} + +const opGetHostedZone = "GetHostedZone" + +// GetHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the GetHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHostedZone method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHostedZoneRequest method. +// req, resp := client.GetHostedZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetHostedZoneRequest(input *GetHostedZoneInput) (req *request.Request, output *GetHostedZoneOutput) { + op := &request.Operation{ + Name: opGetHostedZone, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}", + } + + if input == nil { + input = &GetHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHostedZoneOutput{} + req.Data = output + return +} + +// To retrieve the delegation set for a hosted zone, send a GET request to the +// /Route 53 API version/hostedzone/hosted zone ID resource. The delegation +// set is the four Amazon Route 53 name servers that were assigned to the hosted +// zone when you created it. +func (c *Route53) GetHostedZone(input *GetHostedZoneInput) (*GetHostedZoneOutput, error) { + req, out := c.GetHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opGetHostedZoneCount = "GetHostedZoneCount" + +// GetHostedZoneCountRequest generates a "aws/request.Request" representing the +// client's request for the GetHostedZoneCount operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the GetHostedZoneCount method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHostedZoneCountRequest method. +// req, resp := client.GetHostedZoneCountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetHostedZoneCountRequest(input *GetHostedZoneCountInput) (req *request.Request, output *GetHostedZoneCountOutput) { + op := &request.Operation{ + Name: opGetHostedZoneCount, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzonecount", + } + + if input == nil { + input = &GetHostedZoneCountInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHostedZoneCountOutput{} + req.Data = output + return +} + +// To retrieve a count of all your hosted zones, send a GET request to the /Route +// 53 API version/hostedzonecount resource. +func (c *Route53) GetHostedZoneCount(input *GetHostedZoneCountInput) (*GetHostedZoneCountOutput, error) { + req, out := c.GetHostedZoneCountRequest(input) + err := req.Send() + return out, err +} + +const opGetReusableDelegationSet = "GetReusableDelegationSet" + +// GetReusableDelegationSetRequest generates a "aws/request.Request" representing the +// client's request for the GetReusableDelegationSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetReusableDelegationSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetReusableDelegationSetRequest method. +// req, resp := client.GetReusableDelegationSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetReusableDelegationSetRequest(input *GetReusableDelegationSetInput) (req *request.Request, output *GetReusableDelegationSetOutput) { + op := &request.Operation{ + Name: opGetReusableDelegationSet, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/delegationset/{Id}", + } + + if input == nil { + input = &GetReusableDelegationSetInput{} + } + + req = c.newRequest(op, input, output) + output = &GetReusableDelegationSetOutput{} + req.Data = output + return +} + +// To retrieve the reusable delegation set, send a GET request to the /Route +// 53 API version/delegationset/delegation set ID resource. +func (c *Route53) GetReusableDelegationSet(input *GetReusableDelegationSetInput) (*GetReusableDelegationSetOutput, error) { + req, out := c.GetReusableDelegationSetRequest(input) + err := req.Send() + return out, err +} + +const opGetTrafficPolicy = "GetTrafficPolicy" + +// GetTrafficPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetTrafficPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTrafficPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTrafficPolicyRequest method. +// req, resp := client.GetTrafficPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetTrafficPolicyRequest(input *GetTrafficPolicyInput) (req *request.Request, output *GetTrafficPolicyOutput) { + op := &request.Operation{ + Name: opGetTrafficPolicy, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicy/{Id}/{Version}", + } + + if input == nil { + input = &GetTrafficPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrafficPolicyOutput{} + req.Data = output + return +} + +// Gets information about a specific traffic policy version. To get the information, +// send a GET request to the /Route 53 API version/trafficpolicy resource. +func (c *Route53) GetTrafficPolicy(input *GetTrafficPolicyInput) (*GetTrafficPolicyOutput, error) { + req, out := c.GetTrafficPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetTrafficPolicyInstance = "GetTrafficPolicyInstance" + +// GetTrafficPolicyInstanceRequest generates a "aws/request.Request" representing the +// client's request for the GetTrafficPolicyInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTrafficPolicyInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTrafficPolicyInstanceRequest method. +// req, resp := client.GetTrafficPolicyInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetTrafficPolicyInstanceRequest(input *GetTrafficPolicyInstanceInput) (req *request.Request, output *GetTrafficPolicyInstanceOutput) { + op := &request.Operation{ + Name: opGetTrafficPolicyInstance, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstance/{Id}", + } + + if input == nil { + input = &GetTrafficPolicyInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrafficPolicyInstanceOutput{} + req.Data = output + return +} + +// Gets information about a specified traffic policy instance. +// +// To get information about the traffic policy instance, send a GET request +// to the /Route 53 API version/trafficpolicyinstance resource. +// +// After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance +// request, there's a brief delay while Amazon Route 53 creates the resource +// record sets that are specified in the traffic policy definition. 
For more +// information, see the State response element. +func (c *Route53) GetTrafficPolicyInstance(input *GetTrafficPolicyInstanceInput) (*GetTrafficPolicyInstanceOutput, error) { + req, out := c.GetTrafficPolicyInstanceRequest(input) + err := req.Send() + return out, err +} + +const opGetTrafficPolicyInstanceCount = "GetTrafficPolicyInstanceCount" + +// GetTrafficPolicyInstanceCountRequest generates a "aws/request.Request" representing the +// client's request for the GetTrafficPolicyInstanceCount operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTrafficPolicyInstanceCount method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTrafficPolicyInstanceCountRequest method. +// req, resp := client.GetTrafficPolicyInstanceCountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetTrafficPolicyInstanceCountRequest(input *GetTrafficPolicyInstanceCountInput) (req *request.Request, output *GetTrafficPolicyInstanceCountOutput) { + op := &request.Operation{ + Name: opGetTrafficPolicyInstanceCount, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstancecount", + } + + if input == nil { + input = &GetTrafficPolicyInstanceCountInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrafficPolicyInstanceCountOutput{} + req.Data = output + return +} + +// Gets the number of traffic policy instances that are associated with the +// current AWS account. +// +// To get the number of traffic policy instances, send a GET request to the +// /Route 53 API version/trafficpolicyinstancecount resource. +func (c *Route53) GetTrafficPolicyInstanceCount(input *GetTrafficPolicyInstanceCountInput) (*GetTrafficPolicyInstanceCountOutput, error) { + req, out := c.GetTrafficPolicyInstanceCountRequest(input) + err := req.Send() + return out, err +} + +const opListChangeBatchesByHostedZone = "ListChangeBatchesByHostedZone" + +// ListChangeBatchesByHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the ListChangeBatchesByHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListChangeBatchesByHostedZone method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListChangeBatchesByHostedZoneRequest method. 
+// req, resp := client.ListChangeBatchesByHostedZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListChangeBatchesByHostedZoneRequest(input *ListChangeBatchesByHostedZoneInput) (req *request.Request, output *ListChangeBatchesByHostedZoneOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, ListChangeBatchesByHostedZone, has been deprecated") + } + op := &request.Operation{ + Name: opListChangeBatchesByHostedZone, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}/changes", + } + + if input == nil { + input = &ListChangeBatchesByHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &ListChangeBatchesByHostedZoneOutput{} + req.Data = output + return +} + +// This action gets the list of ChangeBatches in a given time period for a given +// hosted zone. +func (c *Route53) ListChangeBatchesByHostedZone(input *ListChangeBatchesByHostedZoneInput) (*ListChangeBatchesByHostedZoneOutput, error) { + req, out := c.ListChangeBatchesByHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opListChangeBatchesByRRSet = "ListChangeBatchesByRRSet" + +// ListChangeBatchesByRRSetRequest generates a "aws/request.Request" representing the +// client's request for the ListChangeBatchesByRRSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListChangeBatchesByRRSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListChangeBatchesByRRSetRequest method. +// req, resp := client.ListChangeBatchesByRRSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListChangeBatchesByRRSetRequest(input *ListChangeBatchesByRRSetInput) (req *request.Request, output *ListChangeBatchesByRRSetOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, ListChangeBatchesByRRSet, has been deprecated") + } + op := &request.Operation{ + Name: opListChangeBatchesByRRSet, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}/rrsChanges", + } + + if input == nil { + input = &ListChangeBatchesByRRSetInput{} + } + + req = c.newRequest(op, input, output) + output = &ListChangeBatchesByRRSetOutput{} + req.Data = output + return +} + +// This action gets the list of ChangeBatches in a given time period for a given +// hosted zone and RRSet. +func (c *Route53) ListChangeBatchesByRRSet(input *ListChangeBatchesByRRSetInput) (*ListChangeBatchesByRRSetOutput, error) { + req, out := c.ListChangeBatchesByRRSetRequest(input) + err := req.Send() + return out, err +} + +const opListGeoLocations = "ListGeoLocations" + +// ListGeoLocationsRequest generates a "aws/request.Request" representing the +// client's request for the ListGeoLocations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListGeoLocations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListGeoLocationsRequest method. +// req, resp := client.ListGeoLocationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListGeoLocationsRequest(input *ListGeoLocationsInput) (req *request.Request, output *ListGeoLocationsOutput) { + op := &request.Operation{ + Name: opListGeoLocations, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/geolocations", + } + + if input == nil { + input = &ListGeoLocationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGeoLocationsOutput{} + req.Data = output + return +} + +// To retrieve a list of supported geo locations, send a GET request to the +// /Route 53 API version/geolocations resource. The response to this request +// includes a GeoLocationDetailsList element with zero, one, or multiple GeoLocationDetails +// child elements. The list is sorted by country code, and then subdivision +// code, followed by continents at the end of the list. +// +// By default, the list of geo locations is displayed on a single page. You +// can control the length of the page that is displayed by using the MaxItems +// parameter. If the list is truncated, IsTruncated will be set to true and +// a combination of NextContinentCode, NextCountryCode, NextSubdivisionCode +// will be populated. You can pass these as parameters to StartContinentCode, +// StartCountryCode, StartSubdivisionCode to control the geo location that the +// list begins with. +func (c *Route53) ListGeoLocations(input *ListGeoLocationsInput) (*ListGeoLocationsOutput, error) { + req, out := c.ListGeoLocationsRequest(input) + err := req.Send() + return out, err +} + +const opListHealthChecks = "ListHealthChecks" + +// ListHealthChecksRequest generates a "aws/request.Request" representing the +// client's request for the ListHealthChecks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListHealthChecks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListHealthChecksRequest method. 
+// req, resp := client.ListHealthChecksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListHealthChecksRequest(input *ListHealthChecksInput) (req *request.Request, output *ListHealthChecksOutput) { + op := &request.Operation{ + Name: opListHealthChecks, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListHealthChecksInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHealthChecksOutput{} + req.Data = output + return +} + +// To retrieve a list of your health checks, send a GET request to the /Route +// 53 API version/healthcheck resource. The response to this request includes +// a HealthChecks element with zero, one, or multiple HealthCheck child elements. +// By default, the list of health checks is displayed on a single page. You +// can control the length of the page that is displayed by using the MaxItems +// parameter. You can use the Marker parameter to control the health check that +// the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListHealthChecks(input *ListHealthChecksInput) (*ListHealthChecksOutput, error) { + req, out := c.ListHealthChecksRequest(input) + err := req.Send() + return out, err +} + +// ListHealthChecksPages iterates over the pages of a ListHealthChecks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListHealthChecks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListHealthChecks operation. +// pageNum := 0 +// err := client.ListHealthChecksPages(params, +// func(page *ListHealthChecksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Route53) ListHealthChecksPages(input *ListHealthChecksInput, fn func(p *ListHealthChecksOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListHealthChecksRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListHealthChecksOutput), lastPage) + }) +} + +const opListHostedZones = "ListHostedZones" + +// ListHostedZonesRequest generates a "aws/request.Request" representing the +// client's request for the ListHostedZones operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListHostedZones method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListHostedZonesRequest method. 
+// req, resp := client.ListHostedZonesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListHostedZonesRequest(input *ListHostedZonesInput) (req *request.Request, output *ListHostedZonesOutput) { + op := &request.Operation{ + Name: opListHostedZones, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListHostedZonesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHostedZonesOutput{} + req.Data = output + return +} + +// To retrieve a list of your hosted zones, send a GET request to the /Route +// 53 API version/hostedzone resource. The response to this request includes +// a HostedZones element with zero, one, or multiple HostedZone child elements. +// By default, the list of hosted zones is displayed on a single page. You can +// control the length of the page that is displayed by using the MaxItems parameter. +// You can use the Marker parameter to control the hosted zone that the list +// begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListHostedZones(input *ListHostedZonesInput) (*ListHostedZonesOutput, error) { + req, out := c.ListHostedZonesRequest(input) + err := req.Send() + return out, err +} + +// ListHostedZonesPages iterates over the pages of a ListHostedZones operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListHostedZones method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListHostedZones operation. +// pageNum := 0 +// err := client.ListHostedZonesPages(params, +// func(page *ListHostedZonesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Route53) ListHostedZonesPages(input *ListHostedZonesInput, fn func(p *ListHostedZonesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListHostedZonesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListHostedZonesOutput), lastPage) + }) +} + +const opListHostedZonesByName = "ListHostedZonesByName" + +// ListHostedZonesByNameRequest generates a "aws/request.Request" representing the +// client's request for the ListHostedZonesByName operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListHostedZonesByName method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListHostedZonesByNameRequest method. 
+// req, resp := client.ListHostedZonesByNameRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListHostedZonesByNameRequest(input *ListHostedZonesByNameInput) (req *request.Request, output *ListHostedZonesByNameOutput) { + op := &request.Operation{ + Name: opListHostedZonesByName, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzonesbyname", + } + + if input == nil { + input = &ListHostedZonesByNameInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHostedZonesByNameOutput{} + req.Data = output + return +} + +// To retrieve a list of your hosted zones in lexicographic order, send a GET +// request to the /Route 53 API version/hostedzonesbyname resource. The response +// to this request includes a HostedZones element with zero or more HostedZone +// child elements lexicographically ordered by DNS name. By default, the list +// of hosted zones is displayed on a single page. You can control the length +// of the page that is displayed by using the MaxItems parameter. You can use +// the DNSName and HostedZoneId parameters to control the hosted zone that the +// list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListHostedZonesByName(input *ListHostedZonesByNameInput) (*ListHostedZonesByNameOutput, error) { + req, out := c.ListHostedZonesByNameRequest(input) + err := req.Send() + return out, err +} + +const opListResourceRecordSets = "ListResourceRecordSets" + +// ListResourceRecordSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListResourceRecordSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListResourceRecordSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListResourceRecordSetsRequest method. +// req, resp := client.ListResourceRecordSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListResourceRecordSetsRequest(input *ListResourceRecordSetsInput) (req *request.Request, output *ListResourceRecordSetsOutput) { + op := &request.Operation{ + Name: opListResourceRecordSets, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}/rrset", + Paginator: &request.Paginator{ + InputTokens: []string{"StartRecordName", "StartRecordType", "StartRecordIdentifier"}, + OutputTokens: []string{"NextRecordName", "NextRecordType", "NextRecordIdentifier"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListResourceRecordSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListResourceRecordSetsOutput{} + req.Data = output + return +} + +// List the resource record sets in a specified hosted zone. Send a GET request +// to the 2013-04-01/hostedzone/hosted zone ID/rrset resource. 
+// +// ListResourceRecordSets returns up to 100 resource record sets at a time +// in ASCII order, beginning at a position specified by the name and type elements. +// The action sorts results first by DNS name with the labels reversed, for +// example: +// +// com.example.www. +// +// Note the trailing dot, which can change the sort order in some circumstances. +// When multiple records have the same DNS name, the action sorts results by +// the record type. +// +// You can use the name and type elements to adjust the beginning position +// of the list of resource record sets returned: +// +// If you do not specify Name or Type: The results begin with the first resource +// record set that the hosted zone contains. If you specify Name but not Type: +// The results begin with the first resource record set in the list whose name +// is greater than or equal to Name. If you specify Type but not Name: Amazon +// Route 53 returns the InvalidInput error. If you specify both Name and Type: +// The results begin with the first resource record set in the list whose name +// is greater than or equal to Name, and whose type is greater than or equal +// to Type. This action returns the most current version of the records. This +// includes records that are PENDING, and that are not yet available on all +// Amazon Route 53 DNS servers. +// +// To ensure that you get an accurate listing of the resource record sets for +// a hosted zone at a point in time, do not submit a ChangeResourceRecordSets +// request while you are paging through the results of a ListResourceRecordSets +// request. If you do, some pages may display results without the latest changes +// while other pages display results with the latest changes. +func (c *Route53) ListResourceRecordSets(input *ListResourceRecordSetsInput) (*ListResourceRecordSetsOutput, error) { + req, out := c.ListResourceRecordSetsRequest(input) + err := req.Send() + return out, err +} + +// ListResourceRecordSetsPages iterates over the pages of a ListResourceRecordSets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListResourceRecordSets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListResourceRecordSets operation. +// pageNum := 0 +// err := client.ListResourceRecordSetsPages(params, +// func(page *ListResourceRecordSetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Route53) ListResourceRecordSetsPages(input *ListResourceRecordSetsInput, fn func(p *ListResourceRecordSetsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListResourceRecordSetsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListResourceRecordSetsOutput), lastPage) + }) +} + +const opListReusableDelegationSets = "ListReusableDelegationSets" + +// ListReusableDelegationSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListReusableDelegationSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListReusableDelegationSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListReusableDelegationSetsRequest method. +// req, resp := client.ListReusableDelegationSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListReusableDelegationSetsRequest(input *ListReusableDelegationSetsInput) (req *request.Request, output *ListReusableDelegationSetsOutput) { + op := &request.Operation{ + Name: opListReusableDelegationSets, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/delegationset", + } + + if input == nil { + input = &ListReusableDelegationSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListReusableDelegationSetsOutput{} + req.Data = output + return +} + +// To retrieve a list of your reusable delegation sets, send a GET request to +// the /Route 53 API version/delegationset resource. The response to this request +// includes a DelegationSets element with zero, one, or multiple DelegationSet +// child elements. By default, the list of delegation sets is displayed on a +// single page. You can control the length of the page that is displayed by +// using the MaxItems parameter. You can use the Marker parameter to control +// the delegation set that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListReusableDelegationSets(input *ListReusableDelegationSetsInput) (*ListReusableDelegationSetsOutput, error) { + req, out := c.ListReusableDelegationSetsRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForResourceRequest method. 
+// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/tags/{ResourceType}/{ResourceId}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +func (c *Route53) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResources = "ListTagsForResources" + +// ListTagsForResourcesRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResources operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForResources method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForResourcesRequest method. +// req, resp := client.ListTagsForResourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListTagsForResourcesRequest(input *ListTagsForResourcesInput) (req *request.Request, output *ListTagsForResourcesOutput) { + op := &request.Operation{ + Name: opListTagsForResources, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/tags/{ResourceType}", + } + + if input == nil { + input = &ListTagsForResourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourcesOutput{} + req.Data = output + return +} + +func (c *Route53) ListTagsForResources(input *ListTagsForResourcesInput) (*ListTagsForResourcesOutput, error) { + req, out := c.ListTagsForResourcesRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicies = "ListTrafficPolicies" + +// ListTrafficPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListTrafficPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTrafficPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTrafficPoliciesRequest method. 
+// req, resp := client.ListTrafficPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListTrafficPoliciesRequest(input *ListTrafficPoliciesInput) (req *request.Request, output *ListTrafficPoliciesOutput) { + op := &request.Operation{ + Name: opListTrafficPolicies, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicies", + } + + if input == nil { + input = &ListTrafficPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPoliciesOutput{} + req.Data = output + return +} + +// Gets information about the latest version for every traffic policy that is +// associated with the current AWS account. To get the information, send a GET +// request to the /Route 53 API version/trafficpolicy resource. +// +// Amazon Route 53 returns a maximum of 100 items in each response. If you +// have a lot of traffic policies, you can use the maxitems parameter to list +// them in groups of up to 100. +// +// The response includes three values that help you navigate from one group +// of maxitems traffic policies to the next: +// +// IsTruncated If the value of IsTruncated in the response is true, there +// are more traffic policies associated with the current AWS account. +// +// If IsTruncated is false, this response includes the last traffic policy +// that is associated with the current account. +// +// TrafficPolicyIdMarker If IsTruncated is true, TrafficPolicyIdMarker is the +// ID of the first traffic policy in the next group of MaxItems traffic policies. +// If you want to list more traffic policies, make another call to ListTrafficPolicies, +// and specify the value of the TrafficPolicyIdMarker element from the response +// in the TrafficPolicyIdMarker request parameter. +// +// If IsTruncated is false, the TrafficPolicyIdMarker element is omitted from +// the response. +// +// MaxItems The value that you specified for the MaxItems parameter in the +// request that produced the current response. +func (c *Route53) ListTrafficPolicies(input *ListTrafficPoliciesInput) (*ListTrafficPoliciesOutput, error) { + req, out := c.ListTrafficPoliciesRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicyInstances = "ListTrafficPolicyInstances" + +// ListTrafficPolicyInstancesRequest generates a "aws/request.Request" representing the +// client's request for the ListTrafficPolicyInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTrafficPolicyInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTrafficPolicyInstancesRequest method. 
+// req, resp := client.ListTrafficPolicyInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListTrafficPolicyInstancesRequest(input *ListTrafficPolicyInstancesInput) (req *request.Request, output *ListTrafficPolicyInstancesOutput) { + op := &request.Operation{ + Name: opListTrafficPolicyInstances, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstances", + } + + if input == nil { + input = &ListTrafficPolicyInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPolicyInstancesOutput{} + req.Data = output + return +} + +// Gets information about the traffic policy instances that you created by using +// the current AWS account. +// +// After you submit an UpdateTrafficPolicyInstance request, there's a brief +// delay while Amazon Route 53 creates the resource record sets that are specified +// in the traffic policy definition. For more information, see the State response +// element. To get information about the traffic policy instances that are associated +// with the current AWS account, send a GET request to the /Route 53 API version/trafficpolicyinstance +// resource. +// +// Amazon Route 53 returns a maximum of 100 items in each response. If you +// have a lot of traffic policy instances, you can use the MaxItems parameter +// to list them in groups of up to 100. +// +// The response includes five values that help you navigate from one group +// of MaxItems traffic policy instances to the next: +// +// IsTruncated If the value of IsTruncated in the response is true, there +// are more traffic policy instances associated with the current AWS account. +// +// If IsTruncated is false, this response includes the last traffic policy +// instance that is associated with the current account. +// +// MaxItems The value that you specified for the MaxItems parameter in the +// request that produced the current response. +// +// HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker +// If IsTruncated is true, these three values in the response represent the +// first traffic policy instance in the next group of MaxItems traffic policy +// instances. To list more traffic policy instances, make another call to ListTrafficPolicyInstances, +// and specify these values in the corresponding request parameters. +// +// If IsTruncated is false, all three elements are omitted from the response. +func (c *Route53) ListTrafficPolicyInstances(input *ListTrafficPolicyInstancesInput) (*ListTrafficPolicyInstancesOutput, error) { + req, out := c.ListTrafficPolicyInstancesRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicyInstancesByHostedZone = "ListTrafficPolicyInstancesByHostedZone" + +// ListTrafficPolicyInstancesByHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the ListTrafficPolicyInstancesByHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTrafficPolicyInstancesByHostedZone method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTrafficPolicyInstancesByHostedZoneRequest method. +// req, resp := client.ListTrafficPolicyInstancesByHostedZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListTrafficPolicyInstancesByHostedZoneRequest(input *ListTrafficPolicyInstancesByHostedZoneInput) (req *request.Request, output *ListTrafficPolicyInstancesByHostedZoneOutput) { + op := &request.Operation{ + Name: opListTrafficPolicyInstancesByHostedZone, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstances/hostedzone", + } + + if input == nil { + input = &ListTrafficPolicyInstancesByHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPolicyInstancesByHostedZoneOutput{} + req.Data = output + return +} + +// Gets information about the traffic policy instances that you created in a +// specified hosted zone. +// +// After you submit an UpdateTrafficPolicyInstance request, there's a brief +// delay while Amazon Route 53 creates the resource record sets that are specified +// in the traffic policy definition. For more information, see the State response +// element. To get information about the traffic policy instances that you created +// in a specified hosted zone, send a GET request to the /Route 53 API version/trafficpolicyinstance +// resource and include the ID of the hosted zone. +// +// Amazon Route 53 returns a maximum of 100 items in each response. If you +// have a lot of traffic policy instances, you can use the MaxItems parameter +// to list them in groups of up to 100. +// +// The response includes four values that help you navigate from one group +// of MaxItems traffic policy instances to the next: +// +// IsTruncated If the value of IsTruncated in the response is true, there +// are more traffic policy instances associated with the current AWS account. +// +// If IsTruncated is false, this response includes the last traffic policy +// instance that is associated with the current account. +// +// MaxItems The value that you specified for the MaxItems parameter in the +// request that produced the current response. +// +// TrafficPolicyInstanceNameMarker and TrafficPolicyInstanceTypeMarker If IsTruncated +// is true, these two values in the response represent the first traffic policy +// instance in the next group of MaxItems traffic policy instances. To list +// more traffic policy instances, make another call to ListTrafficPolicyInstancesByHostedZone, +// and specify these values in the corresponding request parameters. +// +// If IsTruncated is false, all three elements are omitted from the response. +func (c *Route53) ListTrafficPolicyInstancesByHostedZone(input *ListTrafficPolicyInstancesByHostedZoneInput) (*ListTrafficPolicyInstancesByHostedZoneOutput, error) { + req, out := c.ListTrafficPolicyInstancesByHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicyInstancesByPolicy = "ListTrafficPolicyInstancesByPolicy" + +// ListTrafficPolicyInstancesByPolicyRequest generates a "aws/request.Request" representing the +// client's request for the ListTrafficPolicyInstancesByPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListTrafficPolicyInstancesByPolicy method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListTrafficPolicyInstancesByPolicyRequest method.
+//    req, resp := client.ListTrafficPolicyInstancesByPolicyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Route53) ListTrafficPolicyInstancesByPolicyRequest(input *ListTrafficPolicyInstancesByPolicyInput) (req *request.Request, output *ListTrafficPolicyInstancesByPolicyOutput) {
+ op := &request.Operation{
+ Name: opListTrafficPolicyInstancesByPolicy,
+ HTTPMethod: "GET",
+ HTTPPath: "/2013-04-01/trafficpolicyinstances/trafficpolicy",
+ }
+
+ if input == nil {
+ input = &ListTrafficPolicyInstancesByPolicyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListTrafficPolicyInstancesByPolicyOutput{}
+ req.Data = output
+ return
+}
+
+// Gets information about the traffic policy instances that you created by using
+// a specified traffic policy version.
+//
+// After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance
+// request, there's a brief delay while Amazon Route 53 creates the resource
+// record sets that are specified in the traffic policy definition. For more
+// information, see the State response element. To get information about the
+// traffic policy instances that you created by using a specified traffic policy
+// version, send a GET request to the /Route 53 API version/trafficpolicyinstance
+// resource and include the ID and version of the traffic policy.
+//
+// Amazon Route 53 returns a maximum of 100 items in each response. If you
+// have a lot of traffic policy instances, you can use the MaxItems parameter
+// to list them in groups of up to 100.
+//
+// The response includes five values that help you navigate from one group
+// of MaxItems traffic policy instances to the next:
+//
+// IsTruncated If the value of IsTruncated in the response is true, there
+// are more traffic policy instances associated with the specified traffic policy.
+//
+// If IsTruncated is false, this response includes the last traffic policy
+// instance that is associated with the specified traffic policy.
+//
+// MaxItems The value that you specified for the MaxItems parameter in the
+// request that produced the current response.
+//
+// HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker
+// If IsTruncated is true, these values in the response represent the first
+// traffic policy instance in the next group of MaxItems traffic policy instances.
+// To list more traffic policy instances, make another call to ListTrafficPolicyInstancesByPolicy,
+// and specify these values in the corresponding request parameters.
+//
+// If IsTruncated is false, all three elements are omitted from the response.
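+//
+// The Operation above defines no Paginator, so callers page through the results
+// by feeding the markers described here back into the next request. A minimal
+// sketch, assuming svc is an initialized *Route53 client; the policy ID and
+// version below are placeholders:
+//
+//    params := &ListTrafficPolicyInstancesByPolicyInput{
+//        TrafficPolicyId:      aws.String("example-policy-id"), // hypothetical ID
+//        TrafficPolicyVersion: aws.Int64(1),
+//    }
+//    for {
+//        resp, err := svc.ListTrafficPolicyInstancesByPolicy(params)
+//        if err != nil {
+//            break // handle the error in real code
+//        }
+//        fmt.Println(resp.TrafficPolicyInstances)
+//        if !aws.BoolValue(resp.IsTruncated) {
+//            break
+//        }
+//        // Markers from the response select the next group of MaxItems instances.
+//        params.HostedZoneIdMarker = resp.HostedZoneIdMarker
+//        params.TrafficPolicyInstanceNameMarker = resp.TrafficPolicyInstanceNameMarker
+//        params.TrafficPolicyInstanceTypeMarker = resp.TrafficPolicyInstanceTypeMarker
+//    }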
+func (c *Route53) ListTrafficPolicyInstancesByPolicy(input *ListTrafficPolicyInstancesByPolicyInput) (*ListTrafficPolicyInstancesByPolicyOutput, error) {
+ req, out := c.ListTrafficPolicyInstancesByPolicyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListTrafficPolicyVersions = "ListTrafficPolicyVersions"
+
+// ListTrafficPolicyVersionsRequest generates a "aws/request.Request" representing the
+// client's request for the ListTrafficPolicyVersions operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListTrafficPolicyVersions method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListTrafficPolicyVersionsRequest method.
+//    req, resp := client.ListTrafficPolicyVersionsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Route53) ListTrafficPolicyVersionsRequest(input *ListTrafficPolicyVersionsInput) (req *request.Request, output *ListTrafficPolicyVersionsOutput) {
+ op := &request.Operation{
+ Name: opListTrafficPolicyVersions,
+ HTTPMethod: "GET",
+ HTTPPath: "/2013-04-01/trafficpolicies/{Id}/versions",
+ }
+
+ if input == nil {
+ input = &ListTrafficPolicyVersionsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListTrafficPolicyVersionsOutput{}
+ req.Data = output
+ return
+}
+
+// Gets information about all of the versions for a specified traffic policy.
+// ListTrafficPolicyVersions lists only versions that have not been deleted.
+//
+// Amazon Route 53 returns a maximum of 100 items in each response. If you
+// have a lot of traffic policies, you can use the maxitems parameter to list
+// them in groups of up to 100.
+//
+// The response includes three values that help you navigate from one group
+// of maxitems traffic policies to the next:
+//
+// IsTruncated If the value of IsTruncated in the response is true, there
+// are more traffic policy versions associated with the specified traffic policy.
+//
+// If IsTruncated is false, this response includes the last traffic policy
+// version that is associated with the specified traffic policy.
+//
+// TrafficPolicyVersionMarker The ID of the next traffic policy version that
+// is associated with the current AWS account. If you want to list more traffic
+// policies, make another call to ListTrafficPolicyVersions, and specify the
+// value of the TrafficPolicyVersionMarker element in the TrafficPolicyVersionMarker
+// request parameter.
+//
+// If IsTruncated is false, Amazon Route 53 omits the TrafficPolicyVersionMarker
+// element from the response.
+//
+// MaxItems The value that you specified for the MaxItems parameter in the
+// request that produced the current response.
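+//
+// The Operation above defines no Paginator, so a caller collects every remaining
+// version by following TrafficPolicyVersionMarker. A minimal sketch, assuming svc
+// is an initialized *Route53 client; the policy ID below is a placeholder:
+//
+//    params := &ListTrafficPolicyVersionsInput{
+//        Id: aws.String("example-policy-id"), // hypothetical ID
+//    }
+//    for {
+//        resp, err := svc.ListTrafficPolicyVersions(params)
+//        if err != nil {
+//            break // handle the error in real code
+//        }
+//        for _, tp := range resp.TrafficPolicies {
+//            fmt.Println(aws.Int64Value(tp.Version))
+//        }
+//        if !aws.BoolValue(resp.IsTruncated) {
+//            break
+//        }
+//        // The marker from the response selects the next group of versions.
+//        params.TrafficPolicyVersionMarker = resp.TrafficPolicyVersionMarker
+//    }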
+func (c *Route53) ListTrafficPolicyVersions(input *ListTrafficPolicyVersionsInput) (*ListTrafficPolicyVersionsOutput, error) { + req, out := c.ListTrafficPolicyVersionsRequest(input) + err := req.Send() + return out, err +} + +const opUpdateHealthCheck = "UpdateHealthCheck" + +// UpdateHealthCheckRequest generates a "aws/request.Request" representing the +// client's request for the UpdateHealthCheck operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateHealthCheck method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateHealthCheckRequest method. +// req, resp := client.UpdateHealthCheckRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) UpdateHealthCheckRequest(input *UpdateHealthCheckInput) (req *request.Request, output *UpdateHealthCheckOutput) { + op := &request.Operation{ + Name: opUpdateHealthCheck, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}", + } + + if input == nil { + input = &UpdateHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateHealthCheckOutput{} + req.Data = output + return +} + +// This action updates an existing health check. +// +// To update a health check, send a POST request to the /Route 53 API version/healthcheck/health +// check ID resource. The request body must include a document with an UpdateHealthCheckRequest +// element. The response returns an UpdateHealthCheckResponse element, which +// contains metadata about the health check. +func (c *Route53) UpdateHealthCheck(input *UpdateHealthCheckInput) (*UpdateHealthCheckOutput, error) { + req, out := c.UpdateHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opUpdateHostedZoneComment = "UpdateHostedZoneComment" + +// UpdateHostedZoneCommentRequest generates a "aws/request.Request" representing the +// client's request for the UpdateHostedZoneComment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateHostedZoneComment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateHostedZoneCommentRequest method. 
+// req, resp := client.UpdateHostedZoneCommentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) UpdateHostedZoneCommentRequest(input *UpdateHostedZoneCommentInput) (req *request.Request, output *UpdateHostedZoneCommentOutput) { + op := &request.Operation{ + Name: opUpdateHostedZoneComment, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone/{Id}", + } + + if input == nil { + input = &UpdateHostedZoneCommentInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateHostedZoneCommentOutput{} + req.Data = output + return +} + +// To update the hosted zone comment, send a POST request to the /Route 53 API +// version/hostedzone/hosted zone ID resource. The request body must include +// a document with a UpdateHostedZoneCommentRequest element. The response to +// this request includes the modified HostedZone element. +// +// The comment can have a maximum length of 256 characters. +func (c *Route53) UpdateHostedZoneComment(input *UpdateHostedZoneCommentInput) (*UpdateHostedZoneCommentOutput, error) { + req, out := c.UpdateHostedZoneCommentRequest(input) + err := req.Send() + return out, err +} + +const opUpdateTrafficPolicyComment = "UpdateTrafficPolicyComment" + +// UpdateTrafficPolicyCommentRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTrafficPolicyComment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateTrafficPolicyComment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateTrafficPolicyCommentRequest method. +// req, resp := client.UpdateTrafficPolicyCommentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) UpdateTrafficPolicyCommentRequest(input *UpdateTrafficPolicyCommentInput) (req *request.Request, output *UpdateTrafficPolicyCommentOutput) { + op := &request.Operation{ + Name: opUpdateTrafficPolicyComment, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicy/{Id}/{Version}", + } + + if input == nil { + input = &UpdateTrafficPolicyCommentInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateTrafficPolicyCommentOutput{} + req.Data = output + return +} + +// Updates the comment for a specified traffic policy version. +// +// To update the comment, send a POST request to the /Route 53 API version/trafficpolicy/ +// resource. +// +// The request body must include a document with an UpdateTrafficPolicyCommentRequest +// element. +func (c *Route53) UpdateTrafficPolicyComment(input *UpdateTrafficPolicyCommentInput) (*UpdateTrafficPolicyCommentOutput, error) { + req, out := c.UpdateTrafficPolicyCommentRequest(input) + err := req.Send() + return out, err +} + +const opUpdateTrafficPolicyInstance = "UpdateTrafficPolicyInstance" + +// UpdateTrafficPolicyInstanceRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTrafficPolicyInstance operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateTrafficPolicyInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateTrafficPolicyInstanceRequest method. +// req, resp := client.UpdateTrafficPolicyInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) UpdateTrafficPolicyInstanceRequest(input *UpdateTrafficPolicyInstanceInput) (req *request.Request, output *UpdateTrafficPolicyInstanceOutput) { + op := &request.Operation{ + Name: opUpdateTrafficPolicyInstance, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicyinstance/{Id}", + } + + if input == nil { + input = &UpdateTrafficPolicyInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateTrafficPolicyInstanceOutput{} + req.Data = output + return +} + +// Updates the resource record sets in a specified hosted zone that were created +// based on the settings in a specified traffic policy version. +// +// The DNS type of the resource record sets that you're updating must match +// the DNS type in the JSON document that is associated with the traffic policy +// version that you're using to update the traffic policy instance. When you +// update a traffic policy instance, Amazon Route 53 continues to respond to +// DNS queries for the root resource record set name (such as example.com) while +// it replaces one group of resource record sets with another. Amazon Route +// 53 performs the following operations: +// +// Amazon Route 53 creates a new group of resource record sets based on the +// specified traffic policy. This is true regardless of how substantial the +// differences are between the existing resource record sets and the new resource +// record sets. When all of the new resource record sets have been created, +// Amazon Route 53 starts to respond to DNS queries for the root resource record +// set name (such as example.com) by using the new resource record sets. Amazon +// Route 53 deletes the old group of resource record sets that are associated +// with the root resource record set name. To update a traffic policy instance, +// send a POST request to the /Route 53 API version/trafficpolicyinstance/traffic +// policy ID resource. The request body must include a document with an UpdateTrafficPolicyInstanceRequest +// element. +func (c *Route53) UpdateTrafficPolicyInstance(input *UpdateTrafficPolicyInstanceInput) (*UpdateTrafficPolicyInstanceOutput, error) { + req, out := c.UpdateTrafficPolicyInstanceRequest(input) + err := req.Send() + return out, err +} + +// A complex type that contains information to uniquely identify the CloudWatch +// alarm that you're associating with a Route 53 health check. +type AlarmIdentifier struct { + _ struct{} `type:"structure"` + + // The name of the CloudWatch alarm. + Name *string `min:"1" type:"string" required:"true"` + + // The CloudWatchRegion that the CloudWatch alarm was created in. 
+ Region *string `min:"1" type:"string" required:"true" enum:"CloudWatchRegion"` +} + +// String returns the string representation +func (s AlarmIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AlarmIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AlarmIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AlarmIdentifier"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Region == nil { + invalidParams.Add(request.NewErrParamRequired("Region")) + } + if s.Region != nil && len(*s.Region) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Region", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Alias resource record sets only: Information about the CloudFront distribution, +// ELB load balancer, Amazon S3 bucket, or Amazon Route 53 resource record set +// to which you are routing traffic. +// +// If you're creating resource record sets for a private hosted zone, note +// the following: +// +// You can create alias resource record sets only for Amazon Route 53 resource +// record sets in the same private hosted zone. Creating alias resource record +// sets for CloudFront distributions, ELB load balancers, and Amazon S3 buckets +// is not supported. You can't create alias resource record sets for failover, +// geolocation, or latency resource record sets in a private hosted zone. +type AliasTarget struct { + _ struct{} `type:"structure"` + + // Alias resource record sets only: The external DNS name associated with the + // AWS Resource. The value that you specify depends on where you want to route + // queries: + // + // A CloudFront distribution: Specify the domain name that CloudFront assigned + // when you created your distribution. Your CloudFront distribution must include + // an alternate domain name that matches the name of the resource record set. + // For example, if the name of the resource record set is acme.example.com, + // your CloudFront distribution must include acme.example.com as one of the + // alternate domain names. For more information, see Using Alternate Domain + // Names (CNAMEs) (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/CNAMEs.html) + // in the Amazon CloudFront Developer Guide. An ELB load balancer: Specify the + // DNS name associated with the load balancer. You can get the DNS name by using + // the AWS Management Console, the ELB API, or the AWS CLI. Use the same method + // to get values for HostedZoneId and DNSName. If you get one value from the + // console and the other value from the API or the CLI, creating the resource + // record set will fail. An Elastic Beanstalk environment: Specify the CNAME + // attribute for the environment. (The environment must have a regionalized + // domain name.) An Amazon S3 bucket that is configured as a static website: + // Specify the domain name of the Amazon S3 website endpoint in which you created + // the bucket; for example, s3-website-us-east-1.amazonaws.com. For more information + // about valid values, see the table Amazon Simple Storage Service (S3) Website + // Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. 
For more information about + // using Amazon S3 buckets for websites, see Hosting a Static Website on Amazon + // S3 (http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) in + // the Amazon Simple Storage Service Developer Guide. Another Amazon Route 53 + // resource record set: Specify the value of the Name element for a resource + // record set in the current hosted zone. + DNSName *string `type:"string" required:"true"` + + // Alias resource record sets only: If you set the value of EvaluateTargetHealth + // to true for the resource record set or sets in an alias, weighted alias, + // latency alias, or failover alias resource record set, and if you specify + // a value for HealthCheckId for every resource record set that is referenced + // by these alias resource record sets, the alias resource record sets inherit + // the health of the referenced resource record sets. + // + // In this configuration, when Amazon Route 53 receives a DNS query for an + // alias resource record set: + // + // Amazon Route 53 looks at the resource record sets that are referenced by + // the alias resource record sets to determine which health checks they're using. + // Amazon Route 53 checks the current status of each health check. (Amazon Route + // 53 periodically checks the health of the endpoint that is specified in a + // health check; it doesn't perform the health check when the DNS query arrives.) + // Based on the status of the health checks, Amazon Route 53 determines which + // resource record sets are healthy. Unhealthy resource record sets are immediately + // removed from consideration. In addition, if all of the resource record sets + // that are referenced by an alias resource record set are unhealthy, that alias + // resource record set also is immediately removed from consideration. Based + // on the configuration of the alias resource record sets (weighted alias or + // latency alias, for example) and the configuration of the resource record + // sets that they reference, Amazon Route 53 chooses a resource record set from + // the healthy resource record sets, and responds to the query. Note the following: + // + // You cannot set EvaluateTargetHealth to true when the alias target is a CloudFront + // distribution. If the AWS resource that you specify in AliasTarget is a resource + // record set or a group of resource record sets (for example, a group of weighted + // resource record sets), but it is not another alias resource record set, we + // recommend that you associate a health check with all of the resource record + // sets in the alias target. If you specify an ELB load balancer in AliasTarget, + // Elastic Load Balancing routes queries only to the healthy Amazon EC2 instances + // that are registered with the load balancer. If no Amazon EC2 instances are + // healthy or if the load balancer itself is unhealthy, and if EvaluateTargetHealth + // is true for the corresponding alias resource record set, Amazon Route 53 + // routes queries to other resources. When you create a load balancer, you configure + // settings for Elastic Load Balancing health checks; they're not Amazon Route + // 53 health checks, but they perform a similar function. Do not create Amazon + // Route 53 health checks for the Amazon EC2 instances that you register with + // an ELB load balancer. 
For more information, see How Health Checks Work in + // More Complex Amazon Route 53 Configurations (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-complex-configs.html) + // in the Amazon Route 53 Developer Guide. We recommend that you set EvaluateTargetHealth + // to true only when you have enough idle capacity to handle the failure of + // one or more endpoints. + // + // For more information and examples, see Amazon Route 53 Health Checks and + // DNS Failover (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html) + // in the Amazon Route 53 Developer Guide. + EvaluateTargetHealth *bool `type:"boolean" required:"true"` + + // Alias resource record sets only: The value you use depends on where you want + // to route queries: + // + // A CloudFront distribution: Specify Z2FDTNDATAQYW2. An ELB load balancer: + // Specify the value of the hosted zone ID for the load balancer. You can get + // the hosted zone ID by using the AWS Management Console, the ELB API, or the + // AWS CLI. Use the same method to get values for HostedZoneId and DNSName. + // If you get one value from the console and the other value from the API or + // the CLI, creating the resource record set will fail. An Amazon S3 bucket + // that is configured as a static website: Specify the hosted zone ID for the + // Amazon S3 website endpoint in which you created the bucket. For more information + // about valid values, see the table Amazon Simple Storage Service (S3) Website + // Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. Another Amazon Route 53 resource + // record set in your hosted zone: Specify the hosted zone ID of your hosted + // zone. (An alias resource record set cannot reference a resource record set + // in a different hosted zone.) + HostedZoneId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AliasTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AliasTarget) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AliasTarget) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AliasTarget"} + if s.DNSName == nil { + invalidParams.Add(request.NewErrParamRequired("DNSName")) + } + if s.EvaluateTargetHealth == nil { + invalidParams.Add(request.NewErrParamRequired("EvaluateTargetHealth")) + } + if s.HostedZoneId == nil { + invalidParams.Add(request.NewErrParamRequired("HostedZoneId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the request to associate a +// VPC with an hosted zone. +type AssociateVPCWithHostedZoneInput struct { + _ struct{} `locationName:"AssociateVPCWithHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // Optional: Any comments you want to include about a AssociateVPCWithHostedZoneRequest. + Comment *string `type:"string"` + + // The ID of the hosted zone you want to associate your VPC with. + // + // Note that you cannot associate a VPC with a hosted zone that doesn't have + // an existing VPC association. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The VPC that you want your hosted zone to be associated with. 
+ VPC *VPC `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AssociateVPCWithHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateVPCWithHostedZoneInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateVPCWithHostedZoneInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateVPCWithHostedZoneInput"} + if s.HostedZoneId == nil { + invalidParams.Add(request.NewErrParamRequired("HostedZoneId")) + } + if s.VPC == nil { + invalidParams.Add(request.NewErrParamRequired("VPC")) + } + if s.VPC != nil { + if err := s.VPC.Validate(); err != nil { + invalidParams.AddNested("VPC", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing the response information for the request. +type AssociateVPCWithHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the ID, the status, and the date and time of + // your AssociateVPCWithHostedZoneRequest. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AssociateVPCWithHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateVPCWithHostedZoneOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information for each change in a change +// batch request. +type Change struct { + _ struct{} `type:"structure"` + + // The action to perform: + // + // CREATE: Creates a resource record set that has the specified values. DELETE: + // Deletes a existing resource record set that has the specified values for + // Name, Type, SetIdentifier (for latency, weighted, geolocation, and failover + // resource record sets), and TTL (except alias resource record sets, for which + // the TTL is determined by the AWS resource that you're routing DNS queries + // to). UPSERT: If a resource record set does not already exist, Amazon Route + // 53 creates it. If a resource record set does exist, Amazon Route 53 updates + // it with the values in the request. Amazon Route 53 can update an existing + // resource record set only when all of the following values match: Name, Type, + // and SetIdentifier (for weighted, latency, geolocation, and failover resource + // record sets). + Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // Information about the resource record set to create or delete. + ResourceRecordSet *ResourceRecordSet `type:"structure" required:"true"` +} + +// String returns the string representation +func (s Change) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Change) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Change) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Change"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.ResourceRecordSet == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceRecordSet")) + } + if s.ResourceRecordSet != nil { + if err := s.ResourceRecordSet.Validate(); err != nil { + invalidParams.AddNested("ResourceRecordSet", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains an optional comment and the changes that you +// want to make with a change batch request. +type ChangeBatch struct { + _ struct{} `type:"structure"` + + // A complex type that contains one Change element for each resource record + // set that you want to create or delete. + Changes []*Change `locationNameList:"Change" min:"1" type:"list" required:"true"` + + // Optional: Any comments you want to include about a change batch request. + Comment *string `type:"string"` +} + +// String returns the string representation +func (s ChangeBatch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeBatch) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ChangeBatch) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ChangeBatch"} + if s.Changes == nil { + invalidParams.Add(request.NewErrParamRequired("Changes")) + } + if s.Changes != nil && len(s.Changes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Changes", 1)) + } + if s.Changes != nil { + for i, v := range s.Changes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Changes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that lists the changes and information for a ChangeBatch. +type ChangeBatchRecord struct { + _ struct{} `deprecated:"true" type:"structure"` + + // A list of changes made in the ChangeBatch. + Changes []*Change `locationNameList:"Change" min:"1" type:"list"` + + // A complex type that describes change information about changes made to your + // hosted zone. + // + // This element contains an ID that you use when performing a GetChange action + // to get detailed information about the change. + Comment *string `type:"string"` + + // The ID of the request. Use this ID to track when the change has completed + // across all Amazon Route 53 DNS servers. + Id *string `type:"string" required:"true"` + + // The current state of the request. PENDING indicates that this request has + // not yet been applied to all Amazon Route 53 DNS servers. + // + // Valid Values: PENDING | INSYNC + Status *string `type:"string" required:"true" enum:"ChangeStatus"` + + // The date and time the change was submitted, in the format YYYY-MM-DDThh:mm:ssZ, + // as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). + // The Z after the time indicates that the time is listed in Coordinated Universal + // Time (UTC). + SubmittedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The AWS account ID attached to the changes. 
+ Submitter *string `type:"string"` +} + +// String returns the string representation +func (s ChangeBatchRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeBatchRecord) GoString() string { + return s.String() +} + +// A complex type that describes change information about changes made to your +// hosted zone. +// +// This element contains an ID that you use when performing a GetChange action +// to get detailed information about the change. +type ChangeInfo struct { + _ struct{} `type:"structure"` + + // A complex type that describes change information about changes made to your + // hosted zone. + // + // This element contains an ID that you use when performing a GetChange action + // to get detailed information about the change. + Comment *string `type:"string"` + + // The ID of the request. Use this ID to track when the change has completed + // across all Amazon Route 53 DNS servers. + Id *string `type:"string" required:"true"` + + // The current state of the request. PENDING indicates that this request has + // not yet been applied to all Amazon Route 53 DNS servers. + // + // Valid Values: PENDING | INSYNC + Status *string `type:"string" required:"true" enum:"ChangeStatus"` + + // The date and time the change was submitted, in the format YYYY-MM-DDThh:mm:ssZ, + // as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). + // The Z after the time indicates that the time is listed in Coordinated Universal + // Time (UTC). + SubmittedAt *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s ChangeInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeInfo) GoString() string { + return s.String() +} + +// A complex type that contains a change batch. +type ChangeResourceRecordSetsInput struct { + _ struct{} `locationName:"ChangeResourceRecordSetsRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A complex type that contains an optional comment and the Changes element. + ChangeBatch *ChangeBatch `type:"structure" required:"true"` + + // The ID of the hosted zone that contains the resource record sets that you + // want to change. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s ChangeResourceRecordSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeResourceRecordSetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ChangeResourceRecordSetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ChangeResourceRecordSetsInput"} + if s.ChangeBatch == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeBatch")) + } + if s.HostedZoneId == nil { + invalidParams.Add(request.NewErrParamRequired("HostedZoneId")) + } + if s.ChangeBatch != nil { + if err := s.ChangeBatch.Validate(); err != nil { + invalidParams.AddNested("ChangeBatch", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing the response for the request. 
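+//
+// A minimal sketch of a request that produces this response, assuming the
+// generated ChangeResourceRecordSets wrapper and the ResourceRecordSet fields
+// (Name, Type, TTL, ResourceRecords) defined elsewhere in this file; the
+// hosted zone ID and record values are placeholders:
+//
+//    params := &route53.ChangeResourceRecordSetsInput{
+//        HostedZoneId: aws.String("Z3EXAMPLE"), // placeholder hosted zone ID
+//        ChangeBatch: &route53.ChangeBatch{
+//            Comment: aws.String("upsert www A record"),
+//            Changes: []*route53.Change{{
+//                Action: aws.String("UPSERT"),
+//                ResourceRecordSet: &route53.ResourceRecordSet{
+//                    Name: aws.String("www.example.com."),
+//                    Type: aws.String("A"),
+//                    TTL:  aws.Int64(300),
+//                    ResourceRecords: []*route53.ResourceRecord{
+//                        {Value: aws.String("192.0.2.44")},
+//                    },
+//                },
+//            }},
+//        },
+//    }
+//    resp, err := client.ChangeResourceRecordSets(params)
+//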
+type ChangeResourceRecordSetsOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about changes made to your hosted + // zone. + // + // This element contains an ID that you use when performing a GetChange action + // to get detailed information about the change. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ChangeResourceRecordSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeResourceRecordSetsOutput) GoString() string { + return s.String() +} + +// A complex type containing information about a request to add, change, or +// delete the tags that are associated with a resource. +type ChangeTagsForResourceInput struct { + _ struct{} `locationName:"ChangeTagsForResourceRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A complex type that contains a list of Tag elements. Each Tag element identifies + // a tag that you want to add or update for the specified resource. + AddTags []*Tag `locationNameList:"Tag" min:"1" type:"list"` + + // A list of Tag keys that you want to remove from the specified resource. + RemoveTagKeys []*string `locationNameList:"Key" min:"1" type:"list"` + + // The ID of the resource for which you want to add, change, or delete tags. + ResourceId *string `location:"uri" locationName:"ResourceId" type:"string" required:"true"` + + // The type of the resource. + // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. + ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true" enum:"TagResourceType"` +} + +// String returns the string representation +func (s ChangeTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ChangeTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ChangeTagsForResourceInput"} + if s.AddTags != nil && len(s.AddTags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AddTags", 1)) + } + if s.RemoveTagKeys != nil && len(s.RemoveTagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RemoveTagKeys", 1)) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Empty response for the request. +type ChangeTagsForResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ChangeTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeTagsForResourceOutput) GoString() string { + return s.String() +} + +// For CLOUDWATCH_METRIC health checks, a complex type that contains information +// about the CloudWatch alarm that you're associating with the health check. +type CloudWatchAlarmConfiguration struct { + _ struct{} `type:"structure"` + + // The arithmetic operation to use when comparing the specified Statistic and + // Threshold. 
+ // + // Valid Values are GreaterThanOrEqualToThreshold, GreaterThanThreshold, LessThanThreshold + // and LessThanOrEqualToThreshold + ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` + + // A list of Dimension elements for the CloudWatch metric that is associated + // with the CloudWatch alarm. For information about the metrics and dimensions + // that CloudWatch supports, see Amazon CloudWatch Namespaces, Dimensions, and + // Metrics Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html). + Dimensions []*Dimension `locationNameList:"Dimension" type:"list"` + + // The number of periods over which data is compared to the specified threshold. + EvaluationPeriods *int64 `min:"1" type:"integer" required:"true"` + + // The name of the CloudWatch metric that is associated with the CloudWatch + // alarm. + MetricName *string `min:"1" type:"string" required:"true"` + + // The namespace of the CloudWatch metric that is associated with the CloudWatch + // alarm. + Namespace *string `min:"1" type:"string" required:"true"` + + // An integer that represents the period in seconds over which the statistic + // is applied. + Period *int64 `min:"60" type:"integer" required:"true"` + + // The statistic to apply to the CloudWatch metric that is associated with the + // CloudWatch alarm. + // + // Valid Values are SampleCount, Average, Sum, Minimum and Maximum + Statistic *string `type:"string" required:"true" enum:"Statistic"` + + // The value that the metric is compared with to determine the state of the + // alarm. For example, if you want the health check to fail if the average TCP + // connection time is greater than 500 milliseconds for more than 60 seconds, + // the threshold is 500. + Threshold *float64 `type:"double" required:"true"` +} + +// String returns the string representation +func (s CloudWatchAlarmConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudWatchAlarmConfiguration) GoString() string { + return s.String() +} + +// >A complex type that contains information about the request to create a health +// check. +type CreateHealthCheckInput struct { + _ struct{} `locationName:"CreateHealthCheckRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A unique string that identifies the request and that allows failed CreateHealthCheck + // requests to be retried without the risk of executing the operation twice. + // You must use a unique CallerReference string every time you create a health + // check. CallerReference can be any unique string; you might choose to use + // a string that identifies your project. + // + // Valid characters are any Unicode code points that are legal in an XML 1.0 + // document. The UTF-8 encoding of the value must be less than 128 bytes. + CallerReference *string `min:"1" type:"string" required:"true"` + + // A complex type that contains health check configuration. + HealthCheckConfig *HealthCheckConfig `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateHealthCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHealthCheckInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateHealthCheckInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateHealthCheckInput"} + if s.CallerReference == nil { + invalidParams.Add(request.NewErrParamRequired("CallerReference")) + } + if s.CallerReference != nil && len(*s.CallerReference) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CallerReference", 1)) + } + if s.HealthCheckConfig == nil { + invalidParams.Add(request.NewErrParamRequired("HealthCheckConfig")) + } + if s.HealthCheckConfig != nil { + if err := s.HealthCheckConfig.Validate(); err != nil { + invalidParams.AddNested("HealthCheckConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing the response information for the new health check. +type CreateHealthCheckOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains identifying information about the health check. + HealthCheck *HealthCheck `type:"structure" required:"true"` + + // The unique URL representing the new health check. + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateHealthCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHealthCheckOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to create a hosted +// zone. +type CreateHostedZoneInput struct { + _ struct{} `locationName:"CreateHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A unique string that identifies the request and that allows failed CreateHostedZone + // requests to be retried without the risk of executing the operation twice. + // You must use a unique CallerReference string every time you create a hosted + // zone. CallerReference can be any unique string; you might choose to use a + // string that identifies your project, such as DNSMigration_01. + // + // Valid characters are any Unicode code points that are legal in an XML 1.0 + // document. The UTF-8 encoding of the value must be less than 128 bytes. + CallerReference *string `min:"1" type:"string" required:"true"` + + // The delegation set id of the reusable delgation set whose NS records you + // want to assign to the new hosted zone. + DelegationSetId *string `type:"string"` + + // A complex type that contains an optional comment about your hosted zone. + HostedZoneConfig *HostedZoneConfig `type:"structure"` + + // The name of the domain. This must be a fully-specified domain, for example, + // www.example.com. The trailing dot is optional; Amazon Route 53 assumes that + // the domain name is fully qualified. This means that Amazon Route 53 treats + // www.example.com (without a trailing dot) and www.example.com. (with a trailing + // dot) as identical. + // + // This is the name you have registered with your DNS registrar. You should + // ask your registrar to change the authoritative name servers for your domain + // to the set of NameServers elements returned in DelegationSet. + Name *string `type:"string" required:"true"` + + // The VPC that you want your hosted zone to be associated with. By providing + // this parameter, your newly created hosted cannot be resolved anywhere other + // than the given VPC. 
+ VPC *VPC `type:"structure"` +} + +// String returns the string representation +func (s CreateHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHostedZoneInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateHostedZoneInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateHostedZoneInput"} + if s.CallerReference == nil { + invalidParams.Add(request.NewErrParamRequired("CallerReference")) + } + if s.CallerReference != nil && len(*s.CallerReference) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CallerReference", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.VPC != nil { + if err := s.VPC.Validate(); err != nil { + invalidParams.AddNested("VPC", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing the response information for the new hosted zone. +type CreateHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the request to create a hosted + // zone. This includes an ID that you use when you call the GetChange action + // to get the current status of the change request. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` + + // A complex type that contains name server information. + DelegationSet *DelegationSet `type:"structure" required:"true"` + + // A complex type that contains identifying information about the hosted zone. + HostedZone *HostedZone `type:"structure" required:"true"` + + // The unique URL representing the new hosted zone. + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + VPC *VPC `type:"structure"` +} + +// String returns the string representation +func (s CreateHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHostedZoneOutput) GoString() string { + return s.String() +} + +type CreateReusableDelegationSetInput struct { + _ struct{} `locationName:"CreateReusableDelegationSetRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A unique string that identifies the request and that allows failed CreateReusableDelegationSet + // requests to be retried without the risk of executing the operation twice. + // You must use a unique CallerReference string every time you create a reusable + // delegation set. CallerReference can be any unique string; you might choose + // to use a string that identifies your project, such as DNSMigration_01. + // + // Valid characters are any Unicode code points that are legal in an XML 1.0 + // document. The UTF-8 encoding of the value must be less than 128 bytes. + CallerReference *string `min:"1" type:"string" required:"true"` + + // The ID of the hosted zone whose delegation set you want to mark as reusable. + // It is an optional parameter. + HostedZoneId *string `type:"string"` +} + +// String returns the string representation +func (s CreateReusableDelegationSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReusableDelegationSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateReusableDelegationSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReusableDelegationSetInput"} + if s.CallerReference == nil { + invalidParams.Add(request.NewErrParamRequired("CallerReference")) + } + if s.CallerReference != nil && len(*s.CallerReference) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CallerReference", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateReusableDelegationSetOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains name server information. + DelegationSet *DelegationSet `type:"structure" required:"true"` + + // The unique URL representing the new reusbale delegation set. + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReusableDelegationSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReusableDelegationSetOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the traffic policy that you +// want to create. +type CreateTrafficPolicyInput struct { + _ struct{} `locationName:"CreateTrafficPolicyRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // Any comments that you want to include about the traffic policy. + Comment *string `type:"string"` + + // The definition of this traffic policy in JSON format. For more information, + // see Traffic Policy Document Format (http://docs.aws.amazon.com/Route53/latest/APIReference/api-policies-traffic-policy-document-format.html) + // in the Amazon Route 53 API Reference. + Document *string `type:"string" required:"true"` + + // The name of the traffic policy. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTrafficPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTrafficPolicyInput"} + if s.Document == nil { + invalidParams.Add(request.NewErrParamRequired("Document")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the resource record sets that +// you want to create based on a specified traffic policy. +type CreateTrafficPolicyInstanceInput struct { + _ struct{} `locationName:"CreateTrafficPolicyInstanceRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // The ID of the hosted zone in which you want Amazon Route 53 to create resource + // record sets by using the configuration in a traffic policy. + HostedZoneId *string `type:"string" required:"true"` + + // The domain name (such as example.com) or subdomain name (such as www.example.com) + // for which Amazon Route 53 responds to DNS queries by using the resource record + // sets that Amazon Route 53 creates for this traffic policy instance. 
+ Name *string `type:"string" required:"true"` + + // The TTL that you want Amazon Route 53 to assign to all of the resource record + // sets that it creates in the specified hosted zone. + TTL *int64 `type:"long" required:"true"` + + // The ID of the traffic policy that you want to use to create resource record + // sets in the specified hosted zone. + TrafficPolicyId *string `type:"string" required:"true"` + + // The version of the traffic policy that you want to use to create resource + // record sets in the specified hosted zone. + TrafficPolicyVersion *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTrafficPolicyInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTrafficPolicyInstanceInput"} + if s.HostedZoneId == nil { + invalidParams.Add(request.NewErrParamRequired("HostedZoneId")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.TTL == nil { + invalidParams.Add(request.NewErrParamRequired("TTL")) + } + if s.TrafficPolicyId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficPolicyId")) + } + if s.TrafficPolicyVersion == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficPolicyVersion")) + } + if s.TrafficPolicyVersion != nil && *s.TrafficPolicyVersion < 1 { + invalidParams.Add(request.NewErrParamMinValue("TrafficPolicyVersion", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains the response information for the CreateTrafficPolicyInstance +// request. +type CreateTrafficPolicyInstanceOutput struct { + _ struct{} `type:"structure"` + + // A unique URL that represents a new traffic policy instance. + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + // A complex type that contains settings for the new traffic policy instance. + TrafficPolicyInstance *TrafficPolicyInstance `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyInstanceOutput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the CreateTrafficPolicy +// request. +type CreateTrafficPolicyOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + // A complex type that contains settings for the new traffic policy. + TrafficPolicy *TrafficPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the traffic policy for which +// you want to create a new version. 
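+//
+// A minimal sketch of creating a new version with the input type defined
+// below, assuming the generated CreateTrafficPolicyVersion wrapper; the policy
+// ID and JSON document are placeholders:
+//
+//    params := &route53.CreateTrafficPolicyVersionInput{
+//        Id:       aws.String("traffic-policy-id"), // placeholder policy ID
+//        Document: aws.String(`{"AWSPolicyFormatVersion": "2015-10-01"}`), // full policy document (placeholder)
+//        Comment:  aws.String("second version of the policy"),
+//    }
+//    resp, err := client.CreateTrafficPolicyVersion(params)
+//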
+type CreateTrafficPolicyVersionInput struct { + _ struct{} `locationName:"CreateTrafficPolicyVersionRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // Any comments that you want to include about the new traffic policy version. + Comment *string `type:"string"` + + // The definition of a new traffic policy version, in JSON format. You must + // specify the full definition of the new traffic policy. You cannot specify + // just the differences between the new version and a previous version. For + // more information, see Traffic Policy Document Format (http://docs.aws.amazon.com/Route53/latest/APIReference/api-policies-traffic-policy-document-format.html) + // in the Amazon Route 53 API Reference. + Document *string `type:"string" required:"true"` + + // The ID of the traffic policy for which you want to create a new version. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTrafficPolicyVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTrafficPolicyVersionInput"} + if s.Document == nil { + invalidParams.Add(request.NewErrParamRequired("Document")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains the response information for the CreateTrafficPolicyVersion +// request. +type CreateTrafficPolicyVersionOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + // A complex type that contains settings for the new version of the traffic + // policy. + TrafficPolicy *TrafficPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyVersionOutput) GoString() string { + return s.String() +} + +// A complex type that contains name server information. +type DelegationSet struct { + _ struct{} `type:"structure"` + + CallerReference *string `min:"1" type:"string"` + + Id *string `type:"string"` + + // A complex type that contains the authoritative name servers for the hosted + // zone. Use the method provided by your domain registrar to add an NS record + // to your domain for each NameServer that is assigned to your hosted zone. + NameServers []*string `locationNameList:"NameServer" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DelegationSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DelegationSet) GoString() string { + return s.String() +} + +// A complex type containing the request information for delete health check. +type DeleteHealthCheckInput struct { + _ struct{} `type:"structure"` + + // The ID of the health check to delete. 
+ HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteHealthCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHealthCheckInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteHealthCheckInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteHealthCheckInput"} + if s.HealthCheckId == nil { + invalidParams.Add(request.NewErrParamRequired("HealthCheckId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Empty response for the request. +type DeleteHealthCheckOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteHealthCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHealthCheckOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the hosted zone that you want +// to delete. +type DeleteHostedZoneInput struct { + _ struct{} `type:"structure"` + + // The ID of the hosted zone you want to delete. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHostedZoneInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteHostedZoneInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteHostedZoneInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing the response information for the request. +type DeleteHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the ID, the status, and the date and time of + // your delete request. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHostedZoneOutput) GoString() string { + return s.String() +} + +// A complex type containing the information for the delete request. +type DeleteReusableDelegationSetInput struct { + _ struct{} `type:"structure"` + + // The ID of the reusable delegation set you want to delete. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReusableDelegationSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReusableDelegationSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteReusableDelegationSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReusableDelegationSetInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Empty response for the request. +type DeleteReusableDelegationSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteReusableDelegationSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReusableDelegationSetOutput) GoString() string { + return s.String() +} + +// A request to delete a specified traffic policy version. +type DeleteTrafficPolicyInput struct { + _ struct{} `type:"structure"` + + // The ID of the traffic policy that you want to delete. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The version number of the traffic policy that you want to delete. + Version *int64 `location:"uri" locationName:"Version" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s DeleteTrafficPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTrafficPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTrafficPolicyInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Version == nil { + invalidParams.Add(request.NewErrParamRequired("Version")) + } + if s.Version != nil && *s.Version < 1 { + invalidParams.Add(request.NewErrParamMinValue("Version", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the traffic policy instance +// that you want to delete. +type DeleteTrafficPolicyInstanceInput struct { + _ struct{} `type:"structure"` + + // The ID of the traffic policy instance that you want to delete. + // + // When you delete a traffic policy instance, Amazon Route 53 also deletes + // all of the resource record sets that were created when you created the traffic + // policy instance. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTrafficPolicyInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficPolicyInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTrafficPolicyInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTrafficPolicyInstanceInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element. 
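+//
+// A minimal sketch of the call that returns this empty element, assuming the
+// generated DeleteTrafficPolicyInstance wrapper; the instance ID is a
+// placeholder. As noted above, the delete also removes the resource record
+// sets that were created for the instance:
+//
+//    params := &route53.DeleteTrafficPolicyInstanceInput{
+//        Id: aws.String("traffic-policy-instance-id"), // placeholder instance ID
+//    }
+//    resp, err := client.DeleteTrafficPolicyInstance(params)
+//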
+type DeleteTrafficPolicyInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTrafficPolicyInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficPolicyInstanceOutput) GoString() string { + return s.String() +} + +// An empty element. +type DeleteTrafficPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTrafficPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficPolicyOutput) GoString() string { + return s.String() +} + +// The name and value of a dimension for a CloudWatch metric. +type Dimension struct { + _ struct{} `type:"structure"` + + // The name of the dimension. + Name *string `min:"1" type:"string" required:"true"` + + // The value of the dimension. + Value *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Dimension) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Dimension) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to disassociate +// a VPC from an hosted zone. +type DisassociateVPCFromHostedZoneInput struct { + _ struct{} `locationName:"DisassociateVPCFromHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // Optional: Any comments you want to include about a DisassociateVPCFromHostedZoneRequest. + Comment *string `type:"string"` + + // The ID of the hosted zone you want to disassociate your VPC from. + // + // Note that you cannot disassociate the last VPC from a hosted zone. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The VPC that you want your hosted zone to be disassociated from. + VPC *VPC `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DisassociateVPCFromHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateVPCFromHostedZoneInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisassociateVPCFromHostedZoneInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateVPCFromHostedZoneInput"} + if s.HostedZoneId == nil { + invalidParams.Add(request.NewErrParamRequired("HostedZoneId")) + } + if s.VPC == nil { + invalidParams.Add(request.NewErrParamRequired("VPC")) + } + if s.VPC != nil { + if err := s.VPC.Validate(); err != nil { + invalidParams.AddNested("VPC", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing the response information for the request. +type DisassociateVPCFromHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the ID, the status, and the date and time of + // your DisassociateVPCFromHostedZoneRequest. 
+ ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DisassociateVPCFromHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateVPCFromHostedZoneOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about a geo location. +type GeoLocation struct { + _ struct{} `type:"structure"` + + // The code for a continent geo location. Note: only continent locations have + // a continent code. + // + // Valid values: AF | AN | AS | EU | OC | NA | SA + // + // Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode + // returns an InvalidInput error. + ContinentCode *string `min:"2" type:"string"` + + // The code for a country geo location. The default location uses '*' for the + // country code and will match all locations that are not matched by a geo location. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + CountryCode *string `min:"1" type:"string"` + + // The code for a country's subdivision (e.g., a province of Canada). A subdivision + // code is only valid with the appropriate country code. + // + // Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput + // error. + SubdivisionCode *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GeoLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GeoLocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GeoLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GeoLocation"} + if s.ContinentCode != nil && len(*s.ContinentCode) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ContinentCode", 2)) + } + if s.CountryCode != nil && len(*s.CountryCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CountryCode", 1)) + } + if s.SubdivisionCode != nil && len(*s.SubdivisionCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SubdivisionCode", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about a GeoLocation. +type GeoLocationDetails struct { + _ struct{} `type:"structure"` + + // The code for a continent geo location. Note: only continent locations have + // a continent code. + ContinentCode *string `min:"2" type:"string"` + + // The name of the continent. This element is only present if ContinentCode + // is also present. + ContinentName *string `min:"1" type:"string"` + + // The code for a country geo location. The default location uses '*' for the + // country code and will match all locations that are not matched by a geo location. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + CountryCode *string `min:"1" type:"string"` + + // The name of the country. This element is only present if CountryCode is also + // present. + CountryName *string `min:"1" type:"string"` + + // The code for a country's subdivision (e.g., a province of Canada). A subdivision + // code is only valid with the appropriate country code. + SubdivisionCode *string `min:"1" type:"string"` + + // The name of the subdivision. 
This element is only present if SubdivisionCode + // is also present. + SubdivisionName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GeoLocationDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GeoLocationDetails) GoString() string { + return s.String() +} + +// The input for a GetChangeDetails request. +type GetChangeDetailsInput struct { + _ struct{} `deprecated:"true" type:"structure"` + + // The ID of the change batch request. The value that you specify here is the + // value that ChangeResourceRecordSets returned in the Id element when you submitted + // the request. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetChangeDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeDetailsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetChangeDetailsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetChangeDetailsInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains the ChangeBatchRecord element. +type GetChangeDetailsOutput struct { + _ struct{} `deprecated:"true" type:"structure"` + + // A complex type that contains information about the specified change batch, + // including the change batch ID, the status of the change, and the contained + // changes. + ChangeBatchRecord *ChangeBatchRecord `deprecated:"true" type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetChangeDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeDetailsOutput) GoString() string { + return s.String() +} + +// The input for a GetChange request. +type GetChangeInput struct { + _ struct{} `type:"structure"` + + // The ID of the change batch request. The value that you specify here is the + // value that ChangeResourceRecordSets returned in the Id element when you submitted + // the request. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetChangeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetChangeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetChangeInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains the ChangeInfo element. +type GetChangeOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the specified change batch, + // including the change batch ID, the status of the change, and the date and + // time of the request. 
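+	//
+	// Editor's note: the following is an illustrative sketch, not part of the
+	// generated SDK source. It shows how calling code (with the usual aws and
+	// route53 imports) might poll a change batch until it has propagated,
+	// assuming svc is an initialized *Route53 client and "C2682N5HXP0BZ4" is
+	// a placeholder change ID.
+	//
+	//   out, err := svc.GetChange(&route53.GetChangeInput{
+	//       Id: aws.String("C2682N5HXP0BZ4"),
+	//   })
+	//   if err != nil {
+	//       return err
+	//   }
+	//   if aws.StringValue(out.ChangeInfo.Status) == "INSYNC" {
+	//       // The change has been applied to all Amazon Route 53 name servers.
+	//   }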
+ ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetChangeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeOutput) GoString() string { + return s.String() +} + +// Empty request. +type GetCheckerIpRangesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCheckerIpRangesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCheckerIpRangesInput) GoString() string { + return s.String() +} + +// A complex type that contains the CheckerIpRanges element. +type GetCheckerIpRangesOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains sorted list of IP ranges in CIDR format for + // Amazon Route 53 health checkers. + CheckerIpRanges []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetCheckerIpRangesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCheckerIpRangesOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to get a geo location. +type GetGeoLocationInput struct { + _ struct{} `type:"structure"` + + // The code for a continent geo location. Note: only continent locations have + // a continent code. + // + // Valid values: AF | AN | AS | EU | OC | NA | SA + // + // Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode + // returns an InvalidInput error. + ContinentCode *string `location:"querystring" locationName:"continentcode" min:"2" type:"string"` + + // The code for a country geo location. The default location uses '*' for the + // country code and will match all locations that are not matched by a geo location. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + CountryCode *string `location:"querystring" locationName:"countrycode" min:"1" type:"string"` + + // The code for a country's subdivision (e.g., a province of Canada). A subdivision + // code is only valid with the appropriate country code. + // + // Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput + // error. + SubdivisionCode *string `location:"querystring" locationName:"subdivisioncode" min:"1" type:"string"` +} + +// String returns the string representation +func (s GetGeoLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGeoLocationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetGeoLocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetGeoLocationInput"} + if s.ContinentCode != nil && len(*s.ContinentCode) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ContinentCode", 2)) + } + if s.CountryCode != nil && len(*s.CountryCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CountryCode", 1)) + } + if s.SubdivisionCode != nil && len(*s.SubdivisionCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SubdivisionCode", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing information about the specified geo location. 
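+//
+// Editor's note: the following is an illustrative sketch, not part of the
+// generated SDK source. It shows how calling code (with the usual aws and
+// route53 imports) might look up a geo location, assuming svc is an
+// initialized *Route53 client and "US"/"WA" are placeholder codes.
+//
+//   out, err := svc.GetGeoLocation(&route53.GetGeoLocationInput{
+//       CountryCode:     aws.String("US"),
+//       SubdivisionCode: aws.String("WA"),
+//   })
+//   if err != nil {
+//       return err
+//   }
+//   fmt.Println(aws.StringValue(out.GeoLocationDetails.SubdivisionName))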
+type GetGeoLocationOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the information about the specified geo location. + GeoLocationDetails *GeoLocationDetails `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetGeoLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGeoLocationOutput) GoString() string { + return s.String() +} + +// To retrieve a count of all your health checks, send a GET request to the +// /Route 53 API version/healthcheckcount resource. +type GetHealthCheckCountInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetHealthCheckCountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckCountInput) GoString() string { + return s.String() +} + +// A complex type that contains the count of health checks associated with the +// current AWS account. +type GetHealthCheckCountOutput struct { + _ struct{} `type:"structure"` + + // The number of health checks associated with the current AWS account. + HealthCheckCount *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckCountOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckCountOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to get a health +// check. +type GetHealthCheckInput struct { + _ struct{} `type:"structure"` + + // The ID of the health check to retrieve. + HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetHealthCheckInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetHealthCheckInput"} + if s.HealthCheckId == nil { + invalidParams.Add(request.NewErrParamRequired("HealthCheckId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the request to get the most +// recent failure reason for a health check. +type GetHealthCheckLastFailureReasonInput struct { + _ struct{} `type:"structure"` + + // The ID of the health check for which you want to retrieve the reason for + // the most recent failure. + HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckLastFailureReasonInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckLastFailureReasonInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetHealthCheckLastFailureReasonInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetHealthCheckLastFailureReasonInput"} + if s.HealthCheckId == nil { + invalidParams.Add(request.NewErrParamRequired("HealthCheckId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the most recent failure for +// the specified health check. +type GetHealthCheckLastFailureReasonOutput struct { + _ struct{} `type:"structure"` + + // A list that contains one HealthCheckObservation element for each Amazon Route + // 53 health checker. + HealthCheckObservations []*HealthCheckObservation `locationNameList:"HealthCheckObservation" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckLastFailureReasonOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckLastFailureReasonOutput) GoString() string { + return s.String() +} + +// A complex type containing information about the specified health check. +type GetHealthCheckOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the information about the specified health check. + HealthCheck *HealthCheck `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to get health +// check status for a health check. +type GetHealthCheckStatusInput struct { + _ struct{} `type:"structure"` + + // If you want Amazon Route 53 to return this resource record set in response + // to a DNS query only when a health check is passing, include the HealthCheckId + // element and specify the ID of the applicable health check. + // + // Amazon Route 53 determines whether a resource record set is healthy by periodically + // sending a request to the endpoint that is specified in the health check. + // If that endpoint returns an HTTP status code of 2xx or 3xx, the endpoint + // is healthy. If the endpoint returns an HTTP status code of 400 or greater, + // or if the endpoint doesn't respond for a certain amount of time, Amazon Route + // 53 considers the endpoint unhealthy and also considers the resource record + // set unhealthy. + // + // The HealthCheckId element is only useful when Amazon Route 53 is choosing + // between two or more resource record sets to respond to a DNS query, and you + // want Amazon Route 53 to base the choice in part on the status of a health + // check. Configuring health checks only makes sense in the following configurations: + // + // You're checking the health of the resource record sets in a weighted, latency, + // geolocation, or failover resource record set, and you specify health check + // IDs for all of the resource record sets. If the health check for one resource + // record set specifies an endpoint that is not healthy, Amazon Route 53 stops + // responding to queries using the value for that resource record set. 
You set + // EvaluateTargetHealth to true for the resource record sets in an alias, weighted + // alias, latency alias, geolocation alias, or failover alias resource record + // set, and you specify health check IDs for all of the resource record sets + // that are referenced by the alias resource record sets. For more information + // about this configuration, see EvaluateTargetHealth. + // + // Amazon Route 53 doesn't check the health of the endpoint specified in the + // resource record set, for example, the endpoint specified by the IP address + // in the Value element. When you add a HealthCheckId element to a resource + // record set, Amazon Route 53 checks the health of the endpoint that you specified + // in the health check. + // + // For geolocation resource record sets, if an endpoint is unhealthy, Amazon + // Route 53 looks for a resource record set for the larger, associated geographic + // region. For example, suppose you have resource record sets for a state in + // the United States, for the United States, for North America, and for all + // locations. If the endpoint for the state resource record set is unhealthy, + // Amazon Route 53 checks the resource record sets for the United States, for + // North America, and for all locations (a resource record set for which the + // value of CountryCode is *), in that order, until it finds a resource record + // set for which the endpoint is healthy. + // + // If your health checks specify the endpoint only by domain name, we recommend + // that you create a separate health check for each endpoint. For example, create + // a health check for each HTTP server that is serving content for www.example.com. + // For the value of FullyQualifiedDomainName, specify the domain name of the + // server (such as us-east-1-www.example.com), not the name of the resource + // record sets (example.com). + // + // In this configuration, if you create a health check for which the value + // of FullyQualifiedDomainName matches the name of the resource record sets + // and then associate the health check with those resource record sets, health + // check results will be unpredictable. + HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetHealthCheckStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetHealthCheckStatusInput"} + if s.HealthCheckId == nil { + invalidParams.Add(request.NewErrParamRequired("HealthCheckId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the status of the specified +// health check. +type GetHealthCheckStatusOutput struct { + _ struct{} `type:"structure"` + + // A list that contains one HealthCheckObservation element for each Amazon Route + // 53 health checker. 
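+	//
+	// Editor's note: the following is an illustrative sketch, not part of the
+	// generated SDK source. It shows how calling code might read the
+	// per-checker observations returned by GetHealthCheckStatus, assuming svc
+	// is an initialized *Route53 client and the health check ID is a placeholder.
+	//
+	//   out, err := svc.GetHealthCheckStatus(&route53.GetHealthCheckStatusInput{
+	//       HealthCheckId: aws.String("abcdef11-2222-3333-4444-555555fedcba"),
+	//   })
+	//   if err != nil {
+	//       return err
+	//   }
+	//   for _, obs := range out.HealthCheckObservations {
+	//       fmt.Println(aws.StringValue(obs.IPAddress),
+	//           aws.StringValue(obs.StatusReport.Status))
+	//   }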
+ HealthCheckObservations []*HealthCheckObservation `locationNameList:"HealthCheckObservation" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckStatusOutput) GoString() string { + return s.String() +} + +// To retrieve a count of all your hosted zones, send a GET request to the /Route +// 53 API version/hostedzonecount resource. +type GetHostedZoneCountInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetHostedZoneCountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostedZoneCountInput) GoString() string { + return s.String() +} + +// A complex type that contains the count of hosted zones associated with the +// current AWS account. +type GetHostedZoneCountOutput struct { + _ struct{} `type:"structure"` + + // The number of hosted zones associated with the current AWS account. + HostedZoneCount *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s GetHostedZoneCountOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostedZoneCountOutput) GoString() string { + return s.String() +} + +// The input for a GetHostedZone request. +type GetHostedZoneInput struct { + _ struct{} `type:"structure"` + + // The ID of the hosted zone for which you want to get a list of the name servers + // in the delegation set. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostedZoneInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetHostedZoneInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetHostedZoneInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing information about the specified hosted zone. +type GetHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the name servers for the specified + // hosted zone. + DelegationSet *DelegationSet `type:"structure"` + + // A complex type that contains the information about the specified hosted zone. + HostedZone *HostedZone `type:"structure" required:"true"` + + // A complex type that contains information about VPCs associated with the specified + // hosted zone. + VPCs []*VPC `locationNameList:"VPC" min:"1" type:"list"` +} + +// String returns the string representation +func (s GetHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostedZoneOutput) GoString() string { + return s.String() +} + +// The input for a GetReusableDelegationSet request. +type GetReusableDelegationSetInput struct { + _ struct{} `type:"structure"` + + // The ID of the reusable delegation set for which you want to get a list of + // the name server. 
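+	//
+	// Editor's note: the following is an illustrative sketch, not part of the
+	// generated SDK source. It shows how calling code might read the name
+	// servers from the GetHostedZoneOutput type above, assuming svc is an
+	// initialized *Route53 client and "Z1EXAMPLE" is a placeholder zone ID.
+	//
+	//   out, err := svc.GetHostedZone(&route53.GetHostedZoneInput{
+	//       Id: aws.String("Z1EXAMPLE"),
+	//   })
+	//   if err != nil {
+	//       return err
+	//   }
+	//   for _, ns := range out.DelegationSet.NameServers {
+	//       fmt.Println(aws.StringValue(ns))
+	//   }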
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetReusableDelegationSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetReusableDelegationSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetReusableDelegationSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetReusableDelegationSetInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing information about the specified reusable delegation +// set. +type GetReusableDelegationSetOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the information about the nameservers for the + // specified delegation set ID. + DelegationSet *DelegationSet `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetReusableDelegationSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetReusableDelegationSetOutput) GoString() string { + return s.String() +} + +// Gets information about a specific traffic policy version. To get the information, +// send a GET request to the /Route 53 API version/trafficpolicy resource, and +// specify the ID and the version of the traffic policy. +type GetTrafficPolicyInput struct { + _ struct{} `type:"structure"` + + // The ID of the traffic policy that you want to get information about. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The version number of the traffic policy that you want to get information + // about. + Version *int64 `location:"uri" locationName:"Version" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTrafficPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTrafficPolicyInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Version == nil { + invalidParams.Add(request.NewErrParamRequired("Version")) + } + if s.Version != nil && *s.Version < 1 { + invalidParams.Add(request.NewErrParamMinValue("Version", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// To retrieve a count of all your traffic policy instances, send a GET request +// to the /Route 53 API version/trafficpolicyinstancecount resource. +type GetTrafficPolicyInstanceCountInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceCountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceCountInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the number of traffic policy +// instances that are associated with the current AWS account. 
+type GetTrafficPolicyInstanceCountOutput struct { + _ struct{} `type:"structure"` + + // The number of traffic policy instances that are associated with the current + // AWS account. + TrafficPolicyInstanceCount *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceCountOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceCountOutput) GoString() string { + return s.String() +} + +// Gets information about a specified traffic policy instance. +// +// To get information about a traffic policy instance, send a GET request to +// the /Route 53 API version/trafficpolicyinstance/Id resource. +type GetTrafficPolicyInstanceInput struct { + _ struct{} `type:"structure"` + + // The ID of the traffic policy instance that you want to get information about. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTrafficPolicyInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTrafficPolicyInstanceInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the resource record sets that +// Amazon Route 53 created based on a specified traffic policy. +type GetTrafficPolicyInstanceOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the traffic policy instance. + TrafficPolicyInstance *TrafficPolicyInstance `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceOutput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type GetTrafficPolicyOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the specified traffic policy. + TrafficPolicy *TrafficPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyOutput) GoString() string { + return s.String() +} + +// A complex type that contains identifying information about the health check. +type HealthCheck struct { + _ struct{} `type:"structure"` + + // A unique string that identifies the request to create the health check. + CallerReference *string `min:"1" type:"string" required:"true"` + + // For CLOUDWATCH_METRIC health checks, a complex type that contains information + // about the CloudWatch alarm that you're associating with the health check. + CloudWatchAlarmConfiguration *CloudWatchAlarmConfiguration `type:"structure"` + + // A complex type that contains the health check configuration. 
+ HealthCheckConfig *HealthCheckConfig `type:"structure" required:"true"` + + // The version of the health check. You can optionally pass this value in a + // call to UpdateHealthCheck to prevent overwriting another change to the health + // check. + HealthCheckVersion *int64 `min:"1" type:"long" required:"true"` + + // The ID of the specified health check. + Id *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s HealthCheck) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HealthCheck) GoString() string { + return s.String() +} + +// A complex type that contains the health check configuration. +type HealthCheckConfig struct { + _ struct{} `type:"structure"` + + // A complex type that contains information to uniquely identify the CloudWatch + // alarm that you're associating with a Route 53 health check. + AlarmIdentifier *AlarmIdentifier `type:"structure"` + + // For a specified parent health check, a list of HealthCheckId values for the + // associated child health checks. + ChildHealthChecks []*string `locationNameList:"ChildHealthCheck" type:"list"` + + // Specify whether you want Amazon Route 53 to send the value of FullyQualifiedDomainName + // to the endpoint in the client_hello message during TLS negotiation. If you + // don't specify a value for EnableSNI, Amazon Route 53 defaults to true when + // Type is HTTPS or HTTPS_STR_MATCH and defaults to false when Type is any other + // value. + EnableSNI *bool `type:"boolean"` + + // The number of consecutive health checks that an endpoint must pass or fail + // for Amazon Route 53 to change the current status of the endpoint from unhealthy + // to healthy or vice versa. + // + // Valid values are integers between 1 and 10. For more information, see "How + // Amazon Route 53 Determines Whether an Endpoint Is Healthy" in the Amazon + // Route 53 Developer Guide. + FailureThreshold *int64 `min:"1" type:"integer"` + + // Fully qualified domain name of the instance to be health checked. + FullyQualifiedDomainName *string `type:"string"` + + // The minimum number of child health checks that must be healthy for Amazon + // Route 53 to consider the parent health check to be healthy. Valid values + // are integers between 0 and 256, inclusive. + HealthThreshold *int64 `type:"integer"` + + // IP Address of the instance being checked. + IPAddress *string `type:"string"` + + // The status of the health check when CloudWatch has insufficient data about + // the state of associated alarm. Valid values are Healthy, Unhealthy and LastKnownStatus. + InsufficientDataHealthStatus *string `type:"string" enum:"InsufficientDataHealthStatus"` + + // A boolean value that indicates whether the status of health check should + // be inverted. For example, if a health check is healthy but Inverted is True, + // then Amazon Route 53 considers the health check to be unhealthy. + Inverted *bool `type:"boolean"` + + // A Boolean value that indicates whether you want Amazon Route 53 to measure + // the latency between health checkers in multiple AWS regions and your endpoint + // and to display CloudWatch latency graphs in the Amazon Route 53 console. + MeasureLatency *bool `type:"boolean"` + + // Port on which connection will be opened to the instance to health check. + // For HTTP and HTTP_STR_MATCH this defaults to 80 if the port is not specified. + // For HTTPS and HTTPS_STR_MATCH this defaults to 443 if the port is not specified. 
+ Port *int64 `min:"1" type:"integer"` + + // A list of HealthCheckRegion values that you want Amazon Route 53 to use to + // perform health checks for the specified endpoint. You must specify at least + // three regions. + Regions []*string `locationNameList:"Region" min:"1" type:"list"` + + // The number of seconds between the time that Amazon Route 53 gets a response + // from your endpoint and the time that it sends the next health-check request. + // + // Each Amazon Route 53 health checker makes requests at this interval. Valid + // values are 10 and 30. The default value is 30. + RequestInterval *int64 `min:"10" type:"integer"` + + // Path to ping on the instance to check the health. Required for HTTP, HTTPS, + // HTTP_STR_MATCH, and HTTPS_STR_MATCH health checks. The HTTP request is issued + // to the instance on the given port and path. + ResourcePath *string `type:"string"` + + // A string to search for in the body of a health check response. Required for + // HTTP_STR_MATCH and HTTPS_STR_MATCH health checks. Amazon Route 53 considers + // case when searching for SearchString in the response body. + SearchString *string `type:"string"` + + // The type of health check to be performed. Currently supported types are TCP, + // HTTP, HTTPS, HTTP_STR_MATCH, HTTPS_STR_MATCH, CALCULATED and CLOUDWATCH_METRIC. + Type *string `type:"string" required:"true" enum:"HealthCheckType"` +} + +// String returns the string representation +func (s HealthCheckConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HealthCheckConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HealthCheckConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HealthCheckConfig"} + if s.FailureThreshold != nil && *s.FailureThreshold < 1 { + invalidParams.Add(request.NewErrParamMinValue("FailureThreshold", 1)) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + } + if s.Regions != nil && len(s.Regions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Regions", 1)) + } + if s.RequestInterval != nil && *s.RequestInterval < 10 { + invalidParams.Add(request.NewErrParamMinValue("RequestInterval", 10)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.AlarmIdentifier != nil { + if err := s.AlarmIdentifier.Validate(); err != nil { + invalidParams.AddNested("AlarmIdentifier", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains the IP address of a Amazon Route 53 health checker +// and the reason for the health check status. +type HealthCheckObservation struct { + _ struct{} `type:"structure"` + + // The IP address of the Amazon Route 53 health checker that performed this + // health check. + IPAddress *string `type:"string"` + + // The HealthCheckRegion of the Amazon Route 53 health checker that performed + // this health check. + Region *string `min:"1" type:"string" enum:"HealthCheckRegion"` + + // A complex type that contains information about the health check status for + // the current observation. 
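+	//
+	// Editor's note: the following is an illustrative sketch, not part of the
+	// generated SDK source. It shows a minimal HealthCheckConfig (placeholder
+	// endpoint values) that satisfies the Validate method above.
+	//
+	//   cfg := &route53.HealthCheckConfig{
+	//       Type:                     aws.String("HTTPS"),
+	//       FullyQualifiedDomainName: aws.String("www.example.com"),
+	//       Port:                     aws.Int64(443),
+	//       ResourcePath:             aws.String("/health"),
+	//       FailureThreshold:         aws.Int64(3),
+	//       RequestInterval:          aws.Int64(30),
+	//   }
+	//   if err := cfg.Validate(); err != nil {
+	//       return err
+	//   }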
+ StatusReport *StatusReport `type:"structure"` +} + +// String returns the string representation +func (s HealthCheckObservation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HealthCheckObservation) GoString() string { + return s.String() +} + +// A complex type that contain information about the specified hosted zone. +type HostedZone struct { + _ struct{} `type:"structure"` + + // A unique string that identifies the request to create the hosted zone. + CallerReference *string `min:"1" type:"string" required:"true"` + + // A complex type that contains the Comment element. + Config *HostedZoneConfig `type:"structure"` + + // The ID of the specified hosted zone. + Id *string `type:"string" required:"true"` + + // The name of the domain. This must be a fully-specified domain, for example, + // www.example.com. The trailing dot is optional; Amazon Route 53 assumes that + // the domain name is fully qualified. This means that Amazon Route 53 treats + // www.example.com (without a trailing dot) and www.example.com. (with a trailing + // dot) as identical. + // + // This is the name you have registered with your DNS registrar. You should + // ask your registrar to change the authoritative name servers for your domain + // to the set of NameServers elements returned in DelegationSet. + Name *string `type:"string" required:"true"` + + // Total number of resource record sets in the hosted zone. + ResourceRecordSetCount *int64 `type:"long"` +} + +// String returns the string representation +func (s HostedZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostedZone) GoString() string { + return s.String() +} + +// A complex type that contains an optional comment about your hosted zone. +// If you don't want to specify a comment, you can omit the HostedZoneConfig +// and Comment elements from the XML document. +type HostedZoneConfig struct { + _ struct{} `type:"structure"` + + // An optional comment about your hosted zone. If you don't want to specify + // a comment, you can omit the HostedZoneConfig and Comment elements from the + // XML document. + Comment *string `type:"string"` + + // GetHostedZone and ListHostedZone responses: A Boolean value that indicates + // whether a hosted zone is private. + // + // CreateHostedZone requests: When you're creating a private hosted zone (when + // you specify values for VPCId and VPCRegion), you can optionally specify true + // for PrivateZone. + PrivateZone *bool `type:"boolean"` +} + +// String returns the string representation +func (s HostedZoneConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostedZoneConfig) GoString() string { + return s.String() +} + +// The input for a ListChangeBatchesByHostedZone request. +type ListChangeBatchesByHostedZoneInput struct { + _ struct{} `deprecated:"true" type:"structure"` + + // The end of the time period you want to see changes for. + EndDate *string `location:"querystring" locationName:"endDate" deprecated:"true" type:"string" required:"true"` + + // The ID of the hosted zone that you want to see changes for. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The page marker. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // The maximum number of items on a page. 
+ MaxItems *string `location:"querystring" locationName:"maxItems" type:"string"` + + // The start of the time period you want to see changes for. + StartDate *string `location:"querystring" locationName:"startDate" deprecated:"true" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListChangeBatchesByHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListChangeBatchesByHostedZoneInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListChangeBatchesByHostedZoneInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListChangeBatchesByHostedZoneInput"} + if s.EndDate == nil { + invalidParams.Add(request.NewErrParamRequired("EndDate")) + } + if s.HostedZoneId == nil { + invalidParams.Add(request.NewErrParamRequired("HostedZoneId")) + } + if s.StartDate == nil { + invalidParams.Add(request.NewErrParamRequired("StartDate")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The input for a ListChangeBatchesByHostedZone request. +type ListChangeBatchesByHostedZoneOutput struct { + _ struct{} `deprecated:"true" type:"structure"` + + // The change batches within the given hosted zone and time period. + ChangeBatchRecords []*ChangeBatchRecord `locationNameList:"ChangeBatchRecord" min:"1" deprecated:"true" type:"list" required:"true"` + + // A flag that indicates if there are more change batches to list. + IsTruncated *bool `type:"boolean"` + + // The page marker. + Marker *string `type:"string" required:"true"` + + // The maximum number of items on a page. + MaxItems *string `type:"string" required:"true"` + + // The next page marker. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListChangeBatchesByHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListChangeBatchesByHostedZoneOutput) GoString() string { + return s.String() +} + +// The input for a ListChangeBatchesByRRSet request. +type ListChangeBatchesByRRSetInput struct { + _ struct{} `deprecated:"true" type:"structure"` + + // The end of the time period you want to see changes for. + EndDate *string `location:"querystring" locationName:"endDate" deprecated:"true" type:"string" required:"true"` + + // The ID of the hosted zone that you want to see changes for. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The page marker. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // The maximum number of items on a page. + MaxItems *string `location:"querystring" locationName:"maxItems" type:"string"` + + // The name of the RRSet that you want to see changes for. + Name *string `location:"querystring" locationName:"rrSet_name" type:"string" required:"true"` + + // The identifier of the RRSet that you want to see changes for. + SetIdentifier *string `location:"querystring" locationName:"identifier" min:"1" type:"string"` + + // The start of the time period you want to see changes for. + StartDate *string `location:"querystring" locationName:"startDate" deprecated:"true" type:"string" required:"true"` + + // The type of the RRSet that you want to see changes for. 
+ Type *string `location:"querystring" locationName:"type" type:"string" required:"true" enum:"RRType"` +} + +// String returns the string representation +func (s ListChangeBatchesByRRSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListChangeBatchesByRRSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListChangeBatchesByRRSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListChangeBatchesByRRSetInput"} + if s.EndDate == nil { + invalidParams.Add(request.NewErrParamRequired("EndDate")) + } + if s.HostedZoneId == nil { + invalidParams.Add(request.NewErrParamRequired("HostedZoneId")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.SetIdentifier != nil && len(*s.SetIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SetIdentifier", 1)) + } + if s.StartDate == nil { + invalidParams.Add(request.NewErrParamRequired("StartDate")) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The input for a ListChangeBatchesByRRSet request. +type ListChangeBatchesByRRSetOutput struct { + _ struct{} `deprecated:"true" type:"structure"` + + // The change batches within the given hosted zone and time period. + ChangeBatchRecords []*ChangeBatchRecord `locationNameList:"ChangeBatchRecord" min:"1" deprecated:"true" type:"list" required:"true"` + + // A flag that indicates if there are more change batches to list. + IsTruncated *bool `type:"boolean"` + + // The page marker. + Marker *string `type:"string" required:"true"` + + // The maximum number of items on a page. + MaxItems *string `type:"string" required:"true"` + + // The next page marker. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListChangeBatchesByRRSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListChangeBatchesByRRSetOutput) GoString() string { + return s.String() +} + +// The input for a ListGeoLocations request. +type ListGeoLocationsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of geo locations you want in the response body. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // The first continent code in the lexicographic ordering of geo locations that + // you want the ListGeoLocations request to list. For non-continent geo locations, + // this should be null. + // + // Valid values: AF | AN | AS | EU | OC | NA | SA + // + // Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode + // returns an InvalidInput error. + StartContinentCode *string `location:"querystring" locationName:"startcontinentcode" min:"2" type:"string"` + + // The first country code in the lexicographic ordering of geo locations that + // you want the ListGeoLocations request to list. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + StartCountryCode *string `location:"querystring" locationName:"startcountrycode" min:"1" type:"string"` + + // The first subdivision code in the lexicographic ordering of geo locations + // that you want the ListGeoLocations request to list. 
+ // + // Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput + // error. + StartSubdivisionCode *string `location:"querystring" locationName:"startsubdivisioncode" min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGeoLocationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGeoLocationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListGeoLocationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListGeoLocationsInput"} + if s.StartContinentCode != nil && len(*s.StartContinentCode) < 2 { + invalidParams.Add(request.NewErrParamMinLen("StartContinentCode", 2)) + } + if s.StartCountryCode != nil && len(*s.StartCountryCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StartCountryCode", 1)) + } + if s.StartSubdivisionCode != nil && len(*s.StartSubdivisionCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StartSubdivisionCode", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the geo locations that are +// returned by the request and information about the response. +type ListGeoLocationsOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the geo locations that are + // returned by the request. + GeoLocationDetailsList []*GeoLocationDetails `locationNameList:"GeoLocationDetails" type:"list" required:"true"` + + // A flag that indicates whether there are more geo locations to be listed. + // If your results were truncated, you can make a follow-up request for the + // next page of results by using the values included in the NextContinentCode, + // NextCountryCode, and NextSubdivisionCode elements. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of records you requested. The maximum value of MaxItems + // is 100. + MaxItems *string `type:"string" required:"true"` + + // If the results were truncated, the continent code of the next geo location + // in the list. This element is present only if IsTruncated is true and the + // next geo location to list is a continent location. + NextContinentCode *string `min:"2" type:"string"` + + // If the results were truncated, the country code of the next geo location + // in the list. This element is present only if IsTruncated is true and the + // next geo location to list is not a continent location. + NextCountryCode *string `min:"1" type:"string"` + + // If the results were truncated, the subdivision code of the next geo location + // in the list. This element is present only if IsTruncated is true and the + // next geo location has a subdivision. + NextSubdivisionCode *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGeoLocationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGeoLocationsOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your health checks, send a GET request to the /Route +// 53 API version/healthcheck resource. The response to this request includes +// a HealthChecks element with zero or more HealthCheck child elements. By default, +// the list of health checks is displayed on a single page. 
You can control +// the length of the page that is displayed by using the MaxItems parameter. +// You can use the Marker parameter to control the health check that the list +// begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +type ListHealthChecksInput struct { + _ struct{} `type:"structure"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Specify the maximum number of health checks to return per page of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` +} + +// String returns the string representation +func (s ListHealthChecksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHealthChecksInput) GoString() string { + return s.String() +} + +// A complex type that contains the response for the request. +type ListHealthChecksOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the health checks associated + // with the current AWS account. + HealthChecks []*HealthCheck `locationNameList:"HealthCheck" type:"list" required:"true"` + + // A flag indicating whether there are more health checks to be listed. If your + // results were truncated, you can make a follow-up request for the next page + // of results by using the Marker element. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `type:"string" required:"true"` + + // The maximum number of health checks to be included in the response body. + // If the number of health checks associated with this AWS account exceeds MaxItems, + // the value of IsTruncated in the response is true. Call ListHealthChecks again + // and specify the value of NextMarker from the last response in the Marker + // element of the next request to get the next page of results. + MaxItems *string `type:"string" required:"true"` + + // Indicates where to continue listing health checks. If IsTruncated is true, + // make another request to ListHealthChecks and include the value of the NextMarker + // element in the Marker element to get the next page of results. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListHealthChecksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHealthChecksOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your hosted zones in lexicographic order, send a GET +// request to the /Route 53 API version/hostedzonesbyname resource. The response +// to this request includes a HostedZones element with zero or more HostedZone +// child elements lexicographically ordered by DNS name. By default, the list +// of hosted zones is displayed on a single page. You can control the length +// of the page that is displayed by using the MaxItems parameter. 
You can use +// the DNSName and HostedZoneId parameters to control the hosted zone that the +// list begins with. +type ListHostedZonesByNameInput struct { + _ struct{} `type:"structure"` + + // The first name in the lexicographic ordering of domain names that you want + // the ListHostedZonesByNameRequest request to list. + // + // If the request returned more than one page of results, submit another request + // and specify the value of NextDNSName and NextHostedZoneId from the last response + // in the DNSName and HostedZoneId parameters to get the next page of results. + DNSName *string `location:"querystring" locationName:"dnsname" type:"string"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextDNSName and NextHostedZoneId from the last response + // in the DNSName and HostedZoneId parameters to get the next page of results. + HostedZoneId *string `location:"querystring" locationName:"hostedzoneid" type:"string"` + + // Specify the maximum number of hosted zones to return per page of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesByNameInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesByNameInput) GoString() string { + return s.String() +} + +// A complex type that contains the response for the request. +type ListHostedZonesByNameOutput struct { + _ struct{} `type:"structure"` + + // The DNSName value sent in the request. + DNSName *string `type:"string"` + + // The HostedZoneId value sent in the request. + HostedZoneId *string `type:"string"` + + // A complex type that contains information about the hosted zones associated + // with the current AWS account. + HostedZones []*HostedZone `locationNameList:"HostedZone" type:"list" required:"true"` + + // A flag indicating whether there are more hosted zones to be listed. If your + // results were truncated, you can make a follow-up request for the next page + // of results by using the NextDNSName and NextHostedZoneId elements. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of hosted zones to be included in the response body. If + // the number of hosted zones associated with this AWS account exceeds MaxItems, + // the value of IsTruncated in the ListHostedZonesByNameResponse is true. Call + // ListHostedZonesByName again and specify the value of NextDNSName and NextHostedZoneId + // elements from the previous response to get the next page of results. + MaxItems *string `type:"string" required:"true"` + + // If the value of IsTruncated in the ListHostedZonesByNameResponse is true, + // there are more hosted zones associated with the current AWS account. To get + // the next page of results, make another request to ListHostedZonesByName. + // Specify the value of NextDNSName in the DNSName parameter. Specify NextHostedZoneId + // in the HostedZoneId parameter. + NextDNSName *string `type:"string"` + + // If the value of IsTruncated in the ListHostedZonesByNameResponse is true, + // there are more hosted zones associated with the current AWS account. To get + // the next page of results, make another request to ListHostedZonesByName. + // Specify the value of NextDNSName in the DNSName parameter. Specify NextHostedZoneId + // in the HostedZoneId parameter. 
+ NextHostedZoneId *string `type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesByNameOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesByNameOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your hosted zones, send a GET request to the /Route +// 53 API version/hostedzone resource. The response to this request includes +// a HostedZones element with zero or more HostedZone child elements. By default, +// the list of hosted zones is displayed on a single page. You can control the +// length of the page that is displayed by using the MaxItems parameter. You +// can use the Marker parameter to control the hosted zone that the list begins +// with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to a +// value greater than 100, Amazon Route 53 returns only the first 100. +type ListHostedZonesInput struct { + _ struct{} `type:"structure"` + + DelegationSetId *string `location:"querystring" locationName:"delegationsetid" type:"string"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Specify the maximum number of hosted zones to return per page of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesInput) GoString() string { + return s.String() +} + +// A complex type that contains the response for the request. +type ListHostedZonesOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the hosted zones associated + // with the current AWS account. + HostedZones []*HostedZone `locationNameList:"HostedZone" type:"list" required:"true"` + + // A flag indicating whether there are more hosted zones to be listed. If your + // results were truncated, you can make a follow-up request for the next page + // of results by using the Marker element. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `type:"string" required:"true"` + + // The maximum number of hosted zones to be included in the response body. If + // the number of hosted zones associated with this AWS account exceeds MaxItems, + // the value of IsTruncated in the response is true. Call ListHostedZones again + // and specify the value of NextMarker in the Marker parameter to get the next + // page of results. + MaxItems *string `type:"string" required:"true"` + + // Indicates where to continue listing hosted zones. If IsTruncated is true, + // make another request to ListHostedZones and include the value of the NextMarker + // element in the Marker element to get the next page of results. 
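+	//
+	// Editor's note: the following is an illustrative sketch, not part of the
+	// generated SDK source. It shows the Marker/NextMarker pagination pattern
+	// described above, assuming svc is an initialized *Route53 client.
+	//
+	//   in := &route53.ListHostedZonesInput{}
+	//   for {
+	//       page, err := svc.ListHostedZones(in)
+	//       if err != nil {
+	//           return err
+	//       }
+	//       for _, zone := range page.HostedZones {
+	//           fmt.Println(aws.StringValue(zone.Name))
+	//       }
+	//       if !aws.BoolValue(page.IsTruncated) {
+	//           break
+	//       }
+	//       in.Marker = page.NextMarker
+	//   }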
+ NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesOutput) GoString() string { + return s.String() +} + +// The input for a ListResourceRecordSets request. +type ListResourceRecordSetsInput struct { + _ struct{} `type:"structure"` + + // The ID of the hosted zone that contains the resource record sets that you + // want to get. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The maximum number of records you want in the response body. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // Weighted resource record sets only: If results were truncated for a given + // DNS name and type, specify the value of NextRecordIdentifier from the previous + // response to get the next resource record set that has the current DNS name + // and type. + StartRecordIdentifier *string `location:"querystring" locationName:"identifier" min:"1" type:"string"` + + // The first name in the lexicographic ordering of domain names that you want + // the ListResourceRecordSets request to list. + StartRecordName *string `location:"querystring" locationName:"name" type:"string"` + + // The DNS type at which to begin the listing of resource record sets. + // + // Valid values: A | AAAA | CNAME | MX | NS | PTR | SOA | SPF | SRV | TXT + // + // Values for Weighted Resource Record Sets: A | AAAA | CNAME | TXT + // + // Values for Regional Resource Record Sets: A | AAAA | CNAME | TXT + // + // Values for Alias Resource Record Sets: A | AAAA + // + // Constraint: Specifying type without specifying name returns an InvalidInput + // error. + StartRecordType *string `location:"querystring" locationName:"type" type:"string" enum:"RRType"` +} + +// String returns the string representation +func (s ListResourceRecordSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceRecordSetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListResourceRecordSetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListResourceRecordSetsInput"} + if s.HostedZoneId == nil { + invalidParams.Add(request.NewErrParamRequired("HostedZoneId")) + } + if s.StartRecordIdentifier != nil && len(*s.StartRecordIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StartRecordIdentifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the resource record sets that +// are returned by the request and information about the response. +type ListResourceRecordSetsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more resource record sets to be listed. + // If your results were truncated, you can make a follow-up request for the + // next page of results by using the NextRecordName element. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of records you requested. The maximum value of MaxItems + // is 100. 
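+	//
+	// Editor's note, illustrative only: a caller-side sketch that lists every
+	// record set in a zone; hostedZoneID is a placeholder, and svc, aws, and fmt
+	// are assumed as above. The generated Pages helper follows NextRecordName,
+	// NextRecordType, and NextRecordIdentifier across pages:
+	//
+	//	input := &route53.ListResourceRecordSetsInput{HostedZoneId: aws.String(hostedZoneID)}
+	//	err := svc.ListResourceRecordSetsPages(input,
+	//		func(page *route53.ListResourceRecordSetsOutput, lastPage bool) bool {
+	//			for _, rrset := range page.ResourceRecordSets {
+	//				fmt.Println(aws.StringValue(rrset.Name), aws.StringValue(rrset.Type))
+	//			}
+	//			return true
+	//		})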
+ MaxItems *string `type:"string" required:"true"` + + // Weighted resource record sets only: If results were truncated for a given + // DNS name and type, the value of SetIdentifier for the next resource record + // set that has the current DNS name and type. + NextRecordIdentifier *string `min:"1" type:"string"` + + // If the results were truncated, the name of the next record in the list. This + // element is present only if IsTruncated is true. + NextRecordName *string `type:"string"` + + // If the results were truncated, the type of the next record in the list. This + // element is present only if IsTruncated is true. + NextRecordType *string `type:"string" enum:"RRType"` + + // A complex type that contains information about the resource record sets that + // are returned by the request. + ResourceRecordSets []*ResourceRecordSet `locationNameList:"ResourceRecordSet" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListResourceRecordSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceRecordSetsOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your reusable delegation sets, send a GET request to +// the /Route 53 API version/delegationset resource. The response to this request +// includes a DelegationSets element with zero or more DelegationSet child elements. +// By default, the list of reusable delegation sets is displayed on a single +// page. You can control the length of the page that is displayed by using the +// MaxItems parameter. You can use the Marker parameter to control the delegation +// set that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +type ListReusableDelegationSetsInput struct { + _ struct{} `type:"structure"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Specify the maximum number of reusable delegation sets to return per page + // of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` +} + +// String returns the string representation +func (s ListReusableDelegationSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReusableDelegationSetsInput) GoString() string { + return s.String() +} + +// A complex type that contains the response for the request. +type ListReusableDelegationSetsOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the reusable delegation sets + // associated with the current AWS account. + DelegationSets []*DelegationSet `locationNameList:"DelegationSet" type:"list" required:"true"` + + // A flag indicating whether there are more reusable delegation sets to be listed. + // If your results were truncated, you can make a follow-up request for the + // next page of results by using the Marker element. 
+ // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `type:"string" required:"true"` + + // The maximum number of reusable delegation sets to be included in the response + // body. If the number of reusable delegation sets associated with this AWS + // account exceeds MaxItems, the value of IsTruncated in the response is true. + // To get the next page of results, call ListReusableDelegationSets again and + // specify the value of NextMarker from the previous response in the Marker + // element of the request. + MaxItems *string `type:"string" required:"true"` + + // Indicates where to continue listing reusable delegation sets. If IsTruncated + // is true, make another request to ListReusableDelegationSets and include the + // value of the NextMarker element in the Marker element of the previous response + // to get the next page of results. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListReusableDelegationSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReusableDelegationSetsOutput) GoString() string { + return s.String() +} + +// A complex type containing information about a request for a list of the tags +// that are associated with an individual resource. +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The ID of the resource for which you want to retrieve tags. + ResourceId *string `location:"uri" locationName:"ResourceId" type:"string" required:"true"` + + // The type of the resource. + // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. + ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true" enum:"TagResourceType"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing tags for the specified resource. +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // A ResourceTagSet containing tags associated with the specified resource. + ResourceTagSet *ResourceTagSet `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// A complex type containing information about a request for a list of the tags +// that are associated with up to 10 specified resources. 
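+//
+// Editor's note, illustrative only: the single-resource variant is the more
+// common call from application code. Assuming svc, aws, and fmt as above, a
+// placeholder hostedZoneID, and the generated TagResourceType constants:
+//
+//	out, err := svc.ListTagsForResource(&route53.ListTagsForResourceInput{
+//		ResourceType: aws.String(route53.TagResourceTypeHostedzone),
+//		ResourceId:   aws.String(hostedZoneID),
+//	})
+//	if err == nil {
+//		for _, tag := range out.ResourceTagSet.Tags {
+//			fmt.Println(aws.StringValue(tag.Key), "=", aws.StringValue(tag.Value))
+//		}
+//	}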
+type ListTagsForResourcesInput struct { + _ struct{} `locationName:"ListTagsForResourcesRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A complex type that contains the ResourceId element for each resource for + // which you want to get a list of tags. + ResourceIds []*string `locationNameList:"ResourceId" min:"1" type:"list" required:"true"` + + // The type of the resources. + // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. + ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true" enum:"TagResourceType"` +} + +// String returns the string representation +func (s ListTagsForResourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourcesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourcesInput"} + if s.ResourceIds == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceIds")) + } + if s.ResourceIds != nil && len(s.ResourceIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceIds", 1)) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing tags for the specified resources. +type ListTagsForResourcesOutput struct { + _ struct{} `type:"structure"` + + // A list of ResourceTagSets containing tags associated with the specified resources. + ResourceTagSets []*ResourceTagSet `locationNameList:"ResourceTagSet" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourcesOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list the +// traffic policies that are associated with the current AWS account. +type ListTrafficPoliciesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of traffic policies to be included in the response body + // for this request. If you have more than MaxItems traffic policies, the value + // of the IsTruncated element in the response is true, and the value of the + // TrafficPolicyIdMarker element is the ID of the first traffic policy in the + // next group of MaxItems traffic policies. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For your first request to ListTrafficPolicies, do not include the TrafficPolicyIdMarker + // parameter. + // + // If you have more traffic policies than the value of MaxItems, ListTrafficPolicies + // returns only the first MaxItems traffic policies. To get the next group of + // MaxItems policies, submit another request to ListTrafficPolicies. For the + // value of TrafficPolicyIdMarker, specify the value of the TrafficPolicyIdMarker + // element that was returned in the previous response. + // + // Policies are listed in the order in which they were created. 
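+	//
+	// Editor's note, illustrative only: a caller-side sketch of that marker loop,
+	// with svc, aws, and fmt assumed as above:
+	//
+	//	input := &route53.ListTrafficPoliciesInput{MaxItems: aws.String("100")}
+	//	for {
+	//		out, err := svc.ListTrafficPolicies(input)
+	//		if err != nil {
+	//			return err
+	//		}
+	//		for _, summary := range out.TrafficPolicySummaries {
+	//			fmt.Println(aws.StringValue(summary.Name))
+	//		}
+	//		if !aws.BoolValue(out.IsTruncated) {
+	//			break
+	//		}
+	//		input.TrafficPolicyIdMarker = out.TrafficPolicyIdMarker
+	//	}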
+ TrafficPolicyIdMarker *string `location:"querystring" locationName:"trafficpolicyid" type:"string"` +} + +// String returns the string representation +func (s ListTrafficPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPoliciesInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type ListTrafficPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more traffic policies to be listed. + // If the response was truncated, you can get the next group of MaxItems traffic + // policies by calling ListTrafficPolicies again and specifying the value of + // the TrafficPolicyIdMarker element in the TrafficPolicyIdMarker request parameter. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicies + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // If the value of IsTruncated is true, TrafficPolicyIdMarker is the ID of the + // first traffic policy in the next group of MaxItems traffic policies. + TrafficPolicyIdMarker *string `type:"string" required:"true"` + + // A list that contains one TrafficPolicySummary element for each traffic policy + // that was created by the current AWS account. + TrafficPolicySummaries []*TrafficPolicySummary `locationNameList:"TrafficPolicySummary" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPoliciesOutput) GoString() string { + return s.String() +} + +// A request for the traffic policy instances that you created in a specified +// hosted zone. +type ListTrafficPolicyInstancesByHostedZoneInput struct { + _ struct{} `type:"structure"` + + // The ID of the hosted zone for which you want to list traffic policy instances. + HostedZoneId *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // The maximum number of traffic policy instances to be included in the response + // body for this request. If you have more than MaxItems traffic policy instances, + // the value of the IsTruncated element in the response is true, and the values + // of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker + // represent the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For the first request to ListTrafficPolicyInstancesByHostedZone, omit this + // value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker + // is the name of the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + // + // If the value of IsTruncated in the previous response was false, omit this + // value. + TrafficPolicyInstanceNameMarker *string `location:"querystring" locationName:"trafficpolicyinstancename" type:"string"` + + // For the first request to ListTrafficPolicyInstancesByHostedZone, omit this + // value. 
+ // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker + // is the DNS type of the first traffic policy instance in the next group of + // MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + TrafficPolicyInstanceTypeMarker *string `location:"querystring" locationName:"trafficpolicyinstancetype" type:"string" enum:"RRType"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTrafficPolicyInstancesByHostedZoneInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTrafficPolicyInstancesByHostedZoneInput"} + if s.HostedZoneId == nil { + invalidParams.Add(request.NewErrParamRequired("HostedZoneId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains the response information for the request. +type ListTrafficPolicyInstancesByHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more traffic policy instances to + // be listed. If the response was truncated, you can get the next group of MaxItems + // traffic policy instances by calling ListTrafficPolicyInstancesByHostedZone + // again and specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, + // and TrafficPolicyInstanceTypeMarker elements in the corresponding request + // parameters. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstancesByHostedZone + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the + // first traffic policy instance in the next group of MaxItems traffic policy + // instances. + TrafficPolicyInstanceNameMarker *string `type:"string"` + + // If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of + // the resource record sets that are associated with the first traffic policy + // instance in the next group of MaxItems traffic policy instances. + TrafficPolicyInstanceTypeMarker *string `type:"string" enum:"RRType"` + + // A list that contains one TrafficPolicyInstance element for each traffic policy + // instance that matches the elements in the request. + TrafficPolicyInstances []*TrafficPolicyInstance `locationNameList:"TrafficPolicyInstance" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list your +// traffic policy instances. +type ListTrafficPolicyInstancesByPolicyInput struct { + _ struct{} `type:"structure"` + + // For the first request to ListTrafficPolicyInstancesByPolicy, omit this value. 
+ // + // If the value of IsTruncated in the previous response was true, HostedZoneIdMarker + // is the ID of the hosted zone for the first traffic policy instance in the + // next group of MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + // + // If the value of IsTruncated in the previous response was false, omit this + // value. + HostedZoneIdMarker *string `location:"querystring" locationName:"hostedzoneid" type:"string"` + + // The maximum number of traffic policy instances to be included in the response + // body for this request. If you have more than MaxItems traffic policy instances, + // the value of the IsTruncated element in the response is true, and the values + // of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker + // represent the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // The ID of the traffic policy for which you want to list traffic policy instances. + TrafficPolicyId *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // For the first request to ListTrafficPolicyInstancesByPolicy, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker + // is the name of the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + // + // If the value of IsTruncated in the previous response was false, omit this + // value. + TrafficPolicyInstanceNameMarker *string `location:"querystring" locationName:"trafficpolicyinstancename" type:"string"` + + // For the first request to ListTrafficPolicyInstancesByPolicy, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker + // is the DNS type of the first traffic policy instance in the next group of + // MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + TrafficPolicyInstanceTypeMarker *string `location:"querystring" locationName:"trafficpolicyinstancetype" type:"string" enum:"RRType"` + + // The version of the traffic policy for which you want to list traffic policy + // instances. The version must be associated with the traffic policy that is + // specified by TrafficPolicyId. + TrafficPolicyVersion *int64 `location:"querystring" locationName:"version" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListTrafficPolicyInstancesByPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTrafficPolicyInstancesByPolicyInput"} + if s.TrafficPolicyId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficPolicyId")) + } + if s.TrafficPolicyVersion == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficPolicyVersion")) + } + if s.TrafficPolicyVersion != nil && *s.TrafficPolicyVersion < 1 { + invalidParams.Add(request.NewErrParamMinValue("TrafficPolicyVersion", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains the response information for the request. +type ListTrafficPolicyInstancesByPolicyOutput struct { + _ struct{} `type:"structure"` + + // If IsTruncated is true, HostedZoneIdMarker is the ID of the hosted zone of + // the first traffic policy instance in the next group of MaxItems traffic policy + // instances. + HostedZoneIdMarker *string `type:"string"` + + // A flag that indicates whether there are more traffic policy instances to + // be listed. If the response was truncated, you can get the next group of MaxItems + // traffic policy instances by calling ListTrafficPolicyInstancesByPolicy again + // and specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, + // and TrafficPolicyInstanceTypeMarker elements in the corresponding request + // parameters. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstancesByPolicy + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the + // first traffic policy instance in the next group of MaxItems traffic policy + // instances. + TrafficPolicyInstanceNameMarker *string `type:"string"` + + // If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of + // the resource record sets that are associated with the first traffic policy + // instance in the next group of MaxItems traffic policy instances. + TrafficPolicyInstanceTypeMarker *string `type:"string" enum:"RRType"` + + // A list that contains one TrafficPolicyInstance element for each traffic policy + // instance that matches the elements in the request. + TrafficPolicyInstances []*TrafficPolicyInstance `locationNameList:"TrafficPolicyInstance" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByPolicyOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list your +// traffic policy instances. +type ListTrafficPolicyInstancesInput struct { + _ struct{} `type:"structure"` + + // For the first request to ListTrafficPolicyInstances, omit this value. + // + // If the value of IsTruncated in the previous response was true, you have + // more traffic policy instances. To get the next group of MaxItems traffic + // policy instances, submit another ListTrafficPolicyInstances request. 
For + // the value of HostedZoneIdMarker, specify the value of HostedZoneIdMarker + // from the previous response, which is the hosted zone ID of the first traffic + // policy instance in the next group of MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get. + HostedZoneIdMarker *string `location:"querystring" locationName:"hostedzoneid" type:"string"` + + // The maximum number of traffic policy instances to be included in the response + // body for this request. If you have more than MaxItems traffic policy instances, + // the value of the IsTruncated element in the response is true, and the values + // of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker + // represent the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For the first request to ListTrafficPolicyInstances, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker + // is the name of the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get. + TrafficPolicyInstanceNameMarker *string `location:"querystring" locationName:"trafficpolicyinstancename" type:"string"` + + // For the first request to ListTrafficPolicyInstances, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker + // is the DNS type of the first traffic policy instance in the next group of + // MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get. + TrafficPolicyInstanceTypeMarker *string `location:"querystring" locationName:"trafficpolicyinstancetype" type:"string" enum:"RRType"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type ListTrafficPolicyInstancesOutput struct { + _ struct{} `type:"structure"` + + // If IsTruncated is true, HostedZoneIdMarker is the ID of the hosted zone of + // the first traffic policy instance in the next group of MaxItems traffic policy + // instances. + HostedZoneIdMarker *string `type:"string"` + + // A flag that indicates whether there are more traffic policy instances to + // be listed. If the response was truncated, you can get the next group of MaxItems + // traffic policy instances by calling ListTrafficPolicyInstances again and + // specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, + // and TrafficPolicyInstanceTypeMarker elements in the corresponding request + // parameters. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstances + // that produced the current response. 
+ MaxItems *string `type:"string" required:"true"` + + // If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the + // first traffic policy instance in the next group of MaxItems traffic policy + // instances. + TrafficPolicyInstanceNameMarker *string `type:"string"` + + // If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of + // the resource record sets that are associated with the first traffic policy + // instance in the next group of MaxItems traffic policy instances. + TrafficPolicyInstanceTypeMarker *string `type:"string" enum:"RRType"` + + // A list that contains one TrafficPolicyInstance element for each traffic policy + // instance that matches the elements in the request. + TrafficPolicyInstances []*TrafficPolicyInstance `locationNameList:"TrafficPolicyInstance" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list your +// traffic policies. +type ListTrafficPolicyVersionsInput struct { + _ struct{} `type:"structure"` + + // Specify the value of Id of the traffic policy for which you want to list + // all versions. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The maximum number of traffic policy versions that you want Amazon Route + // 53 to include in the response body for this request. If the specified traffic + // policy has more than MaxItems versions, the value of the IsTruncated element + // in the response is true, and the value of the TrafficPolicyVersionMarker + // element is the ID of the first version in the next group of MaxItems traffic + // policy versions. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For your first request to ListTrafficPolicyVersions, do not include the TrafficPolicyVersionMarker + // parameter. + // + // If you have more traffic policy versions than the value of MaxItems, ListTrafficPolicyVersions + // returns only the first group of MaxItems versions. To get the next group + // of MaxItems traffic policy versions, submit another request to ListTrafficPolicyVersions. + // For the value of TrafficPolicyVersionMarker, specify the value of the TrafficPolicyVersionMarker + // element that was returned in the previous response. + // + // Traffic policy versions are listed in sequential order. + TrafficPolicyVersionMarker *string `location:"querystring" locationName:"trafficpolicyversion" type:"string"` +} + +// String returns the string representation +func (s ListTrafficPolicyVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTrafficPolicyVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTrafficPolicyVersionsInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains the response information for the request. 
+type ListTrafficPolicyVersionsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more traffic policies to be listed. + // If the response was truncated, you can get the next group of maxitems traffic + // policies by calling ListTrafficPolicyVersions again and specifying the value + // of the NextMarker element in the marker parameter. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the maxitems parameter in the call to ListTrafficPolicyVersions + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // A list that contains one TrafficPolicy element for each traffic policy version + // that is associated with the specified traffic policy. + TrafficPolicies []*TrafficPolicy `locationNameList:"TrafficPolicy" type:"list" required:"true"` + + // If IsTruncated is true, the value of TrafficPolicyVersionMarker identifies + // the first traffic policy in the next group of MaxItems traffic policies. + // Call ListTrafficPolicyVersions again and specify the value of TrafficPolicyVersionMarker + // in the TrafficPolicyVersionMarker request parameter. + // + // This element is present only if IsTruncated is true. + TrafficPolicyVersionMarker *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyVersionsOutput) GoString() string { + return s.String() +} + +// A complex type that contains the value of the Value element for the current +// resource record set. +type ResourceRecord struct { + _ struct{} `type:"structure"` + + // The current or new DNS record value, not to exceed 4,000 characters. In the + // case of a DELETE action, if the current value does not match the actual value, + // an error is returned. For descriptions about how to format Value for different + // record types, see Supported DNS Resource Record Types (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) + // in the Amazon Route 53 Developer Guide. + // + // You can specify more than one value for all record types except CNAME and + // SOA. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ResourceRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceRecord) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResourceRecord) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResourceRecord"} + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the current resource record +// set. +type ResourceRecordSet struct { + _ struct{} `type:"structure"` + + // Alias resource record sets only: Information about the AWS resource to which + // you are redirecting traffic. + AliasTarget *AliasTarget `type:"structure"` + + // Failover resource record sets only: To configure failover, you add the Failover + // element to two resource record sets. 
For one resource record set, you specify + // PRIMARY as the value for Failover; for the other resource record set, you + // specify SECONDARY. In addition, you include the HealthCheckId element and + // specify the health check that you want Amazon Route 53 to perform for each + // resource record set. + // + // You can create failover and failover alias resource record sets only in + // public hosted zones. Except where noted, the following failover behaviors + // assume that you have included the HealthCheckId element in both resource + // record sets: + // + // When the primary resource record set is healthy, Amazon Route 53 responds + // to DNS queries with the applicable value from the primary resource record + // set regardless of the health of the secondary resource record set. When the + // primary resource record set is unhealthy and the secondary resource record + // set is healthy, Amazon Route 53 responds to DNS queries with the applicable + // value from the secondary resource record set. When the secondary resource + // record set is unhealthy, Amazon Route 53 responds to DNS queries with the + // applicable value from the primary resource record set regardless of the health + // of the primary resource record set. If you omit the HealthCheckId element + // for the secondary resource record set, and if the primary resource record + // set is unhealthy, Amazon Route 53 always responds to DNS queries with the + // applicable value from the secondary resource record set. This is true regardless + // of the health of the associated endpoint. You cannot create non-failover + // resource record sets that have the same values for the Name and Type elements + // as failover resource record sets. + // + // For failover alias resource record sets, you must also include the EvaluateTargetHealth + // element and set the value to true. + // + // For more information about configuring failover for Amazon Route 53, see + // Amazon Route 53 Health Checks and DNS Failover (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html) + // in the Amazon Route 53 Developer Guide. + // + // Valid values: PRIMARY | SECONDARY + Failover *string `type:"string" enum:"ResourceRecordSetFailover"` + + // Geo location resource record sets only: A complex type that lets you control + // how Amazon Route 53 responds to DNS queries based on the geographic origin + // of the query. For example, if you want all queries from Africa to be routed + // to a web server with an IP address of 192.0.2.111, create a resource record + // set with a Type of A and a ContinentCode of AF. + // + // You can create geolocation and geolocation alias resource record sets only + // in public hosted zones. If you create separate resource record sets for overlapping + // geographic regions (for example, one resource record set for a continent + // and one for a country on the same continent), priority goes to the smallest + // geographic region. This allows you to route most queries for a continent + // to one resource and to route queries for a country on that continent to a + // different resource. + // + // You cannot create two geolocation resource record sets that specify the + // same geographic location. + // + // The value * in the CountryCode element matches all geographic locations + // that aren't specified in other geolocation resource record sets that have + // the same values for the Name and Type elements. + // + // Geolocation works by mapping IP addresses to locations. 
However, some IP
+	// addresses aren't mapped to geographic locations, so even if you create geolocation
+	// resource record sets that cover all seven continents, Amazon Route 53 will
+	// receive some DNS queries from locations that it can't identify. We recommend
+	// that you create a resource record set for which the value of CountryCode
+	// is *, which handles both queries that come from locations for which you haven't
+	// created geolocation resource record sets and queries from IP addresses that
+	// aren't mapped to a location. If you don't create a * resource record set,
+	// Amazon Route 53 returns a "no answer" response for queries from those locations.
+	// You cannot create non-geolocation resource record sets that have the same
+	// values for the Name and Type elements as geolocation resource record sets.
+	GeoLocation *GeoLocation `type:"structure"`
+
+	// Health Check resource record sets only, not required for alias resource record
+	// sets: An identifier that is used to identify the health check associated with
+	// the resource record set.
+	HealthCheckId *string `type:"string"`
+
+	// The name of the domain you want to perform the action on.
+	//
+	// Enter a fully qualified domain name, for example, www.example.com. You can
+	// optionally include a trailing dot. If you omit the trailing dot, Amazon Route
+	// 53 still assumes that the domain name that you specify is fully qualified.
+	// This means that Amazon Route 53 treats www.example.com (without a trailing
+	// dot) and www.example.com. (with a trailing dot) as identical.
+	//
+	// For information about how to specify characters other than a-z, 0-9, and
+	// - (hyphen) and how to specify internationalized domain names, see DNS Domain
+	// Name Format (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html)
+	// in the Amazon Route 53 Developer Guide.
+	//
+	// You can use an asterisk (*) character in the name. DNS treats the * character
+	// either as a wildcard or as the * character (ASCII 42), depending on where
+	// it appears in the name. For more information, see Using an Asterisk (*) in
+	// the Names of Hosted Zones and Resource Record Sets (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html#domain-name-format-asterisk)
+	// in the Amazon Route 53 Developer Guide.
+	//
+	// You can't use the * wildcard for resource record sets that have a type
+	// of NS.
+	Name *string `type:"string" required:"true"`
+
+	// Latency-based resource record sets only: The Amazon EC2 region where the
+	// resource that is specified in this resource record set resides. The resource
+	// typically is an AWS resource, such as an Amazon EC2 instance or an ELB load
+	// balancer, and is referred to by an IP address or a DNS domain name, depending
+	// on the record type.
+	//
+	// You can create latency and latency alias resource record sets only in public
+	// hosted zones. When Amazon Route 53 receives a DNS query for a domain name
+	// and type for which you have created latency resource record sets, Amazon
+	// Route 53 selects the latency resource record set that has the lowest latency
+	// between the end user and the associated Amazon EC2 region. Amazon Route 53
+	// then returns the value that is associated with the selected resource record
+	// set.
+	//
+	// Note the following:
+	//
+	// You can only specify one ResourceRecord per latency resource record set.
+	// You can only create one latency resource record set for each Amazon EC2 region.
+ // You are not required to create latency resource record sets for all Amazon + // EC2 regions. Amazon Route 53 will choose the region with the best latency + // from among the regions for which you create latency resource record sets. + // You cannot create non-latency resource record sets that have the same values + // for the Name and Type elements as latency resource record sets. + Region *string `min:"1" type:"string" enum:"ResourceRecordSetRegion"` + + // A complex type that contains the resource records for the current resource + // record set. + ResourceRecords []*ResourceRecord `locationNameList:"ResourceRecord" min:"1" type:"list"` + + // Weighted, Latency, Geo, and Failover resource record sets only: An identifier + // that differentiates among multiple resource record sets that have the same + // combination of DNS name and type. The value of SetIdentifier must be unique + // for each resource record set that has the same combination of DNS name and + // type. + SetIdentifier *string `min:"1" type:"string"` + + // The cache time to live for the current resource record set. Note the following: + // + // If you're creating a non-alias resource record set, TTL is required. If + // you're creating an alias resource record set, omit TTL. Amazon Route 53 uses + // the value of TTL for the alias target. If you're associating this resource + // record set with a health check (if you're adding a HealthCheckId element), + // we recommend that you specify a TTL of 60 seconds or less so clients respond + // quickly to changes in health status. All of the resource record sets in a + // group of weighted, latency, geolocation, or failover resource record sets + // must have the same value for TTL. If a group of weighted resource record + // sets includes one or more weighted alias resource record sets for which the + // alias target is an ELB load balancer, we recommend that you specify a TTL + // of 60 seconds for all of the non-alias weighted resource record sets that + // have the same name and type. Values other than 60 seconds (the TTL for load + // balancers) will change the effect of the values that you specify for Weight. + TTL *int64 `type:"long"` + + TrafficPolicyInstanceId *string `type:"string"` + + // The DNS record type. For information about different record types and how + // data is encoded for them, see Supported DNS Resource Record Types (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) + // in the Amazon Route 53 Developer Guide. + // + // Valid values for basic resource record sets: A | AAAA | CNAME | MX | NS + // | PTR | SOA | SPF | SRV | TXT + // + // Values for weighted, latency, geolocation, and failover resource record + // sets: A | AAAA | CNAME | MX | PTR | SPF | SRV | TXT. When creating a group + // of weighted, latency, geolocation, or failover resource record sets, specify + // the same value for all of the resource record sets in the group. + // + // SPF records were formerly used to verify the identity of the sender of email + // messages. However, we no longer recommend that you create resource record + // sets for which the value of Type is SPF. RFC 7208, Sender Policy Framework + // (SPF) for Authorizing Use of Domains in Email, Version 1, has been updated + // to say, "...[I]ts existence and mechanism defined in [RFC4408] have led to + // some interoperability issues. Accordingly, its use is no longer appropriate + // for SPF version 1; implementations are not to use it." 
In RFC 7208, see section + // 14.1, The SPF DNS Record Type (http://tools.ietf.org/html/rfc7208#section-14.1). + // Values for alias resource record sets: + // + // CloudFront distributions: A ELB load balancers: A | AAAA Amazon S3 buckets: + // A Another resource record set in this hosted zone: Specify the type of the + // resource record set for which you're creating the alias. Specify any value + // except NS or SOA. + Type *string `type:"string" required:"true" enum:"RRType"` + + // Weighted resource record sets only: Among resource record sets that have + // the same combination of DNS name and type, a value that determines the proportion + // of DNS queries that Amazon Route 53 responds to using the current resource + // record set. Amazon Route 53 calculates the sum of the weights for the resource + // record sets that have the same combination of DNS name and type. Amazon Route + // 53 then responds to queries based on the ratio of a resource's weight to + // the total. Note the following: + // + // You must specify a value for the Weight element for every weighted resource + // record set. You can only specify one ResourceRecord per weighted resource + // record set. You cannot create latency, failover, or geolocation resource + // record sets that have the same values for the Name and Type elements as weighted + // resource record sets. You can create a maximum of 100 weighted resource record + // sets that have the same values for the Name and Type elements. For weighted + // (but not weighted alias) resource record sets, if you set Weight to 0 for + // a resource record set, Amazon Route 53 never responds to queries with the + // applicable value for that resource record set. However, if you set Weight + // to 0 for all resource record sets that have the same combination of DNS name + // and type, traffic is routed to all resources with equal probability. + // + // The effect of setting Weight to 0 is different when you associate health + // checks with weighted resource record sets. For more information, see Options + // for Configuring Amazon Route 53 Active-Active and Active-Passive Failover + // (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-configuring-options.html) + // in the Amazon Route 53 Developer Guide. + Weight *int64 `type:"long"` +} + +// String returns the string representation +func (s ResourceRecordSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceRecordSet) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
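+//
+// Editor's note, illustrative only: a hypothetical weighted A record built from
+// the fields described above (values are placeholders; the aws helper package
+// is assumed imported). Validate can be called directly before the set is used
+// in a change batch:
+//
+//	rrset := &route53.ResourceRecordSet{
+//		Name:          aws.String("www.example.com."),
+//		Type:          aws.String(route53.RRTypeA),
+//		SetIdentifier: aws.String("us-east-1"),
+//		Weight:        aws.Int64(10),
+//		TTL:           aws.Int64(60),
+//		ResourceRecords: []*route53.ResourceRecord{
+//			{Value: aws.String("192.0.2.44")},
+//		},
+//	}
+//	if err := rrset.Validate(); err != nil {
+//		return err
+//	}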
+func (s *ResourceRecordSet) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResourceRecordSet"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Region != nil && len(*s.Region) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Region", 1)) + } + if s.ResourceRecords != nil && len(s.ResourceRecords) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceRecords", 1)) + } + if s.SetIdentifier != nil && len(*s.SetIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SetIdentifier", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.AliasTarget != nil { + if err := s.AliasTarget.Validate(); err != nil { + invalidParams.AddNested("AliasTarget", err.(request.ErrInvalidParams)) + } + } + if s.GeoLocation != nil { + if err := s.GeoLocation.Validate(); err != nil { + invalidParams.AddNested("GeoLocation", err.(request.ErrInvalidParams)) + } + } + if s.ResourceRecords != nil { + for i, v := range s.ResourceRecords { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ResourceRecords", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing a resource and its associated tags. +type ResourceTagSet struct { + _ struct{} `type:"structure"` + + // The ID for the specified resource. + ResourceId *string `type:"string"` + + // The type of the resource. + // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. + ResourceType *string `type:"string" enum:"TagResourceType"` + + // The tags associated with the specified resource. + Tags []*Tag `locationNameList:"Tag" min:"1" type:"list"` +} + +// String returns the string representation +func (s ResourceTagSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceTagSet) GoString() string { + return s.String() +} + +// A complex type that contains information about the health check status for +// the current observation. +type StatusReport struct { + _ struct{} `type:"structure"` + + // The date and time the health check status was observed, in the format YYYY-MM-DDThh:mm:ssZ, + // as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). + // The Z after the time indicates that the time is listed in Coordinated Universal + // Time (UTC). + CheckedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The observed health check status. + Status *string `type:"string"` +} + +// String returns the string representation +func (s StatusReport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatusReport) GoString() string { + return s.String() +} + +// A single tag containing a key and value. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for a Tag. + Key *string `type:"string"` + + // The value for a Tag. 
+ Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +type TrafficPolicy struct { + _ struct{} `type:"structure"` + + Comment *string `type:"string"` + + Document *string `type:"string" required:"true"` + + Id *string `type:"string" required:"true"` + + Name *string `type:"string" required:"true"` + + Type *string `type:"string" required:"true" enum:"RRType"` + + Version *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s TrafficPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficPolicy) GoString() string { + return s.String() +} + +type TrafficPolicyInstance struct { + _ struct{} `type:"structure"` + + HostedZoneId *string `type:"string" required:"true"` + + Id *string `type:"string" required:"true"` + + Message *string `type:"string" required:"true"` + + Name *string `type:"string" required:"true"` + + State *string `type:"string" required:"true"` + + TTL *int64 `type:"long" required:"true"` + + TrafficPolicyId *string `type:"string" required:"true"` + + TrafficPolicyType *string `type:"string" required:"true" enum:"RRType"` + + TrafficPolicyVersion *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s TrafficPolicyInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficPolicyInstance) GoString() string { + return s.String() +} + +type TrafficPolicySummary struct { + _ struct{} `type:"structure"` + + Id *string `type:"string" required:"true"` + + LatestVersion *int64 `min:"1" type:"integer" required:"true"` + + Name *string `type:"string" required:"true"` + + TrafficPolicyCount *int64 `min:"1" type:"integer" required:"true"` + + Type *string `type:"string" required:"true" enum:"RRType"` +} + +// String returns the string representation +func (s TrafficPolicySummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficPolicySummary) GoString() string { + return s.String() +} + +// >A complex type that contains information about the request to update a health +// check. +type UpdateHealthCheckInput struct { + _ struct{} `locationName:"UpdateHealthCheckRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A complex type that contains information to uniquely identify the CloudWatch + // alarm that you're associating with a Route 53 health check. + AlarmIdentifier *AlarmIdentifier `type:"structure"` + + // For a specified parent health check, a list of HealthCheckId values for the + // associated child health checks. + // + // Specify this value only if you want to change it. + ChildHealthChecks []*string `locationNameList:"ChildHealthCheck" type:"list"` + + // Specify whether you want Amazon Route 53 to send the value of FullyQualifiedDomainName + // to the endpoint in the client_hello message during TLS negotiation. If you + // don't specify a value for EnableSNI, Amazon Route 53 defaults to true when + // Type is HTTPS or HTTPS_STR_MATCH and defaults to false when Type is any other + // value. + // + // Specify this value only if you want to change it. 
+ EnableSNI *bool `type:"boolean"`
+
+ // The number of consecutive health checks that an endpoint must pass or fail
+ // for Amazon Route 53 to change the current status of the endpoint from unhealthy
+ // to healthy or vice versa.
+ //
+ // Valid values are integers between 1 and 10. For more information, see "How
+ // Amazon Route 53 Determines Whether an Endpoint Is Healthy" in the Amazon
+ // Route 53 Developer Guide.
+ //
+ // Specify this value only if you want to change it.
+ FailureThreshold *int64 `min:"1" type:"integer"`
+
+ // Fully qualified domain name of the instance to be health checked.
+ //
+ // Specify this value only if you want to change it.
+ FullyQualifiedDomainName *string `type:"string"`
+
+ // The ID of the health check to update.
+ HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"`
+
+ // Optional. When you specify a health check version, Amazon Route 53 compares
+ // this value with the current value in the health check, which prevents you
+ // from updating the health check when the versions don't match. Using HealthCheckVersion
+ // lets you prevent overwriting another change to the health check.
+ HealthCheckVersion *int64 `min:"1" type:"long"`
+
+ // The minimum number of child health checks that must be healthy for Amazon
+ // Route 53 to consider the parent health check to be healthy. Valid values
+ // are integers between 0 and 256, inclusive.
+ //
+ // Specify this value only if you want to change it.
+ HealthThreshold *int64 `type:"integer"`
+
+ // The IP address of the resource that you want to check.
+ //
+ // Specify this value only if you want to change it.
+ IPAddress *string `type:"string"`
+
+ InsufficientDataHealthStatus *string `type:"string" enum:"InsufficientDataHealthStatus"`
+
+ // A boolean value that indicates whether the status of the health check should
+ // be inverted. For example, if a health check is healthy but Inverted is True,
+ // then Amazon Route 53 considers the health check to be unhealthy.
+ //
+ // Specify this value only if you want to change it.
+ Inverted *bool `type:"boolean"`
+
+ // The port on which you want Amazon Route 53 to open a connection to perform
+ // health checks.
+ //
+ // Specify this value only if you want to change it.
+ Port *int64 `min:"1" type:"integer"`
+
+ // A list of HealthCheckRegion values that specify the Amazon EC2 regions that
+ // you want Amazon Route 53 to use to perform health checks. You must specify
+ // at least three regions.
+ //
+ // When you remove a region from the list, Amazon Route 53 will briefly continue
+ // to check your endpoint from that region. Specify this value only if you want
+ // to change it.
+ Regions []*string `locationNameList:"Region" min:"1" type:"list"`
+
+ // The path that you want Amazon Route 53 to request when performing health
+ // checks. The path can be any value for which your endpoint will return an
+ // HTTP status code of 2xx or 3xx when the endpoint is healthy, for example
+ // the file /docs/route53-health-check.html.
+ //
+ // Specify this value only if you want to change it.
+ ResourcePath *string `type:"string"`
+
+ // If the value of Type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string that
+ // you want Amazon Route 53 to search for in the response body from the specified
+ // resource. If the string appears in the response body, Amazon Route 53 considers
+ // the resource healthy. Amazon Route 53 considers case when searching for SearchString
+ // in the response body.
+ // + // Specify this value only if you want to change it. + SearchString *string `type:"string"` +} + +// String returns the string representation +func (s UpdateHealthCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateHealthCheckInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateHealthCheckInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateHealthCheckInput"} + if s.FailureThreshold != nil && *s.FailureThreshold < 1 { + invalidParams.Add(request.NewErrParamMinValue("FailureThreshold", 1)) + } + if s.HealthCheckId == nil { + invalidParams.Add(request.NewErrParamRequired("HealthCheckId")) + } + if s.HealthCheckVersion != nil && *s.HealthCheckVersion < 1 { + invalidParams.Add(request.NewErrParamMinValue("HealthCheckVersion", 1)) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + } + if s.Regions != nil && len(s.Regions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Regions", 1)) + } + if s.AlarmIdentifier != nil { + if err := s.AlarmIdentifier.Validate(); err != nil { + invalidParams.AddNested("AlarmIdentifier", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateHealthCheckOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains identifying information about the health check. + HealthCheck *HealthCheck `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateHealthCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateHealthCheckOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to update a hosted +// zone comment. +type UpdateHostedZoneCommentInput struct { + _ struct{} `locationName:"UpdateHostedZoneCommentRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A comment about your hosted zone. + Comment *string `type:"string"` + + // The ID of the hosted zone you want to update. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateHostedZoneCommentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateHostedZoneCommentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateHostedZoneCommentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateHostedZoneCommentInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing information about the specified hosted zone after +// the update. +type UpdateHostedZoneCommentOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contain information about the specified hosted zone. 
+ HostedZone *HostedZone `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateHostedZoneCommentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateHostedZoneCommentOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the traffic policy for which +// you want to update the comment. +type UpdateTrafficPolicyCommentInput struct { + _ struct{} `locationName:"UpdateTrafficPolicyCommentRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // The new comment for the specified traffic policy and version. + Comment *string `type:"string" required:"true"` + + // The value of Id for the traffic policy for which you want to update the comment. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of Version for the traffic policy for which you want to update + // the comment. + Version *int64 `location:"uri" locationName:"Version" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyCommentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyCommentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTrafficPolicyCommentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTrafficPolicyCommentInput"} + if s.Comment == nil { + invalidParams.Add(request.NewErrParamRequired("Comment")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Version == nil { + invalidParams.Add(request.NewErrParamRequired("Version")) + } + if s.Version != nil && *s.Version < 1 { + invalidParams.Add(request.NewErrParamMinValue("Version", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains the response information for the traffic policy. +type UpdateTrafficPolicyCommentOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the specified traffic policy. + TrafficPolicy *TrafficPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyCommentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyCommentOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the resource record sets that +// you want to update based on a specified traffic policy instance. +type UpdateTrafficPolicyInstanceInput struct { + _ struct{} `locationName:"UpdateTrafficPolicyInstanceRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // The ID of the traffic policy instance that you want to update. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The TTL that you want Amazon Route 53 to assign to all of the updated resource + // record sets. + TTL *int64 `type:"long" required:"true"` + + // The ID of the traffic policy that you want Amazon Route 53 to use to update + // resource record sets for the specified traffic policy instance. 
+ TrafficPolicyId *string `type:"string" required:"true"` + + // The version of the traffic policy that you want Amazon Route 53 to use to + // update resource record sets for the specified traffic policy instance. + TrafficPolicyVersion *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTrafficPolicyInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTrafficPolicyInstanceInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.TTL == nil { + invalidParams.Add(request.NewErrParamRequired("TTL")) + } + if s.TrafficPolicyId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficPolicyId")) + } + if s.TrafficPolicyVersion == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficPolicyVersion")) + } + if s.TrafficPolicyVersion != nil && *s.TrafficPolicyVersion < 1 { + invalidParams.Add(request.NewErrParamMinValue("TrafficPolicyVersion", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the resource record sets that +// Amazon Route 53 created based on a specified traffic policy. +type UpdateTrafficPolicyInstanceOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the updated traffic policy instance. + TrafficPolicyInstance *TrafficPolicyInstance `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyInstanceOutput) GoString() string { + return s.String() +} + +type VPC struct { + _ struct{} `type:"structure"` + + // A VPC ID + VPCId *string `type:"string"` + + VPCRegion *string `min:"1" type:"string" enum:"VPCRegion"` +} + +// String returns the string representation +func (s VPC) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VPC) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *VPC) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VPC"} + if s.VPCRegion != nil && len(*s.VPCRegion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VPCRegion", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +const ( + // @enum ChangeAction + ChangeActionCreate = "CREATE" + // @enum ChangeAction + ChangeActionDelete = "DELETE" + // @enum ChangeAction + ChangeActionUpsert = "UPSERT" +) + +const ( + // @enum ChangeStatus + ChangeStatusPending = "PENDING" + // @enum ChangeStatus + ChangeStatusInsync = "INSYNC" +) + +const ( + // @enum CloudWatchRegion + CloudWatchRegionUsEast1 = "us-east-1" + // @enum CloudWatchRegion + CloudWatchRegionUsWest1 = "us-west-1" + // @enum CloudWatchRegion + CloudWatchRegionUsWest2 = "us-west-2" + // @enum CloudWatchRegion + CloudWatchRegionEuCentral1 = "eu-central-1" + // @enum CloudWatchRegion + CloudWatchRegionEuWest1 = "eu-west-1" + // @enum CloudWatchRegion + CloudWatchRegionApSoutheast1 = "ap-southeast-1" + // @enum CloudWatchRegion + CloudWatchRegionApSoutheast2 = "ap-southeast-2" + // @enum CloudWatchRegion + CloudWatchRegionApNortheast1 = "ap-northeast-1" + // @enum CloudWatchRegion + CloudWatchRegionApNortheast2 = "ap-northeast-2" + // @enum CloudWatchRegion + CloudWatchRegionSaEast1 = "sa-east-1" +) + +const ( + // @enum ComparisonOperator + ComparisonOperatorGreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold" + // @enum ComparisonOperator + ComparisonOperatorGreaterThanThreshold = "GreaterThanThreshold" + // @enum ComparisonOperator + ComparisonOperatorLessThanThreshold = "LessThanThreshold" + // @enum ComparisonOperator + ComparisonOperatorLessThanOrEqualToThreshold = "LessThanOrEqualToThreshold" +) + +// An Amazon EC2 region that you want Amazon Route 53 to use to perform health +// checks. 
+const ( + // @enum HealthCheckRegion + HealthCheckRegionUsEast1 = "us-east-1" + // @enum HealthCheckRegion + HealthCheckRegionUsWest1 = "us-west-1" + // @enum HealthCheckRegion + HealthCheckRegionUsWest2 = "us-west-2" + // @enum HealthCheckRegion + HealthCheckRegionEuWest1 = "eu-west-1" + // @enum HealthCheckRegion + HealthCheckRegionApSoutheast1 = "ap-southeast-1" + // @enum HealthCheckRegion + HealthCheckRegionApSoutheast2 = "ap-southeast-2" + // @enum HealthCheckRegion + HealthCheckRegionApNortheast1 = "ap-northeast-1" + // @enum HealthCheckRegion + HealthCheckRegionSaEast1 = "sa-east-1" +) + +const ( + // @enum HealthCheckType + HealthCheckTypeHttp = "HTTP" + // @enum HealthCheckType + HealthCheckTypeHttps = "HTTPS" + // @enum HealthCheckType + HealthCheckTypeHttpStrMatch = "HTTP_STR_MATCH" + // @enum HealthCheckType + HealthCheckTypeHttpsStrMatch = "HTTPS_STR_MATCH" + // @enum HealthCheckType + HealthCheckTypeTcp = "TCP" + // @enum HealthCheckType + HealthCheckTypeCalculated = "CALCULATED" + // @enum HealthCheckType + HealthCheckTypeCloudwatchMetric = "CLOUDWATCH_METRIC" +) + +const ( + // @enum InsufficientDataHealthStatus + InsufficientDataHealthStatusHealthy = "Healthy" + // @enum InsufficientDataHealthStatus + InsufficientDataHealthStatusUnhealthy = "Unhealthy" + // @enum InsufficientDataHealthStatus + InsufficientDataHealthStatusLastKnownStatus = "LastKnownStatus" +) + +const ( + // @enum RRType + RRTypeSoa = "SOA" + // @enum RRType + RRTypeA = "A" + // @enum RRType + RRTypeTxt = "TXT" + // @enum RRType + RRTypeNs = "NS" + // @enum RRType + RRTypeCname = "CNAME" + // @enum RRType + RRTypeMx = "MX" + // @enum RRType + RRTypePtr = "PTR" + // @enum RRType + RRTypeSrv = "SRV" + // @enum RRType + RRTypeSpf = "SPF" + // @enum RRType + RRTypeAaaa = "AAAA" +) + +const ( + // @enum ResourceRecordSetFailover + ResourceRecordSetFailoverPrimary = "PRIMARY" + // @enum ResourceRecordSetFailover + ResourceRecordSetFailoverSecondary = "SECONDARY" +) + +const ( + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionUsEast1 = "us-east-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionUsWest1 = "us-west-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionUsWest2 = "us-west-2" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionEuWest1 = "eu-west-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionEuCentral1 = "eu-central-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApSoutheast1 = "ap-southeast-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApSoutheast2 = "ap-southeast-2" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApNortheast1 = "ap-northeast-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApNortheast2 = "ap-northeast-2" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionSaEast1 = "sa-east-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionCnNorth1 = "cn-north-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApSouth1 = "ap-south-1" +) + +const ( + // @enum Statistic + StatisticAverage = "Average" + // @enum Statistic + StatisticSum = "Sum" + // @enum Statistic + StatisticSampleCount = "SampleCount" + // @enum Statistic + StatisticMaximum = "Maximum" + // @enum Statistic + StatisticMinimum = "Minimum" +) + +const ( + // @enum TagResourceType + TagResourceTypeHealthcheck = "healthcheck" + // @enum TagResourceType + TagResourceTypeHostedzone = "hostedzone" +) + +const ( + // @enum VPCRegion + VPCRegionUsEast1 = "us-east-1" + // @enum VPCRegion + 
VPCRegionUsWest1 = "us-west-1" + // @enum VPCRegion + VPCRegionUsWest2 = "us-west-2" + // @enum VPCRegion + VPCRegionEuWest1 = "eu-west-1" + // @enum VPCRegion + VPCRegionEuCentral1 = "eu-central-1" + // @enum VPCRegion + VPCRegionApSoutheast1 = "ap-southeast-1" + // @enum VPCRegion + VPCRegionApSoutheast2 = "ap-southeast-2" + // @enum VPCRegion + VPCRegionApSouth1 = "ap-south-1" + // @enum VPCRegion + VPCRegionApNortheast1 = "ap-northeast-1" + // @enum VPCRegion + VPCRegionApNortheast2 = "ap-northeast-2" + // @enum VPCRegion + VPCRegionSaEast1 = "sa-east-1" + // @enum VPCRegion + VPCRegionCnNorth1 = "cn-north-1" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/route53/customizations.go new file mode 100644 index 000000000..91af196e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/customizations.go @@ -0,0 +1,30 @@ +package route53 + +import ( + "regexp" + + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +func init() { + initClient = func(c *client.Client) { + c.Handlers.Build.PushBack(sanitizeURL) + } + + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opChangeResourceRecordSets: + r.Handlers.UnmarshalError.Remove(restxml.UnmarshalErrorHandler) + r.Handlers.UnmarshalError.PushBack(unmarshalChangeResourceRecordSetsError) + } + } +} + +var reSanitizeURL = regexp.MustCompile(`\/%2F\w+%2F`) + +func sanitizeURL(r *request.Request) { + r.HTTPRequest.URL.Opaque = + reSanitizeURL.ReplaceAllString(r.HTTPRequest.URL.Opaque, "/") +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/service.go b/vendor/github.com/aws/aws-sdk-go/service/route53/service.go new file mode 100644 index 000000000..269c4db36 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/service.go @@ -0,0 +1,86 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package route53 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +// Route53 is a client for Route 53. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type Route53 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "route53" + +// New creates a new instance of the Route53 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a Route53 client from just a session. +// svc := route53.New(mySession) +// +// // Create a Route53 client with additional configuration +// svc := route53.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Route53 { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Route53 { + svc := &Route53{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-04-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Route53 operation and runs any +// custom request initialization. +func (c *Route53) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go new file mode 100644 index 000000000..266e9a8ba --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go @@ -0,0 +1,77 @@ +package route53 + +import ( + "bytes" + "encoding/xml" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +type baseXMLErrorResponse struct { + XMLName xml.Name +} + +type standardXMLErrorResponse struct { + XMLName xml.Name `xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +type invalidChangeBatchXMLErrorResponse struct { + XMLName xml.Name `xml:"InvalidChangeBatch"` + Messages []string `xml:"Messages>Message"` +} + +func unmarshalChangeResourceRecordSetsError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + responseBody, err := ioutil.ReadAll(r.HTTPResponse.Body) + + if err != nil { + r.Error = awserr.New("SerializationError", "failed to read Route53 XML error response", err) + return + } + + baseError := &baseXMLErrorResponse{} + + if err := xml.Unmarshal(responseBody, baseError); err != nil { + r.Error = awserr.New("SerializationError", "failed to decode Route53 XML error response", err) + return + } + + switch baseError.XMLName.Local { + case "InvalidChangeBatch": + unmarshalInvalidChangeBatchError(r, responseBody) + default: + r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader(responseBody)) + restxml.UnmarshalError(r) + } +} + +func unmarshalInvalidChangeBatchError(r *request.Request, requestBody []byte) { + resp := &invalidChangeBatchXMLErrorResponse{} + err := xml.Unmarshal(requestBody, resp) + + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err) + return + } + + const errorCode = "InvalidChangeBatch" + errors := []error{} + + for _, msg := range resp.Messages { + errors = append(errors, awserr.New(errorCode, msg, nil)) + } + + r.Error = awserr.NewRequestFailure( + awserr.NewBatchError(errorCode, "ChangeBatch errors occurred", errors), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/waiters.go 
b/vendor/github.com/aws/aws-sdk-go/service/route53/waiters.go new file mode 100644 index 000000000..04786169e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/waiters.go @@ -0,0 +1,30 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package route53 + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *Route53) WaitUntilResourceRecordSetsChanged(input *GetChangeInput) error { + waiterCfg := waiter.Config{ + Operation: "GetChange", + Delay: 30, + MaxAttempts: 60, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "ChangeInfo.Status", + Expected: "INSYNC", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go new file mode 100644 index 000000000..f11e8675f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -0,0 +1,1653 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package sts provides a client for AWS Security Token Service. +package sts + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssumeRole = "AssumeRole" + +// AssumeRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleRequest method. +// req, resp := client.AssumeRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { + op := &request.Operation{ + Name: opAssumeRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &AssumeRoleOutput{} + req.Data = output + return +} + +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) that you can use to access +// AWS resources that you might not normally have access to. Typically, you +// use AssumeRole for cross-account access or federation. For a comparison of +// AssumeRole with the other APIs that produce temporary credentials, see Requesting +// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// Important: You cannot call AssumeRole by using AWS root account credentials; +// access is denied. You must use credentials for an IAM user or an IAM role +// to call AssumeRole. 
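+//
+// A minimal usage sketch, assuming the aws, session, and sts packages from this
+// SDK are imported; the role ARN and session name below are placeholders for
+// real values:
+//
+//    sess, err := session.NewSession() // reads region and credentials from the environment
+//    if err != nil {
+//        return err // or handle the configuration error as appropriate
+//    }
+//    out, err := sts.New(sess).AssumeRole(&sts.AssumeRoleInput{
+//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder ARN
+//        RoleSessionName: aws.String("example-session"),                        // placeholder name
+//        DurationSeconds: aws.Int64(900),                                       // 15 minutes
+//    })
+//    if err == nil { // out is now filled
+//        fmt.Println(*out.Credentials.AccessKeyId)
+//    }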
+// +// For cross-account access, imagine that you own multiple accounts and need +// to access resources in each account. You could create long-term credentials +// in each account to access those resources. However, managing all those credentials +// and remembering which one can access which account can be time consuming. +// Instead, you can create one set of long-term credentials in one account and +// then use temporary security credentials to access all the other accounts +// by assuming roles in those accounts. For more information about roles, see +// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) +// in the IAM User Guide. +// +// For federation, you can, for example, grant single sign-on access to the +// AWS Management Console. If you already have an identity and authentication +// system in your corporate network, you don't have to recreate user identities +// in AWS in order to grant those user identities access to AWS. Instead, after +// a user has been authenticated, you call AssumeRole (and specify the role +// with the appropriate permissions) to get temporary security credentials for +// that user. With those temporary security credentials, you construct a sign-in +// URL that users can use to access the console. For more information, see Common +// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) +// in the IAM User Guide. +// +// The temporary security credentials are valid for the duration that you specified +// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a +// maximum of 3600 seconds (1 hour). The default is 1 hour. +// +// The temporary security credentials created by AssumeRole can be used to +// make API calls to any AWS service with the following exception: you cannot +// call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you +// choose not to pass a policy, the temporary security credentials that are +// returned by the operation have the permissions that are defined in the access +// policy of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to +// further restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// To assume a role, your AWS account must be trusted by the role. The trust +// relationship is defined in the role's trust policy when the role is created. +// That trust policy states which accounts are allowed to delegate access to +// this account's role. +// +// The user who wants to access the role must also have permissions delegated +// from the role's administrator. 
If the user is in a different account than +// the role, then the user's administrator must attach a policy that allows +// the user to call AssumeRole on the ARN of the role in the other account. +// If the user is in the same account as the role, then you can either attach +// a policy to the user (identical to the previous different account user), +// or you can add the user as a principal directly in the role's trust policy +// +// Using MFA with AssumeRole +// +// You can optionally include multi-factor authentication (MFA) information +// when you call AssumeRole. This is useful for cross-account scenarios in which +// you want to make sure that the user who is assuming the role has been authenticated +// using an AWS MFA device. In that scenario, the trust policy of the role being +// assumed includes a condition that tests for MFA authentication; if the caller +// does not include valid MFA information, the request to assume the role is +// denied. The condition in a trust policy that tests for MFA authentication +// might look like the following example. +// +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} +// +// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// in the IAM User Guide guide. +// +// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode +// parameters. The SerialNumber value identifies the user's hardware or virtual +// MFA device. The TokenCode is the time-based one-time password (TOTP) that +// the MFA devices produces. +func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + err := req.Send() + return out, err +} + +const opAssumeRoleWithSAML = "AssumeRoleWithSAML" + +// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithSAML operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRoleWithSAML method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleWithSAMLRequest method. +// req, resp := client.AssumeRoleWithSAMLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithSAML, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithSAMLInput{} + } + + req = c.newRequest(op, input, output) + output = &AssumeRoleWithSAMLOutput{} + req.Data = output + return +} + +// Returns a set of temporary security credentials for users who have been authenticated +// via a SAML authentication response. This operation provides a mechanism for +// tying an enterprise identity store or directory to role-based AWS access +// without user-specific credentials or configuration. 
For a comparison of AssumeRoleWithSAML +// with the other APIs that produce temporary credentials, see Requesting Temporary +// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of +// an access key ID, a secret access key, and a security token. Applications +// can use these temporary security credentials to sign calls to AWS services. +// +// The temporary security credentials are valid for the duration that you specified +// when calling AssumeRole, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. The duration +// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). +// The default is 1 hour. +// +// The temporary security credentials created by AssumeRoleWithSAML can be +// used to make API calls to any AWS service with the following exception: you +// cannot call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you +// choose not to pass a policy, the temporary security credentials that are +// returned by the operation have the permissions that are defined in the access +// policy of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by the intersection of both the access policy +// of the role that is being assumed, and the policy that you pass. This means +// that both policies must grant the permission for the action to be allowed. +// This gives you a way to further restrict the permissions for the resulting +// temporary security credentials. You cannot use the passed policy to grant +// permissions that are in excess of those allowed by the access policy of the +// role that is being assumed. For more information, see Permissions for AssumeRole, +// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithSAML, you must configure +// your SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider, and create +// an IAM role that specifies this SAML provider in its trust policy. +// +// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. +// The identity of the caller is validated by using keys in the metadata document +// that is uploaded for the SAML provider entity for your identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail +// logs. The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the Persistent +// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). 
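+//
+// A minimal usage sketch, assuming an existing *session.Session (sess); the two
+// ARNs are placeholders, and base64Assertion stands for the base64-encoded SAML
+// response obtained from your identity provider:
+//
+//    out, err := sts.New(sess).AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
+//        PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/example-idp"),
+//        RoleArn:       aws.String("arn:aws:iam::123456789012:role/example-saml-role"),
+//        SAMLAssertion: aws.String(base64Assertion),
+//    })
+//    if err == nil { // out is now filled
+//        fmt.Println(*out.Credentials.Expiration)
+//    }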
+// +// For more information, see the following resources: +// +// About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + err := req.Send() + return out, err +} + +const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + +// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithWebIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRoleWithWebIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. +// req, resp := client.AssumeRoleWithWebIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithWebIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithWebIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &AssumeRoleWithWebIdentityOutput{} + req.Data = output + return +} + +// Returns a set of temporary security credentials for users who have been authenticated +// in a mobile or web application with a web identity provider, such as Amazon +// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible +// identity provider. +// +// For mobile applications, we recommend that you use Amazon Cognito. You +// can use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/) +// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely +// identify a user and supply the user with a consistent identity throughout +// the lifetime of an application. +// +// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview +// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// in the AWS SDK for iOS Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of AWS security +// credentials. 
Therefore, you can distribute an application (for example, on +// mobile devices) that requests temporary security credentials without including +// long-term AWS credentials in the application, and without deploying server-based +// proxy services that use long-term AWS credentials. Instead, the identity +// of the caller is validated by using a token from the web identity provider. +// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce +// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. Applications can use these +// temporary security credentials to sign calls to AWS service APIs. +// +// The credentials are valid for the duration that you specified when calling +// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to +// a maximum of 3600 seconds (1 hour). The default is 1 hour. +// +// The temporary security credentials created by AssumeRoleWithWebIdentity +// can be used to make API calls to any AWS service with the following exception: +// you cannot call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you +// choose not to pass a policy, the temporary security credentials that are +// returned by the operation have the permissions that are defined in the access +// policy of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to +// further restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided Web Identity Token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). 
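+//
+// A minimal usage sketch, assuming an existing *session.Session (sess); the role
+// ARN and session name are placeholders, and idToken stands for the token returned
+// by the web identity provider:
+//
+//    out, err := sts.New(sess).AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+//        RoleArn:          aws.String("arn:aws:iam::123456789012:role/example-web-role"),
+//        RoleSessionName:  aws.String("example-app-session"),
+//        WebIdentityToken: aws.String(idToken),
+//    })
+//    if err == nil { // out is now filled
+//        fmt.Println(*out.SubjectFromWebIdentityToken)
+//    }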
+// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual) +// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// This interactive website lets you walk through the process of authenticating +// via Login with Amazon, Facebook, or Google, getting temporary security credentials, +// and then using those credentials to make a request to AWS. +// +// AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android +// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample apps +// that show how to invoke the identity providers, and then how to use the information +// from these providers to get and use temporary security credentials. +// +// Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313). +// This article discusses web identity federation and shows an example of how +// to use web identity federation to get access to content in Amazon S3. +func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + err := req.Send() + return out, err +} + +const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" + +// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the +// client's request for the DecodeAuthorizationMessage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DecodeAuthorizationMessage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DecodeAuthorizationMessageRequest method. +// req, resp := client.DecodeAuthorizationMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { + op := &request.Operation{ + Name: opDecodeAuthorizationMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecodeAuthorizationMessageInput{} + } + + req = c.newRequest(op, input, output) + output = &DecodeAuthorizationMessageOutput{} + req.Data = output + return +} + +// Decodes additional information about the authorization status of a request +// from an encoded message returned in response to an AWS request. +// +// For example, if a user is not authorized to perform an action that he or +// she has requested, the request returns a Client.UnauthorizedOperation response +// (an HTTP 403 response). 
Some AWS actions additionally return an encoded message +// that can provide details about this authorization failure. +// +// Only certain AWS actions return an encoded authorization message. The documentation +// for an individual action indicates whether that action returns an encoded +// message in addition to returning an HTTP code. +// +// The message is encoded because the details of the authorization status +// can constitute privileged information that the user who requested the action +// should not see. To decode an authorization status message, a user must be +// granted permissions via an IAM policy to request the DecodeAuthorizationMessage +// (sts:DecodeAuthorizationMessage) action. +// +// The decoded message includes the following type of information: +// +// Whether the request was denied due to an explicit deny or due to the absence +// of an explicit allow. For more information, see Determining Whether a Request +// is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// +// The principal who made the request. +// +// The requested action. +// +// The requested resource. +// +// The values of condition keys in the context of the user's request. +func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + err := req.Send() + return out, err +} + +const opGetCallerIdentity = "GetCallerIdentity" + +// GetCallerIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetCallerIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCallerIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCallerIdentityRequest method. +// req, resp := client.GetCallerIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { + op := &request.Operation{ + Name: opGetCallerIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCallerIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCallerIdentityOutput{} + req.Data = output + return +} + +// Returns details about the IAM identity whose credentials are used to call +// the API. +func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + err := req.Send() + return out, err +} + +const opGetFederationToken = "GetFederationToken" + +// GetFederationTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetFederationToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
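+//
+// For the GetCallerIdentity operation defined above, a minimal usage sketch,
+// assuming an existing *session.Session (sess):
+//
+//    id, err := sts.New(sess).GetCallerIdentity(&sts.GetCallerIdentityInput{})
+//    if err == nil { // id is now filled
+//        fmt.Println(*id.Account, *id.Arn, *id.UserId)
+//    }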
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetFederationToken method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetFederationTokenRequest method.
+// req, resp := client.GetFederationTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+ op := &request.Operation{
+ Name: opGetFederationToken,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetFederationTokenInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetFederationTokenOutput{}
+ req.Data = output
+ return
+}
+
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. Because
+// you must call the GetFederationToken action using the long-term security
+// credentials of an IAM user, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+// For a comparison of GetFederationToken with the other APIs that produce temporary
+// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// If you are creating a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider, we recommend that you
+// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// The GetFederationToken action must be called by using the long-term AWS
+// security credentials of an IAM user. You can also call GetFederationToken
+// using the security credentials of an AWS root account, but we do not recommend
+// it. Instead, we recommend that you create an IAM user for the purpose of
+// the proxy application and then attach a policy to the IAM user that limits
+// federated users to only the actions and resources that they need access to.
+// For more information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials that are obtained by using the long-term
+// credentials of an IAM user are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default
+// is 43200 seconds (12 hours).
Temporary credentials that are obtained by using +// AWS root account credentials have a maximum duration of 3600 seconds (1 hour). +// +// The temporary security credentials created by GetFederationToken can be +// used to make API calls to any AWS service with the following exceptions: +// +// You cannot use these credentials to call any IAM APIs. +// +// You cannot call any STS APIs. +// +// Permissions +// +// The permissions for the temporary security credentials returned by GetFederationToken +// are determined by a combination of the following: +// +// The policy or policies that are attached to the IAM user whose credentials +// are used to call GetFederationToken. +// +// The policy that is passed as a parameter in the call. +// +// The passed policy is attached to the temporary security credentials that +// result from the GetFederationToken API call--that is, to the federated user. +// When the federated user makes an AWS request, AWS evaluates the policy attached +// to the federated user in combination with the policy or policies attached +// to the IAM user whose credentials were used to call GetFederationToken. AWS +// allows the federated user's request only when both the federated user and +// the IAM user are explicitly allowed to perform the requested action. The +// passed policy cannot grant more permissions than those that are defined in +// the IAM user policy. +// +// A typical use case is that the permissions of the IAM user whose credentials +// are used to call GetFederationToken are designed to allow access to all the +// actions and resources that any federated user will need. Then, for individual +// users, you pass a policy to the operation that scopes down the permissions +// to a level that's appropriate to that individual user, using a policy that +// allows only a subset of permissions that are granted to the IAM user. +// +// If you do not pass a policy, the resulting temporary security credentials +// have no effective permissions. The only exception is when the temporary security +// credentials are used to access a resource that has a resource-based policy +// that specifically allows the federated user to access the resource. +// +// For more information about how permissions work, see Permissions for GetFederationToken +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). +// For information about using GetFederationToken to create temporary security +// credentials, see GetFederationToken—Federation Through a Custom Identity +// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + err := req.Send() + return out, err +} + +const opGetSessionToken = "GetSessionToken" + +// GetSessionTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetSessionToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSessionToken method directly +// instead. 
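+//
+// For the GetFederationToken operation described above, a minimal usage sketch,
+// assuming an existing *session.Session (sess); the federated user name is a
+// placeholder and policyJSON stands for an IAM policy document in JSON:
+//
+//    out, err := sts.New(sess).GetFederationToken(&sts.GetFederationTokenInput{
+//        Name:            aws.String("example-federated-user"),
+//        Policy:          aws.String(policyJSON),
+//        DurationSeconds: aws.Int64(3600), // 1 hour
+//    })
+//    if err == nil { // out is now filled
+//        fmt.Println(*out.FederatedUser.Arn)
+//    }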
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSessionTokenRequest method. +// req, resp := client.GetSessionTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { + op := &request.Operation{ + Name: opGetSessionToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSessionTokenInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSessionTokenOutput{} + req.Data = output + return +} + +// Returns a set of temporary credentials for an AWS account or IAM user. The +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use GetSessionToken if you want to use MFA to protect +// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled +// IAM users would need to call GetSessionToken and submit an MFA code that +// is associated with their MFA device. Using the temporary security credentials +// that are returned from the call, IAM users can then make programmatic calls +// to APIs that require MFA authentication. If you do not supply a correct MFA +// code, then the API returns an access denied error. For a comparison of GetSessionToken +// with the other APIs that produce temporary credentials, see Requesting Temporary +// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The GetSessionToken action must be called by using the long-term AWS security +// credentials of the AWS account or an IAM user. Credentials that are created +// by IAM users are valid for the duration that you specify, from 900 seconds +// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default +// of 43200 seconds (12 hours); credentials that are created by using account +// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600 +// seconds (1 hour), with a default of 1 hour. +// +// The temporary security credentials created by GetSessionToken can be used +// to make API calls to any AWS service with the following exceptions: +// +// You cannot call any IAM APIs unless MFA authentication information is +// included in the request. +// +// You cannot call any STS API except AssumeRole. +// +// We recommend that you do not call GetSessionToken with root account credentials. +// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// by creating one or more IAM users, giving them the necessary permissions, +// and using IAM users for everyday interaction with AWS. +// +// The permissions associated with the temporary security credentials returned +// by GetSessionToken are based on the permissions associated with account or +// IAM user whose credentials are used to call the action. If GetSessionToken +// is called using root account credentials, the temporary credentials have +// root account permissions. Similarly, if GetSessionToken is called using the +// credentials of an IAM user, the temporary credentials have the same permissions +// as the IAM user. 
+// +// For more information about using GetSessionToken to create temporary credentials, +// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// in the IAM User Guide. +func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + err := req.Send() + return out, err +} + +type AssumeRoleInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. + // + // This is separate from the duration of a console session that you might + // request using the returned credentials. The request to the federation endpoint + // for a console sign-in token takes a SessionDuration parameter that specifies + // the maximum length of the console session, separately from the DurationSeconds + // parameter on this API. For more information, see Creating a URL that Enables + // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // A unique identifier that is used by third parties when assuming roles in + // their customers' accounts. For each role that the third party can assume, + // they should instruct their customers to ensure the role's trust policy checks + // for the external ID that the third party generated. Each time the third party + // assumes the role, they should pass the customer's external ID. The external + // ID is useful in order to help third parties bind a role to the customer who + // created it. For more information about the external ID, see How to Use an + // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@:\/- + ExternalId *string `min:"2" type:"string"` + + // An IAM policy in JSON format. + // + // This parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both (the intersection of) the access policy of the role that + // is being assumed, and the policy that you pass. This gives you a way to further + // restrict the permissions for the resulting temporary security credentials. + // You cannot use the passed policy to grant permissions that are in excess + // of those allowed by the access policy of the role that is being assumed. + // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, + // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. 
The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the role to assume. + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests using the temporary security credentials will expose the role session + // name to the external account in their CloudTrail logs. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful AssumeRole request, including temporary +// AWS credentials that can be used to make AWS requests. +type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. An expiration can also be specified in the SAML authentication + // response's SessionNotOnOrAfter value. The actual expiration time is whichever + // value is shorter. + // + // This is separate from the duration of a console session that you might + // request using the returned credentials. 
The request to the federation endpoint
+    // for a console sign-in token takes a SessionDuration parameter that specifies
+    // the maximum length of the console session, separately from the DurationSeconds
+    // parameter on this API. For more information, see Enabling SAML 2.0 Federated
+    // Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html)
+    // in the IAM User Guide.
+    DurationSeconds *int64 `min:"900" type:"integer"`
+
+    // An IAM policy in JSON format.
+    //
+    // The policy parameter is optional. If you pass a policy, the temporary security
+    // credentials that are returned by the operation have the permissions that
+    // are allowed by both the access policy of the role that is being assumed,
+    // and the policy that you pass. This gives you a way to further restrict
+    // the permissions for the resulting temporary security credentials. You cannot
+    // use the passed policy to grant permissions that are in excess of those allowed
+    // by the access policy of the role that is being assumed. For more information,
+    // see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
+    // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+    // in the IAM User Guide.
+    //
+    // The format for this parameter, as described by its regex pattern, is a string
+    // of characters up to 2048 characters in length. The characters can be any
+    // ASCII character from the space character to the end of the valid character
+    // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+    // and carriage return (\u000D) characters.
+    //
+    // The policy plain text must be 2048 bytes or shorter. However, an internal
+    // conversion compresses it into a packed binary format with a separate limit.
+    // The PackedPolicySize response element indicates by percentage how close to
+    // the upper size limit the policy is, with 100% equaling the maximum allowed
+    // size.
+    Policy *string `min:"1" type:"string"`
+
+    // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
+    // the IdP.
+    PrincipalArn *string `min:"20" type:"string" required:"true"`
+
+    // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+    RoleArn *string `min:"20" type:"string" required:"true"`
+
+    // The base-64 encoded SAML authentication response provided by the IdP.
+    //
+    // For more information, see Configuring a Relying Party and Adding Claims
+    // (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+    // in the Using IAM guide.
+    SAMLAssertion *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithSAMLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.SAMLAssertion == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) + } + if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. + Issuer *string `type:"string"` + + // A hash value based on the concatenation of the Issuer response value, the + // AWS account ID, and the friendly name (the last part of the ARN) of the SAML + // provider in IAM. The combination of NameQualifier and Subject can be used + // to uniquely identify a federated user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. 
If the format includes any other prefix, the format + // is returned with no modifications. + SubjectType *string `type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. + // + // This is separate from the duration of a console session that you might + // request using the returned credentials. The request to the federation endpoint + // for a console sign-in token takes a SessionDuration parameter that specifies + // the maximum length of the console session, separately from the DurationSeconds + // parameter on this API. For more information, see Creating a URL that Enables + // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format. + // + // The policy parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both the access policy of the role that is being assumed, + // and the policy that you pass. This gives you a way to further restrict + // the permissions for the resulting temporary security credentials. You cannot + // use the passed policy to grant permissions that are in excess of those allowed + // by the access policy of the role that is being assumed. For more information, + // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The fully qualified host component of the domain name of the identity provider. + // + // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com + // and graph.facebook.com are the only supported identity providers for OAuth + // 2.0 access tokens. Do not include URL schemes and port numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. 
Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. + WebIdentityToken *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. 
+ // This is traditionally the client identifier issued to the application that + // requested the web identity token. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID Tokens this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by AWS when the + // role is created. + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// AWS credentials for API authentication. +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The secret access key that can be used to sign requests. 
+ SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an AWS +// request. +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // An XML document that contains the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. + // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedUser) GoString() string { + return s.String() +} + +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. 
+type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID number of the account that owns or contains the calling + // entity. + Account *string `type:"string"` + + // The AWS ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity making the call. The values returned are those listed in the + // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds + // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained + // using AWS account (root) credentials are restricted to a maximum of 3600 + // seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using AWS account (root) credentials defaults to one + // hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that is passed with the GetFederationToken call + // and evaluated along with the policy or policies that are attached to the + // IAM user whose credentials are used to call GetFederationToken. The passed + // policy is used to scope down the permissions that are available to the IAM + // user, by allowing only a subset of the permissions that are granted to the + // IAM user. The passed policy cannot grant more permissions than those granted + // to the IAM user. The final permissions for the federated user are the most + // restrictive set based on the intersection of the passed policy and the IAM + // user policy. + // + // If you do not pass a policy, the resulting temporary security credentials + // have no effective permissions. The only exception is when the temporary security + // credentials are used to access a resource that has a resource-based policy + // that specifically allows the federated user to access the resource. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. 
+ // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + // + // For more information about how permissions work, see Permissions for GetFederationToken + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). + Policy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetFederationToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value indicating the size of the policy in packed form. The + // service rejects policies for which the packed size is greater than 100 percent + // of the allowed value. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600 + // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3600 seconds (one hour). 
+ // If the duration is longer than one hour, the session for AWS account owners + // defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the AWS Management Console + // and viewing the user's security credentials. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. If MFA authentication + // is required, and the user does not provide a code when requesting a set of + // temporary security credentials, the user will receive an "access denied" + // response when requesting resources that require MFA authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s GetSessionTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetSessionToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetSessionTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. 
+ Credentials *Credentials `type:"structure"` +} + +// String returns the string representation +func (s GetSessionTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenOutput) GoString() string { + return s.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 000000000..4010cc7fa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,12 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity: + r.Handlers.Sign.Clear() // these operations are unsigned + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go new file mode 100644 index 000000000..c938e6ca1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -0,0 +1,130 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package sts + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +// The AWS Security Token Service (STS) is a web service that enables you to +// request temporary, limited-privilege credentials for AWS Identity and Access +// Management (IAM) users or for users that you authenticate (federated users). +// This guide provides descriptions of the STS API. For more detailed information +// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// As an alternative to using the API, you can use one of the AWS SDKs, which +// consist of libraries and sample code for various programming languages and +// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient +// way to create programmatic access to STS. For example, the SDKs take care +// of cryptographically signing requests, managing errors, and retrying requests +// automatically. For information about the AWS SDKs, including how to download +// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). +// +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about the Query API, +// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in Using IAM. For information about using security tokens with other AWS +// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// in the IAM User Guide. +// +// If you're new to AWS and need additional technical information about a specific +// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ +// (http://aws.amazon.com/documentation/). 
+// +// Endpoints +// +// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com +// that maps to the US East (N. Virginia) region. Additional regions are available +// and are activated by default. For more information, see Activating and Deactivating +// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) +// in the AWS General Reference. +// +// Recording API requests +// +// STS supports AWS CloudTrail, which is a service that records AWS calls for +// your AWS account and delivers log files to an Amazon S3 bucket. By using +// information collected by CloudTrail, you can determine what requests were +// successfully made to STS, who made the request, when it was made, and so +// on. To learn more about CloudTrail, including how to turn it on and find +// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type STS struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "sts" + +// New creates a new instance of the STS client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a STS client from just a session. +// svc := sts.New(mySession) +// +// // Create a STS client with additional configuration +// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *STS { + svc := &STS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2011-06-15", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a STS operation and runs any +// custom request initialization. 
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/billputer/go-namecheap/LICENSE b/vendor/github.com/billputer/go-namecheap/LICENSE new file mode 100644 index 000000000..5a4c7b979 --- /dev/null +++ b/vendor/github.com/billputer/go-namecheap/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Bill Wiens + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/billputer/go-namecheap/README.md b/vendor/github.com/billputer/go-namecheap/README.md new file mode 100644 index 000000000..3df045df0 --- /dev/null +++ b/vendor/github.com/billputer/go-namecheap/README.md @@ -0,0 +1,43 @@ +# go-namecheap + +A Go library for using [the Namecheap API](https://www.namecheap.com/support/api/intro.aspx). + +**Build Status:** [![Build Status](https://travis-ci.org/billputer/go-namecheap.png?branch=master)](https://travis-ci.org/billputer/go-namecheap) + +## Examples + +```go +package main +import ( + "fmt" + namecheap "github.com/billputer/go-namecheap" +) + +func main() { + apiUser := "billwiens" + apiToken := "xxxxxxx" + userName := "billwiens" + + client := namecheap.NewClient(apiUser, apiToken, userName) + + // Get a list of your domains + domains, _ := client.DomainsGetList() + for _, domain := range domains { + fmt.Printf("Domain: %+v\n\n", domain.Name) + } + +} +``` + +For more complete documentation, load up godoc and find the package. + +## Development + +- Source hosted at [GitHub](https://github.com/billputer/go-namecheap) +- Report issues and feature requests to [GitHub Issues](https://github.com/billputer/go-namecheap/issues) + +Pull requests welcome! + +## Attribution + +Most concepts and code borrowed from the excellent [go-dnsimple](http://github.com/rubyist/go-dnsimple). 
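The README example above only lists domains. The same client also manages DNS host records through the calls defined in dns.go, added next in this patch. A minimal sketch of a read-then-replace record update — the credentials, domain, and record values are placeholders — might look like this:

```go
package main

import (
	"fmt"
	"log"

	namecheap "github.com/billputer/go-namecheap"
)

func main() {
	// Placeholder credentials; substitute a real API user, token, and username.
	client := namecheap.NewClient("apiuser", "apitoken", "username")

	// Read the current host records for example.com (SLD "example", TLD "com").
	current, err := client.DomainsDNSGetHosts("example", "com")
	if err != nil {
		log.Fatal(err)
	}
	for _, h := range current.Hosts {
		fmt.Printf("%-6s %-10s -> %s (TTL %d)\n", h.Type, h.Name, h.Address, h.TTL)
	}

	// DomainDNSSetHosts sends the complete desired record set, so include every
	// record you want to keep, not just the ones being changed.
	result, err := client.DomainDNSSetHosts("example", "com", []namecheap.DomainDNSHost{
		{Name: "@", Type: "A", Address: "203.0.113.10", TTL: 300},
		{Name: "www", Type: "CNAME", Address: "example.com.", TTL: 300},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("records updated:", result.IsSuccess)
}
```

Note that this client's setHosts implementation builds the `HostNameN`/`RecordTypeN`/`AddressN`/`TTLN` query parameters from the slice and does not forward MXPref, so MX preference values would need separate handling.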
diff --git a/vendor/github.com/billputer/go-namecheap/dns.go b/vendor/github.com/billputer/go-namecheap/dns.go new file mode 100644 index 000000000..f1aaab5a7 --- /dev/null +++ b/vendor/github.com/billputer/go-namecheap/dns.go @@ -0,0 +1,98 @@ +package namecheap + +import ( + "fmt" + "net/url" + "strconv" +) + +const ( + domainsDNSGetHosts = "namecheap.domains.dns.getHosts" + domainsDNSSetHosts = "namecheap.domains.dns.setHosts" + domainsDNSSetCustom = "namecheap.domains.dns.setCustom" +) + +type DomainDNSGetHostsResult struct { + Domain string `xml:"Domain,attr"` + IsUsingOurDNS bool `xml:"IsUsingOurDNS,attr"` + Hosts []DomainDNSHost `xml:"host"` +} + +type DomainDNSHost struct { + ID int `xml:"HostId,attr"` + Name string `xml:"Name,attr"` + Type string `xml:"Type,attr"` + Address string `xml:"Address,attr"` + MXPref int `xml:"MXPref,attr"` + TTL int `xml:"TTL,attr"` +} + +type DomainDNSSetHostsResult struct { + Domain string `xml:"Domain,attr"` + IsSuccess bool `xml:"IsSuccess,attr"` +} + +func (client *Client) DomainsDNSGetHosts(sld, tld string) (*DomainDNSGetHostsResult, error) { + requestInfo := &ApiRequest{ + command: domainsDNSGetHosts, + method: "GET", + params: url.Values{}, + } + requestInfo.params.Set("SLD", sld) + requestInfo.params.Set("TLD", tld) + + resp, err := client.do(requestInfo) + if err != nil { + return nil, err + } + + return resp.DomainDNSHosts, nil +} + +func (client *Client) DomainDNSSetHosts( + sld, tld string, hosts []DomainDNSHost, +) (*DomainDNSSetHostsResult, error) { + requestInfo := &ApiRequest{ + command: domainsDNSSetHosts, + method: "GET", + params: url.Values{}, + } + requestInfo.params.Set("SLD", sld) + requestInfo.params.Set("TLD", tld) + + for i := range hosts { + requestInfo.params.Set(fmt.Sprintf("HostName%v", i+1), hosts[i].Name) + requestInfo.params.Set(fmt.Sprintf("RecordType%v", i+1), hosts[i].Type) + requestInfo.params.Set(fmt.Sprintf("Address%v", i+1), hosts[i].Address) + requestInfo.params.Set(fmt.Sprintf("TTL%v", i+1), strconv.Itoa(hosts[i].TTL)) + + } + + resp, err := client.do(requestInfo) + if err != nil { + return nil, err + } + return resp.DomainDNSSetHosts, nil +} + +type DomainDNSSetCustomResult struct { + Domain string `xml:"Domain,attr"` + Update bool `xml:"Update,attr"` +} + +func (client *Client) DomainDNSSetCustom(sld, tld, nameservers string) (*DomainDNSSetCustomResult, error) { + requestInfo := &ApiRequest{ + command: domainsDNSSetCustom, + method: "GET", + params: url.Values{}, + } + requestInfo.params.Set("SLD", sld) + requestInfo.params.Set("TLD", tld) + requestInfo.params.Set("Nameservers", nameservers) + + resp, err := client.do(requestInfo) + if err != nil { + return nil, err + } + return resp.DomainDNSSetCustom, nil +} diff --git a/vendor/github.com/billputer/go-namecheap/domain.go b/vendor/github.com/billputer/go-namecheap/domain.go new file mode 100644 index 000000000..127dfc09c --- /dev/null +++ b/vendor/github.com/billputer/go-namecheap/domain.go @@ -0,0 +1,136 @@ +package namecheap + +import ( + "errors" + "net/url" + "strconv" + "strings" +) + +const ( + domainsGetList = "namecheap.domains.getList" + domainsGetInfo = "namecheap.domains.getInfo" + domainsCheck = "namecheap.domains.check" + domainsCreate = "namecheap.domains.create" +) + +// DomainGetListResult represents the data returned by 'domains.getList' +type DomainGetListResult struct { + ID int `xml:"ID,attr"` + Name string `xml:"Name,attr"` + User string `xml:"User,attr"` + Created string `xml:"Created,attr"` + Expires string `xml:"Expires,attr"` + 
IsExpired bool `xml:"IsExpired,attr"` + IsLocked bool `xml:"IsLocked,attr"` + AutoRenew bool `xml:"AutoRenew,attr"` + WhoisGuard string `xml:"WhoisGuard,attr"` +} + +// DomainInfo represents the data returned by 'domains.getInfo' +type DomainInfo struct { + ID int `xml:"ID,attr"` + Name string `xml:"DomainName,attr"` + Owner string `xml:"OwnerName,attr"` + Created string `xml:"DomainDetails>CreatedDate"` + Expires string `xml:"DomainDetails>ExpiredDate"` + IsExpired bool `xml:"IsExpired,attr"` + IsLocked bool `xml:"IsLocked,attr"` + AutoRenew bool `xml:"AutoRenew,attr"` + DNSDetails DNSDetails `xml:"DnsDetails"` +} + +type DNSDetails struct { + ProviderType string `xml:"ProviderType,attr"` + IsUsingOurDNS bool `xml:"IsUsingOurDNS,attr"` + Nameservers []string `xml:"Nameserver"` +} + +type DomainCheckResult struct { + Domain string `xml:"Domain,attr"` + Available bool `xml:"Available,attr"` +} + +type DomainCreateResult struct { + Domain string `xml:"Domain,attr"` + Registered bool `xml:"Registered,attr"` + ChargedAmount float64 `xml:"ChargedAmount,attr"` + DomainID int `xml:"DomainID,attr"` + OrderID int `xml:"OrderID,attr"` + TransactionID int `xml:"TransactionID,attr"` + WhoisGuardEnable bool `xml:"WhoisGuardEnable,attr"` + NonRealTimeDomain bool `xml:"NonRealTimeDomain,attr"` +} + +func (client *Client) DomainsGetList() ([]DomainGetListResult, error) { + requestInfo := &ApiRequest{ + command: domainsGetList, + method: "GET", + params: url.Values{}, + } + + resp, err := client.do(requestInfo) + if err != nil { + return nil, err + } + + return resp.Domains, nil +} + +func (client *Client) DomainGetInfo(domainName string) (*DomainInfo, error) { + requestInfo := &ApiRequest{ + command: domainsGetInfo, + method: "GET", + params: url.Values{}, + } + + requestInfo.params.Set("DomainName", domainName) + + resp, err := client.do(requestInfo) + if err != nil { + return nil, err + } + + return resp.DomainInfo, nil +} + +func (client *Client) DomainsCheck(domainNames ...string) ([]DomainCheckResult, error) { + requestInfo := &ApiRequest{ + command: domainsCheck, + method: "GET", + params: url.Values{}, + } + + requestInfo.params.Set("DomainList", strings.Join(domainNames, ",")) + resp, err := client.do(requestInfo) + if err != nil { + return nil, err + } + + return resp.DomainsCheck, nil +} + +func (client *Client) DomainCreate(domainName string, years int) (*DomainCreateResult, error) { + if client.Registrant == nil { + return nil, errors.New("Registrant information on client cannot be empty") + } + + requestInfo := &ApiRequest{ + command: domainsCreate, + method: "POST", + params: url.Values{}, + } + + requestInfo.params.Set("DomainName", domainName) + requestInfo.params.Set("Years", strconv.Itoa(years)) + if err := client.Registrant.addValues(requestInfo.params); err != nil { + return nil, err + } + + resp, err := client.do(requestInfo) + if err != nil { + return nil, err + } + + return resp.DomainCreate, nil +} diff --git a/vendor/github.com/billputer/go-namecheap/namecheap.go b/vendor/github.com/billputer/go-namecheap/namecheap.go new file mode 100644 index 000000000..840d90297 --- /dev/null +++ b/vendor/github.com/billputer/go-namecheap/namecheap.go @@ -0,0 +1,161 @@ +// Package namecheap implements a client for the Namecheap API. +// +// In order to use this package you will need a Namecheap account and your API Token. 
+package namecheap + +import ( + "encoding/xml" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +const defaultBaseURL = "https://api.namecheap.com/xml.response" + +// Client represents a client used to make calls to the Namecheap API. +type Client struct { + ApiUser string + ApiToken string + UserName string + HttpClient *http.Client + + // Base URL for API requests. + // Defaults to the public Namecheap API, + // but can be set to a different endpoint (e.g. the sandbox). + // BaseURL should always be specified with a trailing slash. + BaseURL string + + *Registrant +} + +type ApiRequest struct { + method string + command string + params url.Values +} + +type ApiResponse struct { + Status string `xml:"Status,attr"` + Command string `xml:"RequestedCommand"` + Domains []DomainGetListResult `xml:"CommandResponse>DomainGetListResult>Domain"` + DomainInfo *DomainInfo `xml:"CommandResponse>DomainGetInfoResult"` + DomainDNSHosts *DomainDNSGetHostsResult `xml:"CommandResponse>DomainDNSGetHostsResult"` + DomainDNSSetHosts *DomainDNSSetHostsResult `xml:"CommandResponse>DomainDNSSetHostsResult"` + DomainCreate *DomainCreateResult `xml:"CommandResponse>DomainCreateResult"` + DomainsCheck []DomainCheckResult `xml:"CommandResponse>DomainCheckResult"` + DomainNSInfo *DomainNSInfoResult `xml:"CommandResponse>DomainNSInfoResult"` + DomainDNSSetCustom *DomainDNSSetCustomResult `xml:"CommandResponse>DomainDNSSetCustomResult"` + Errors []ApiError `xml:"Errors>Error"` +} + +// ApiError is the format of the error returned in the api responses. +type ApiError struct { + Number int `xml:"Number,attr"` + Message string `xml:",innerxml"` +} + +func (err *ApiError) Error() string { + return err.Message +} + +func NewClient(apiUser, apiToken, userName string) *Client { + return &Client{ + ApiUser: apiUser, + ApiToken: apiToken, + UserName: userName, + HttpClient: http.DefaultClient, + BaseURL: defaultBaseURL, + } +} + +// NewRegistrant associates a new registrant with the +func (client *Client) NewRegistrant( + firstName, lastName, + addr1, addr2, + city, state, postalCode, country, + phone, email string, +) { + client.Registrant = newRegistrant( + firstName, lastName, + addr1, addr2, + city, state, postalCode, country, + phone, email, + ) +} + +func (client *Client) do(request *ApiRequest) (*ApiResponse, error) { + if request.method == "" { + return nil, errors.New("request method cannot be blank") + } + + body, _, err := client.sendRequest(request) + if err != nil { + return nil, err + } + + resp := new(ApiResponse) + if err = xml.Unmarshal(body, resp); err != nil { + return nil, err + } + + if resp.Status == "ERROR" { + errMsg := "" + for _, apiError := range resp.Errors { + errMsg += fmt.Sprintf("Error %d: %s\n", apiError.Number, apiError.Message) + } + return nil, errors.New(errMsg) + } + + return resp, nil +} + +func (client *Client) makeRequest(request *ApiRequest) (*http.Request, error) { + url, err := url.Parse(client.BaseURL) + if err != nil { + return nil, err + } + p := request.params + p.Set("ApiUser", client.ApiUser) + p.Set("ApiKey", client.ApiToken) + p.Set("UserName", client.UserName) + // This param is required by the API, but not actually used. 
+ p.Set("ClientIp", "127.0.0.1") + p.Set("Command", request.command) + url.RawQuery = p.Encode() + + // UGH + // + // Need this for the domain name part of the domains.check endpoint + url.RawQuery = strings.Replace(url.RawQuery, "%2C", ",", -1) + + urlString := fmt.Sprintf("%s?%s", client.BaseURL, url.RawQuery) + req, err := http.NewRequest(request.method, urlString, nil) + if err != nil { + return nil, err + } + + return req, nil +} + +func (client *Client) sendRequest(request *ApiRequest) ([]byte, int, error) { + req, err := client.makeRequest(request) + if err != nil { + return nil, 0, err + } + + resp, err := client.HttpClient.Do(req) + if err != nil { + return nil, 0, err + } + defer resp.Body.Close() + + buf, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, 0, err + } + + return buf, resp.StatusCode, nil +} diff --git a/vendor/github.com/billputer/go-namecheap/ns.go b/vendor/github.com/billputer/go-namecheap/ns.go new file mode 100644 index 000000000..aabf589fc --- /dev/null +++ b/vendor/github.com/billputer/go-namecheap/ns.go @@ -0,0 +1,35 @@ +package namecheap + +import "net/url" + +const ( + nsCreate = "namecheap.domains.ns.create" + nsDelete = "namecheap.domains.ns.delete" + nsGetInfo = "namecheap.domains.ns.getInfo" + nsUpdate = "namecheap.domains.ns.update" +) + +type DomainNSInfoResult struct { + Domain string `xml:"Domain,attr"` + Nameserver string `xml:"Nameserver,attr"` + IP string `xml:"IP,attr"` + Statuses []string `xml:"NameserverStatuses>Status"` +} + +func (client *Client) NSGetInfo(sld, tld, nameserver string) (*DomainNSInfoResult, error) { + requestInfo := &ApiRequest{ + command: nsGetInfo, + method: "GET", + params: url.Values{}, + } + requestInfo.params.Set("SLD", sld) + requestInfo.params.Set("TLD", tld) + requestInfo.params.Set("Nameserver", nameserver) + + resp, err := client.do(requestInfo) + if err != nil { + return nil, err + } + + return resp.DomainNSInfo, nil +} diff --git a/vendor/github.com/billputer/go-namecheap/registrant.go b/vendor/github.com/billputer/go-namecheap/registrant.go new file mode 100644 index 000000000..5b8b30315 --- /dev/null +++ b/vendor/github.com/billputer/go-namecheap/registrant.go @@ -0,0 +1,119 @@ +package namecheap + +import ( + "errors" + "fmt" + "net/url" + "reflect" + "strings" +) + +// Registrant is a struct that contains all the data necesary to register a domain. +// That is to say, every field in this struct is REQUIRED by the namecheap api to +// crate a new domain. +// In order for `addValues` method to work, all fields must remain strings. +type Registrant struct { + RegistrantFirstName, RegistrantLastName, + RegistrantAddress1, RegistrantAddress2, RegistrantCity, + RegistrantStateProvince, RegistrantPostalCode, RegistrantCountry, + RegistrantPhone, RegistrantEmailAddress, + + TechFirstName, TechLastName, + TechAddress1, TechAddress2, + TechCity, TechStateProvince, TechPostalCode, TechCountry, + TechPhone, TechEmailAddress, + + AdminFirstName, AdminLastName, + AdminAddress1, AdminAddress2, + AdminCity, AdminStateProvince, AdminPostalCode, AdminCountry, + AdminPhone, AdminEmailAddress, + + AuxBillingFirstName, AuxBillingLastName, + AuxBillingAddress1, AuxBillingAddress2, + AuxBillingCity, AuxBillingStateProvince, AuxBillingPostalCode, AuxBillingCountry, + AuxBillingPhone, AuxBillingEmailAddress string +} + +// newRegistrant return a new registrant where all the required fields are the same. 
+// Feel free to change them as needed +func newRegistrant( + firstName, lastName, + addr1, addr2, + city, state, postalCode, country, + phone, email string, +) *Registrant { + return &Registrant{ + RegistrantFirstName: firstName, + RegistrantLastName: lastName, + RegistrantAddress1: addr1, + RegistrantAddress2: addr2, + RegistrantCity: city, + RegistrantStateProvince: state, + RegistrantPostalCode: postalCode, + RegistrantCountry: country, + RegistrantPhone: phone, + RegistrantEmailAddress: email, + TechFirstName: firstName, + TechLastName: lastName, + TechAddress1: addr1, + TechAddress2: addr2, + TechCity: city, + TechStateProvince: state, + TechPostalCode: postalCode, + TechCountry: country, + TechPhone: phone, + TechEmailAddress: email, + AdminFirstName: firstName, + AdminLastName: lastName, + AdminAddress1: addr1, + AdminAddress2: addr2, + AdminCity: city, + AdminStateProvince: state, + AdminPostalCode: postalCode, + AdminCountry: country, + AdminPhone: phone, + AdminEmailAddress: email, + AuxBillingFirstName: firstName, + AuxBillingLastName: lastName, + AuxBillingAddress1: addr1, + AuxBillingAddress2: addr2, + AuxBillingCity: city, + AuxBillingStateProvince: state, + AuxBillingPostalCode: postalCode, + AuxBillingCountry: country, + AuxBillingPhone: phone, + AuxBillingEmailAddress: email, + } +} + +// addValues adds the fields of this struct to the passed in url.Values. +// It is important that all the fields of Registrant remain string type. +func (reg *Registrant) addValues(u url.Values) error { + if u == nil { + return errors.New("nil value passed as url.Values") + } + + val := reflect.ValueOf(*reg) + t := val.Type() + for i := 0; i < val.NumField(); i++ { + fieldName := t.Field(i).Name + field := val.Field(i).String() + if ty := val.Field(i).Kind(); ty != reflect.String { + return fmt.Errorf( + "Registrant cannot have types that aren't string; %s is type %s", + fieldName, ty, + ) + } + if field == "" { + if strings.Contains(fieldName, "ddress2") { + continue + } + + return fmt.Errorf("Field %s cannot be empty", fieldName) + } + + u.Set(fieldName, fmt.Sprintf("%v", field)) + } + + return nil +} diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. 
+ +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md new file mode 100644 index 000000000..1272038a9 --- /dev/null +++ b/vendor/github.com/go-ini/ini/README.md @@ -0,0 +1,560 @@ +ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini) +=== + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. + +[简体中文](README_ZH.md) + +## Feature + +- Load multiple data sources(`[]byte` or file) with overwrites. +- Read with recursion values. +- Read with parent-child sections. +- Read with auto-increment key names. +- Read with multiple-line values. +- Read with tons of helper methods. +- Read and convert values to Go types. +- Read and **WRITE** comments of sections and keys. +- Manipulate sections, keys and comments with ease. 
+- Keep sections and keys in order as you parse and save. + +## Installation + + go get gopkg.in/ini.v1 + +## Getting Started + +### Loading from data sources + +A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many as** data sources you want. Passing other types will simply return an error. + +```go +cfg, err := ini.Load([]byte("raw data"), "filename") +``` + +Or start with an empty object: + +```go +cfg := ini.Empty() +``` + +When you cannot decide how many data sources to load at the beginning, you still able to **Append()** them later. + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +### Working with sections + +To get a section, you would need to: + +```go +section, err := cfg.GetSection("section name") +``` + +For a shortcut for default section, just give an empty string as name: + +```go +section, err := cfg.GetSection("") +``` + +When you're pretty sure the section exists, following code could make your life easier: + +```go +section := cfg.Section("") +``` + +What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you. + +To create a new section: + +```go +err := cfg.NewSection("new section") +``` + +To get a list of sections or section names: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### Working with keys + +To get a key under a section: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +Same rule applies to key operations: + +```go +key := cfg.Section("").Key("key name") +``` + +To create a new key: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +To get a list of keys or key names: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +To get a clone hash of keys and corresponding values: + +```go +hash := cfg.GetSection("").KeysHash() +``` + +### Working with values + +To get a string value: + +```go +val := cfg.Section("").Key("key name").String() +``` + +To validate key value on the fly: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +To get value with types: + +```go +// For boolean values: +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// Methods start with Must also accept one argument for default value +// when key not found or fail to parse value to given type. +// Except method MustString, which you have to pass a default value. 
+ +v = cfg.Section("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +What if my value is three-line long? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +Not a problem! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +That's cool, how about continuation lines? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +Piece of cake! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +Note that single quotes around values will be stripped: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +That's all? Hmm, no. + +#### Helper methods of working with values + +To get value with given candidates: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. + +To validate value in a given range: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +To auto-split value into slice: + +```go +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +### Save your configuration + +Finally, it's time to save your configuration to somewhere. + +A typical way to save configuration is writing it to a file: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +Another way to save is writing to a `io.Writer` interface: + +```go +// ... 
+cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +## Advanced Usage + +### Recursive Values + +For all value of keys, there is a special syntax `%()s`, where `` is the key name in same section or default section, and `%()s` will be replaced by corresponding value(empty string if key not found). You can use this syntax at most 99 level of recursions. + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### Parent-child Sections + +You can use `.` in section name to indicate parent-child relationship between two or more sections. If the key not found in the child section, library will try again on its parent section until there is no parent section. + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +### Auto-increment Key Names + +If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### Map To Struct + +Want more objective way to play with INI? Cool. + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // Things can be simpler. + err = ini.MapTo(p, "path/to/ini") + // ... + + // Just map a section? Fine. + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +Can I have default value for field? Absolutely. + +Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type. + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +It's really cool, but what's the point if you can't give me my file back from struct? + +### Reflect From Struct + +Why not? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +So, what do I get? 
+ +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +Places = HangZhou,Boston +None = +``` + +#### Name Mapper + +To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name. + +There are 2 built-in name mappers: + +- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key. +- `TitleUnderscore`: it converts to format `title_underscore` then match section or key. + +To use them: + +```go +type Info struct { + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +Same rules of name mapper apply to `ini.ReflectFromWithMapper` function. + +#### Other Notes On Map/Reflect + +Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome. + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## Getting Help + +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- [File An Issue](https://github.com/go-ini/ini/issues/new) + +## FAQs + +### What does `BlockMode` field do? + +By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster. + +### Why another INI library? + +Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster. + +To make those changes I have to confirm API broken, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time.(PS: shorter import path) + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. 
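The FAQ above claims a sizeable speed-up from `cfg.BlockMode = false` when only reading. Here is a minimal sketch of that read-only pattern; the file name `my.ini`, the `[server]` section, and the key names are illustrative assumptions, not part of the library.

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	// "my.ini" and the keys below are placeholders for illustration.
	cfg, err := ini.Load("my.ini")
	if err != nil {
		log.Fatal(err)
	}

	// Read-only fast path: with BlockMode off the library skips its
	// internal locking, so this is only safe if nothing writes concurrently.
	cfg.BlockMode = false

	port := cfg.Section("server").Key("PORT").MustInt(8080)
	debug := cfg.Section("").Key("DEBUG").MustBool(false)
	fmt.Println(port, debug)
}
```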
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md new file mode 100644 index 000000000..45e19eddd --- /dev/null +++ b/vendor/github.com/go-ini/ini/README_ZH.md @@ -0,0 +1,547 @@ +本包提供了 Go 语言中读写 INI 文件的功能。 + +## 功能特性 + +- 支持覆盖加载多个数据源(`[]byte` 或文件) +- 支持递归读取键值 +- 支持读取父子分区 +- 支持读取自增键名 +- 支持读取多行的键值 +- 支持大量辅助方法 +- 支持在读取时直接转换为 Go 语言类型 +- 支持读取和 **写入** 分区和键的注释 +- 轻松操作分区、键值和注释 +- 在保存文件时分区和键值会保持原有的顺序 + +## 下载安装 + + go get gopkg.in/ini.v1 + +## 开始使用 + +### 从数据源加载 + +一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 + +```go +cfg, err := ini.Load([]byte("raw data"), "filename") +``` + +或者从一个空白的文件开始: + +```go +cfg := ini.Empty() +``` + +当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。 + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +### 操作分区(Section) + +获取指定分区: + +```go +section, err := cfg.GetSection("section name") +``` + +如果您想要获取默认分区,则可以用空字符串代替分区名: + +```go +section, err := cfg.GetSection("") +``` + +当您非常确定某个分区是存在的,可以使用以下简便方法: + +```go +section := cfg.Section("") +``` + +如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 + +创建一个分区: + +```go +err := cfg.NewSection("new section") +``` + +获取所有分区对象或名称: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### 操作键(Key) + +获取某个分区下的键: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +和分区一样,您也可以直接获取键而忽略错误处理: + +```go +key := cfg.Section("").Key("key name") +``` + +创建一个新的键: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +获取分区下的所有键或键名: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +获取分区下的所有键值对的克隆: + +```go +hash := cfg.GetSection("").KeysHash() +``` + +### 操作键值(Value) + +获取一个类型为字符串(string)的值: + +```go +val := cfg.Section("").Key("key name").String() +``` + +获取值的同时通过自定义函数进行处理验证: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +获取其它类型的值: + +```go +// 布尔值的规则: +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值, +// 当键不存在或者转换失败时,则会直接返回该默认值。 +// 但是,MustString 方法必须传递一个默认值。 + +v = cfg.Seciont("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // 
RFC3339 +``` + +如果我的值有好多行怎么办? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +嗯哼?小 case! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +简直是小菜一碟! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +需要注意的是,值两侧的单引号会被自动剔除: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +这就是全部了?哈哈,当然不是。 + +#### 操作键值的辅助方法 + +获取键值时设定候选值: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。 + +验证获取的值是否在指定范围内: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +自动分割键值为切片(slice): + +```go +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +### 保存配置 + +终于到了这个时刻,是时候保存一下配置了。 + +比较原始的做法是输出配置到某个文件: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中: + +```go +// ... 
+cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +### 高级用法 + +#### 递归读取键值 + +在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +#### 读取父子分区 + +您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### 读取自增键名 + +如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### 映射到结构 + +想要使用更加面向对象的方式玩转 INI 吗?好主意。 + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // 一切竟可以如此的简单。 + err = ini.MapTo(p, "path/to/ini") + // ... + + // 嗯哼?只需要映射一个分区吗? + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。 + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用? + +### 从结构反射 + +可是,我有说不能吗? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +瞧瞧,奇迹发生了。 + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +Places = HangZhou,Boston +None = +``` + +#### 名称映射器(Name Mapper) + +为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。 + +目前有 2 款内置的映射器: + +- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。 +- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。 + +使用方法: + +```go +type Info struct{ + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... 
+} +``` + +使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 + +#### 映射/反射的其它说明 + +任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚! + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## 获取帮助 + +- [API 文档](https://gowalker.org/gopkg.in/ini.v1) +- [创建工单](https://github.com/go-ini/ini/issues/new) + +## 常见问题 + +### 字段 `BlockMode` 是什么? + +默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。 + +### 为什么要写另一个 INI 解析库? + +许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。 + +为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了) diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go new file mode 100644 index 000000000..1fee789a1 --- /dev/null +++ b/vendor/github.com/go-ini/ini/ini.go @@ -0,0 +1,1226 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. +package ini + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +const ( + DEFAULT_SECTION = "DEFAULT" + // Maximum allowed depth when recursively substituing variable names. + _DEPTH_VALUES = 99 + + _VERSION = "1.6.0" +) + +func Version() string { + return _VERSION +} + +var ( + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + + // Write spaces around "=" to look better. + PrettyFormat = true +) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} + +// dataSource is a interface that returns file content. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +type bytesReadCloser struct { + reader io.Reader +} + +func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { + return rc.reader.Read(p) +} + +func (rc *bytesReadCloser) Close() error { + return nil +} + +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return &bytesReadCloser{bytes.NewReader(s.data)}, nil +} + +// ____ __. +// | |/ _|____ ___.__. 
+// | <_/ __ < | | +// | | \ ___/\___ | +// |____|__ \___ > ____| +// \/ \/\/ + +// Key represents a key under a section. +type Key struct { + s *Section + Comment string + name string + value string + isAutoIncr bool +} + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// String returns string representation of value. +func (k *Key) String() string { + val := k.value + if strings.Index(val, "%") == -1 { + return val + } + + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search in the same section. + nk, err := k.s.GetKey(noption) + if err != nil { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. 
+func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string devide by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// Float64s returns list of float64 devide by given delimiter. +func (k *Key) Float64s(delim string) []float64 { + strs := k.Strings(delim) + vals := make([]float64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseFloat(strs[i], 64) + } + return vals +} + +// Ints returns list of int devide by given delimiter. 
+func (k *Key) Ints(delim string) []int { + strs := k.Strings(delim) + vals := make([]int, len(strs)) + for i := range strs { + vals[i], _ = strconv.Atoi(strs[i]) + } + return vals +} + +// Int64s returns list of int64 devide by given delimiter. +func (k *Key) Int64s(delim string) []int64 { + strs := k.Strings(delim) + vals := make([]int64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseInt(strs[i], 10, 64) + } + return vals +} + +// Uints returns list of uint devide by given delimiter. +func (k *Key) Uints(delim string) []uint { + strs := k.Strings(delim) + vals := make([]uint, len(strs)) + for i := range strs { + u, _ := strconv.ParseUint(strs[i], 10, 64) + vals[i] = uint(u) + } + return vals +} + +// Uint64s returns list of uint64 devide by given delimiter. +func (k *Key) Uint64s(delim string) []uint64 { + strs := k.Strings(delim) + vals := make([]uint64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseUint(strs[i], 10, 64) + } + return vals +} + +// TimesFormat parses with given format and returns list of time.Time devide by given delimiter. +func (k *Key) TimesFormat(format, delim string) []time.Time { + strs := k.Strings(delim) + vals := make([]time.Time, len(strs)) + for i := range strs { + vals[i], _ = time.Parse(format, strs[i]) + } + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time devide by given delimiter. +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + k.value = v +} + +// _________ __ .__ +// / _____/ ____ _____/ |_|__| ____ ____ +// \_____ \_/ __ \_/ ___\ __\ |/ _ \ / \ +// / \ ___/\ \___| | | ( <_> ) | \ +// /_______ /\___ >\___ >__| |__|\____/|___| / +// \/ \/ \/ \/ + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string +} + +func newSection(f *File, name string) *Section { + return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + s.keys[name].value = val + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = &Key{s, "", name, val, false} + s.keysHash[name] = val + return s.keys[name], nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// Key assumes named Key exists in section and returns a zero-value when not. 
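+// In practice this means lookups of optional keys need no error handling:
+// a key that does not exist is simply created on the fly with an empty value.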
+func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} + +// ___________.__.__ +// \_ _____/|__| | ____ +// | __) | | | _/ __ \ +// | \ | | |_\ ___/ +// \___ / |__|____/\___ > +// \/ \/ + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + // Make sure data is safe in multiple goroutines. + lock sync.RWMutex + + // Allow combination of multiple data sources. + dataSources []dataSource + // Actual data is stored here. + sections map[string]*Section + + // To keep data in order. + sectionList []string + + NameMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + } +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) + } +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +func Load(source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources) + return f, f.Reload() +} + +// Empty returns an empty file object. +func Empty() *File { + // Ignore error here, we sure our data is good. + f, _ := Load([]byte("")) + return f +} + +// NewSection creates a new section. 
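+//
+// A brief sketch (section name illustrative):
+//
+//	sec, err := cfg.NewSection("database")
+//
+// Requesting a name that already exists returns the existing section unchanged.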
+func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("error creating new section: empty section name") + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if inSlice(name, f.sectionList) { + return f.sections[name], nil + } + + f.sectionList = append(f.sectionList, name) + f.sections[name] = newSection(f, name) + return f.sections[name], nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + if len(name) == 0 { + name = DEFAULT_SECTION + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sec := f.sections[name] + if sec == nil { + return nil, fmt.Errorf("error when getting section: section '%s' not exists", name) + } + return sec, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + // Note: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// Section returns list of Section. +func (f *File) Sections() []*Section { + sections := make([]*Section, len(f.sectionList)) + for i := range f.sectionList { + sections[i] = f.Section(f.sectionList[i]) + } + return sections +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section. +func (f *File) DeleteSection(name string) { + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if len(name) == 0 { + name = DEFAULT_SECTION + } + + for i, s := range f.sectionList { + if s == name { + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + delete(f.sections, name) + return + } + } +} + +func cutComment(str string) string { + i := strings.Index(str, "#") + if i == -1 { + return str + } + return str[:i] +} + +func checkMultipleLines(buf *bufio.Reader, line, val, valQuote string) (string, error) { + isEnd := false + for { + next, err := buf.ReadString('\n') + if err != nil { + if err != io.EOF { + return "", err + } + isEnd = true + } + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + break + } + val += next + if isEnd { + return "", fmt.Errorf("error parsing line: missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func checkContinuationLines(buf *bufio.Reader, val string) (string, bool, error) { + isEnd := false + for { + valLen := len(val) + if valLen == 0 || val[valLen-1] != '\\' { + break + } + val = val[:valLen-1] + + next, err := buf.ReadString('\n') + if err != nil { + if err != io.EOF { + return "", isEnd, err + } + isEnd = true + } + + next = strings.TrimSpace(next) + if len(next) == 0 { + break + } + val += next + } + return val, isEnd, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) error { + buf := bufio.NewReader(reader) + + // Handle BOM-UTF8. 
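+	// (The UTF-8 BOM is the three-byte sequence 0xEF 0xBB 0xBF, i.e. 239 187 191,
+	// which the Peek below detects and skips.)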
+ // http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding + mask, err := buf.Peek(3) + if err == nil && len(mask) >= 3 && mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { + buf.Read(mask) + } + + count := 1 + comments := "" + isEnd := false + + section, err := f.NewSection(DEFAULT_SECTION) + if err != nil { + return err + } + + for { + line, err := buf.ReadString('\n') + line = strings.TrimSpace(line) + length := len(line) + + // Check error and ignore io.EOF just for a moment. + if err != nil { + if err != io.EOF { + return fmt.Errorf("error reading next line: %v", err) + } + // The last line of file could be an empty line. + if length == 0 { + break + } + isEnd = true + } + + // Skip empty lines. + if length == 0 { + continue + } + + switch { + case line[0] == '#' || line[0] == ';': // Comments. + if len(comments) == 0 { + comments = line + } else { + comments += LineBreak + line + } + continue + case line[0] == '[' && line[length-1] == ']': // New sction. + section, err = f.NewSection(strings.TrimSpace(line[1 : length-1])) + if err != nil { + return err + } + + if len(comments) > 0 { + section.Comment = comments + comments = "" + } + // Reset counter. + count = 1 + continue + } + + // Other possibilities. + var ( + i int + keyQuote string + kname string + valQuote string + val string + ) + + // Key name surrounded by quotes. + if line[0] == '"' { + if length > 6 && line[0:3] == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + if len(keyQuote) > 0 { + qLen := len(keyQuote) + pos := strings.Index(line[qLen:], keyQuote) + if pos == -1 { + return fmt.Errorf("error parsing line: missing closing key quote: %s", line) + } + pos = pos + qLen + i = strings.IndexAny(line[pos:], "=:") + if i < 0 { + return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) + } else if i == pos { + return fmt.Errorf("error parsing line: key is empty: %s", line) + } + i = i + pos + kname = line[qLen:pos] // Just keep spaces inside quotes. + } else { + i = strings.IndexAny(line, "=:") + if i < 0 { + return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) + } else if i == 0 { + return fmt.Errorf("error parsing line: key is empty: %s", line) + } + kname = strings.TrimSpace(line[0:i]) + } + + isAutoIncr := false + // Auto increment. + if kname == "-" { + isAutoIncr = true + kname = "#" + fmt.Sprint(count) + count++ + } + + lineRight := strings.TrimSpace(line[i+1:]) + lineRightLength := len(lineRight) + firstChar := "" + if lineRightLength >= 2 { + firstChar = lineRight[0:1] + } + if firstChar == "`" { + valQuote = "`" + } else if firstChar == `"` { + if lineRightLength >= 3 && lineRight[0:3] == `"""` { + valQuote = `"""` + } else { + valQuote = `"` + } + } else if firstChar == `'` { + valQuote = `'` + } + + if len(valQuote) > 0 { + qLen := len(valQuote) + pos := strings.LastIndex(lineRight[qLen:], valQuote) + // For multiple-line value check. 
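+			// (No closing quote on this line means the value continues on the
+			// following lines, which is only allowed for values quoted with
+			// backticks or triple double-quotes.)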
+ if pos == -1 { + if valQuote == `"` || valQuote == `'` { + return fmt.Errorf("error parsing line: single quote does not allow multiple-line value: %s", line) + } + + val = lineRight[qLen:] + "\n" + val, err = checkMultipleLines(buf, line, val, valQuote) + if err != nil { + return err + } + } else { + val = lineRight[qLen : pos+qLen] + } + } else { + val = strings.TrimSpace(cutComment(lineRight)) + val, isEnd, err = checkContinuationLines(buf, val) + if err != nil { + return err + } + } + + k, err := section.NewKey(kname, val) + if err != nil { + return err + } + k.isAutoIncr = isAutoIncr + if len(comments) > 0 { + k.Comment = comments + comments = "" + } + + if isEnd { + break + } + } + return nil +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + return err + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +// WriteToIndent writes file content into io.Writer with given value indention. +func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { + equalSign := "=" + if PrettyFormat { + equalSign = " = " + } + + // Use buffer to make sure target is safe until finish encoding. + buf := bytes.NewBuffer(nil) + for i, sname := range f.sectionList { + sec := f.Section(sname) + if len(sec.Comment) > 0 { + if sec.Comment[0] != '#' && sec.Comment[0] != ';' { + sec.Comment = "; " + sec.Comment + } + if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { + return 0, err + } + } + + if i > 0 { + if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return 0, err + } + } else { + // Write nothing if default section is empty. + if len(sec.keyList) == 0 { + continue + } + } + + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + if key.Comment[0] != '#' && key.Comment[0] != ';' { + key.Comment = "; " + key.Comment + } + if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { + return 0, err + } + } + + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncr: + kname = "-" + case strings.Contains(kname, "`") || strings.Contains(kname, `"`): + kname = `"""` + kname + `"""` + case strings.Contains(kname, `=`) || strings.Contains(kname, `:`): + kname = "`" + kname + "`" + } + + val := key.value + // In case key value contains "\n", "`" or "\"". + if strings.Contains(val, "\n") || strings.Contains(val, "`") || strings.Contains(val, `"`) || + strings.Contains(val, "#") { + val = `"""` + val + `"""` + } + if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil { + return 0, err + } + } + + // Put a line between sections. + if _, err = buf.WriteString(LineBreak); err != nil { + return 0, err + } + } + + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. 
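+//
+// For example (a sketch), serializing the configuration to memory:
+//
+//	var buf bytes.Buffer
+//	if _, err := cfg.WriteTo(&buf); err != nil {
+//		// handle error
+//	}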
+func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename afte done. + tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp" + defer os.Remove(tmpPath) + + fw, err := os.Create(tmpPath) + if err != nil { + return err + } + + if _, err = f.WriteToIndent(fw, indent); err != nil { + fw.Close() + return err + } + fw.Close() + + // Remove old file and rename the new one. + os.Remove(filename) + return os.Rename(tmpPath, filename) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go new file mode 100644 index 000000000..c11843710 --- /dev/null +++ b/vendor/github.com/go-ini/ini/struct.go @@ -0,0 +1,350 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. + TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= ('A' - 'a') + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to strcut. 
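+// Note that integer-kind fields are tried as time.Duration first, so a value
+// such as "30s" can map onto a time.Duration struct field.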
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + if len(key.String()) == 0 { + return nil + } + field.SetString(key.String()) + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return nil + } + field.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + durationVal, err := key.Duration() + if err == nil { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + intVal, err := key.Int64() + if err != nil { + return nil + } + field.SetInt(intVal) + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + if err == nil { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return nil + } + field.SetUint(uintVal) + + case reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return nil + } + field.SetFloat(floatVal) + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return nil + } + field.Set(reflect.ValueOf(timeVal)) + case reflect.Slice: + vals := key.Strings(delim) + numVals := len(vals) + if numVals == 0 { + return nil + } + + sliceOf := field.Type().Elem().Kind() + + var times []time.Time + if sliceOf == reflectTime { + times = key.Times(delim) + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(times[i])) + default: + slice.Index(i).Set(reflect.ValueOf(vals[i])) + } + } + field.Set(slice) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func (s *Section) mapTo(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + fieldName := s.parseFieldName(tpField.Name, tag) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + isStruct := tpField.Type.Kind() == reflect.Struct + if isAnonymous { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if isAnonymous || isStruct { + if sec, err := s.f.GetSection(fieldName); err == nil { + if err = sec.mapTo(field); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + continue + } + } + + if key, err := s.GetKey(fieldName); err == nil { + if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + } + } + return nil +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// MapTo maps data sources to given struct with name mapper. +func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) 
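+	// A parse error from any data source aborts before mapping begins.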
+ if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// reflectWithProperType does the opposite thing with setWithProperType. +func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float64, + reflectTime: + key.SetValue(fmt.Sprint(field)) + case reflect.Slice: + vals := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + + var buf bytes.Buffer + isTime := fmt.Sprint(field.Type()) == "[]time.Time" + for i := 0; i < field.Len(); i++ { + if isTime { + buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339)) + } else { + buf.WriteString(fmt.Sprint(vals.Index(i))) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-1]) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func (s *Section) reflectFrom(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + fieldName := s.parseFieldName(tpField.Name, tag) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || + (tpField.Type.Kind() == reflect.Struct) { + // Note: The only error here is section doesn't exist. + sec, err := s.f.GetSection(fieldName) + if err != nil { + // Note: fieldName can never be empty here, ignore error. + sec, _ = s.f.NewSection(fieldName) + } + if err = sec.reflectFrom(field); err != nil { + return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + } + continue + } + + // Note: Same reason as secion. + key, err := s.GetKey(fieldName) + if err != nil { + key, _ = s.NewKey(fieldName, "") + } + if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + } + + } + return nil +} + +// ReflectFrom reflects secion from given struct. +func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot reflect from non-pointer struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct with name mapper. +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. 
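+//
+// A hedged sketch (struct and file name illustrative):
+//
+//	cfg := ini.Empty()
+//	if err := ini.ReflectFrom(cfg, &myConfig); err == nil {
+//		cfg.SaveTo("app.ini")
+//	}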
+func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE new file mode 100644 index 000000000..b03310a91 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/LICENSE @@ -0,0 +1,13 @@ +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile new file mode 100644 index 000000000..ad17bf001 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/Makefile @@ -0,0 +1,44 @@ + +CMD = jpgo + +help: + @echo "Please use \`make ' where is one of" + @echo " test to run all the tests" + @echo " build to build the library and jp executable" + @echo " generate to run codegen" + + +generate: + go generate ./... + +build: + rm -f $(CMD) + go build ./... + rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... + mv cmd/$(CMD)/$(CMD) . + +test: + go test -v ./... + +check: + go vet ./... + @echo "golint ./..." + @lint=`golint ./...`; \ + lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ]; then exit 1; fi + +htmlc: + go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov + +buildfuzz: + go-fuzz-build github.com/jmespath/go-jmespath/fuzz + +fuzz: buildfuzz + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/corpus + +bench: + go test -bench . -cpuprofile cpu.out + +pprof-cpu: + go tool pprof ./go-jmespath.test ./cpu.out diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md new file mode 100644 index 000000000..187ef676d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/README.md @@ -0,0 +1,7 @@ +# go-jmespath - A JMESPath implementation in Go + +[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) + + + +See http://jmespath.org for more info. diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go new file mode 100644 index 000000000..67df3fc1c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/api.go @@ -0,0 +1,12 @@ +package jmespath + +// Search evaluates a JMESPath expression against input data and returns the result. 
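+//
+// A rough sketch (expression and data illustrative; data would typically come
+// from json.Unmarshal into an interface{}):
+//
+//	result, err := jmespath.Search("foo.bar", data)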
+func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 000000000..1cd2d239c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 000000000..8a3f2ef0d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,840 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. 
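+		// (Callers inspect hasError afterwards and discard the sort result,
+		// so the ordering returned here does not matter.)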
+ return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": functionEntry{ + name: "length", + arguments: []argSpec{ + argSpec{types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": functionEntry{ + name: "starts_with", + arguments: []argSpec{ + argSpec{types: []jpType{jpString}}, + argSpec{types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": functionEntry{ + name: "abs", + arguments: []argSpec{ + argSpec{types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": functionEntry{ + name: "avg", + arguments: []argSpec{ + argSpec{types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": functionEntry{ + name: "ceil", + arguments: []argSpec{ + argSpec{types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": functionEntry{ + name: "contains", + arguments: []argSpec{ + argSpec{types: []jpType{jpArray, jpString}}, + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": functionEntry{ + name: "ends_with", + arguments: []argSpec{ + argSpec{types: []jpType{jpString}}, + argSpec{types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": functionEntry{ + name: "floor", + arguments: []argSpec{ + argSpec{types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": functionEntry{ + name: "amp", + arguments: []argSpec{ + argSpec{types: []jpType{jpExpref}}, + argSpec{types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": functionEntry{ + name: "max", + arguments: []argSpec{ + argSpec{types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": functionEntry{ + name: "merge", + arguments: []argSpec{ + argSpec{types: []jpType{jpObject}, variadic: true}, + }, + handler: jpfMerge, + }, + "max_by": functionEntry{ + name: "max_by", + arguments: []argSpec{ + argSpec{types: []jpType{jpArray}}, + argSpec{types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": functionEntry{ + name: "sum", + arguments: []argSpec{ + argSpec{types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": functionEntry{ + name: "min", + arguments: []argSpec{ + argSpec{types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": functionEntry{ + name: "min_by", + arguments: []argSpec{ + argSpec{types: []jpType{jpArray}}, + argSpec{types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": functionEntry{ + name: "type", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": functionEntry{ + name: "keys", + arguments: []argSpec{ + argSpec{types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": functionEntry{ + name: "values", + arguments: []argSpec{ + argSpec{types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": functionEntry{ + name: "sort", + arguments: []argSpec{ + argSpec{types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": functionEntry{ + name: "sort_by", 
+ arguments: []argSpec{ + argSpec{types: []jpType{jpArray}}, + argSpec{types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": functionEntry{ + name: "join", + arguments: []argSpec{ + argSpec{types: []jpType{jpString}}, + argSpec{types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": functionEntry{ + name: "reverse", + arguments: []argSpec{ + argSpec{types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": functionEntry{ + name: "to_array", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": functionEntry{ + name: "to_string", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": functionEntry{ + name: "to_number", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": functionEntry{ + name: "not_null", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if _, ok := arg.([]interface{}); ok { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) 
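+		// (Expression-reference functions such as sort_by and min_by receive
+		// the interpreter as an implicit leading argument.)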
+ } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if c, ok := arg.([]interface{}); ok { + return float64(len(c)), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. + args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
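+	// (Argument type checking already guaranteed array[number] or array[string],
+	// so the conversion error can safely be ignored here.)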
+ items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current < 
bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } + if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. + items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string. 
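+	// (A decoded JSON array is []interface{}, so each element is asserted to
+	// string individually.)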
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 000000000..13c74604c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
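+//
+// A rough sketch (expression illustrative); parse first, then interpret:
+//
+//	ast, _ := NewParser().Parse("foo[0].bar")
+//	out, err := newInterpreter().Execute(ast, data)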
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
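+		// (Reflection covers user-supplied slice types, e.g. []MyStruct,
+		// that are not plain []interface{}.)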
+ rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, 
nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) + elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 000000000..817900c8f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. 
+ currentPos int // The current position in the string. + lastWidth int // The width of the current rune. This + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. +type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. +func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. +func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. 
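+			// next() has already advanced currentPos past this rune, so the
+			// token's position is recovered by backing off lastWidth.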
+ t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' { + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
+ return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tPipe, "|" -> tOr. +func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There's three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare rbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?' { + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number. 
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 000000000..c8f4bcebd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
+func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expresssion: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + 
if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, + }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{ASTNode{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{ASTNode{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: 
ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions []ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil 
{ + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go new file mode 100644 index 000000000..dae79cbdf --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go new file mode 100644 index 000000000..ddc1b7d7d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/util.go @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// IsFalse determines if an object is false based on the JMESPath spec. +// JMESPath defines false values to be any of: +// - An empty string array, or hash. +// - The boolean value false. 
+// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. +func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. 
+// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/vendor/github.com/kolo/xmlrpc/LICENSE b/vendor/github.com/kolo/xmlrpc/LICENSE new file mode 100644 index 000000000..8103dd139 --- /dev/null +++ b/vendor/github.com/kolo/xmlrpc/LICENSE @@ -0,0 +1,19 @@ +Copyright (C) 2012 Dmitry Maksimov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/kolo/xmlrpc/README.md b/vendor/github.com/kolo/xmlrpc/README.md new file mode 100644 index 000000000..12b7692e9 --- /dev/null +++ b/vendor/github.com/kolo/xmlrpc/README.md @@ -0,0 +1,79 @@ +## Overview + +xmlrpc is an implementation of client side part of XMLRPC protocol in Go language. + +## Installation + +To install xmlrpc package run `go get github.com/kolo/xmlrpc`. To use +it in application add `"github.com/kolo/xmlrpc"` string to `import` +statement. + +## Usage + + client, _ := xmlrpc.NewClient("https://bugzilla.mozilla.org/xmlrpc.cgi", nil) + result := struct{ + Version string `xmlrpc:"version"` + }{} + client.Call("Bugzilla.version", nil, &result) + fmt.Printf("Version: %s\n", result.Version) // Version: 4.2.7+ + +Second argument of NewClient function is an object that implements +[http.RoundTripper](http://golang.org/pkg/net/http/#RoundTripper) +interface, it can be used to get more control over connection options. +By default it initialized by http.DefaultTransport object. + +### Arguments encoding + +xmlrpc package supports encoding of native Go data types to method +arguments. 
+ +Data types encoding rules: +* int, int8, int16, int32, int64 encoded to int; +* float32, float64 encoded to double; +* bool encoded to boolean; +* string encoded to string; +* time.Time encoded to datetime.iso8601; +* xmlrpc.Base64 encoded to base64; +* slice decoded to array; + +Structs decoded to struct by following rules: +* all public field become struct members; +* field name become member name; +* if field has xmlrpc tag, its value become member name. + +Server method can accept few arguments, to handle this case there is +special approach to handle slice of empty interfaces (`[]interface{}`). +Each value of such slice encoded as separate argument. + +### Result decoding + +Result of remote function is decoded to native Go data type. + +Data types decoding rules: +* int, i4 decoded to int, int8, int16, int32, int64; +* double decoded to float32, float64; +* boolean decoded to bool; +* string decoded to string; +* array decoded to slice; +* structs decoded following the rules described in previous section; +* datetime.iso8601 decoded as time.Time data type; +* base64 decoded to string. + +## Implementation details + +xmlrpc package contains clientCodec type, that implements [rpc.ClientCodec](http://golang.org/pkg/net/rpc/#ClientCodec) +interface of [net/rpc](http://golang.org/pkg/net/rpc) package. + +xmlrpc package works over HTTP protocol, but some internal functions +and data type were made public to make it easier to create another +implementation of xmlrpc that works over another protocol. To encode +request body there is EncodeMethodCall function. To decode server +response Response data type can be used. + +## Contribution + +Feel free to fork the project, submit pull requests, ask questions. + +## Authors + +Dmitry Maksimov (dmtmax@gmail.com) diff --git a/vendor/github.com/kolo/xmlrpc/client.go b/vendor/github.com/kolo/xmlrpc/client.go new file mode 100644 index 000000000..fb66b65fb --- /dev/null +++ b/vendor/github.com/kolo/xmlrpc/client.go @@ -0,0 +1,144 @@ +package xmlrpc + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/cookiejar" + "net/rpc" + "net/url" +) + +type Client struct { + *rpc.Client +} + +// clientCodec is rpc.ClientCodec interface implementation. +type clientCodec struct { + // url presents url of xmlrpc service + url *url.URL + + // httpClient works with HTTP protocol + httpClient *http.Client + + // cookies stores cookies received on last request + cookies http.CookieJar + + // responses presents map of active requests. It is required to return request id, that + // rpc.Client can mark them as done. + responses map[uint64]*http.Response + + response *Response + + // ready presents channel, that is used to link request and it`s response. 
+ ready chan uint64 +} + +func (codec *clientCodec) WriteRequest(request *rpc.Request, args interface{}) (err error) { + httpRequest, err := NewRequest(codec.url.String(), request.ServiceMethod, args) + + if codec.cookies != nil { + for _, cookie := range codec.cookies.Cookies(codec.url) { + httpRequest.AddCookie(cookie) + } + } + + if err != nil { + return err + } + + var httpResponse *http.Response + httpResponse, err = codec.httpClient.Do(httpRequest) + + if err != nil { + return err + } + + if codec.cookies != nil { + codec.cookies.SetCookies(codec.url, httpResponse.Cookies()) + } + + codec.responses[request.Seq] = httpResponse + codec.ready <- request.Seq + + return nil +} + +func (codec *clientCodec) ReadResponseHeader(response *rpc.Response) (err error) { + seq := <-codec.ready + httpResponse := codec.responses[seq] + + if httpResponse.StatusCode < 200 || httpResponse.StatusCode >= 300 { + return fmt.Errorf("request error: bad status code - %d", httpResponse.StatusCode) + } + + respData, err := ioutil.ReadAll(httpResponse.Body) + + if err != nil { + return err + } + + httpResponse.Body.Close() + + resp := NewResponse(respData) + + if resp.Failed() { + response.Error = fmt.Sprintf("%v", resp.Err()) + } + + codec.response = resp + + response.Seq = seq + delete(codec.responses, seq) + + return nil +} + +func (codec *clientCodec) ReadResponseBody(v interface{}) (err error) { + if v == nil { + return nil + } + + if err = codec.response.Unmarshal(v); err != nil { + return err + } + + return nil +} + +func (codec *clientCodec) Close() error { + transport := codec.httpClient.Transport.(*http.Transport) + transport.CloseIdleConnections() + return nil +} + +// NewClient returns instance of rpc.Client object, that is used to send request to xmlrpc service. +func NewClient(requrl string, transport http.RoundTripper) (*Client, error) { + if transport == nil { + transport = http.DefaultTransport + } + + httpClient := &http.Client{Transport: transport} + + jar, err := cookiejar.New(nil) + + if err != nil { + return nil, err + } + + u, err := url.Parse(requrl) + + if err != nil { + return nil, err + } + + codec := clientCodec{ + url: u, + httpClient: httpClient, + ready: make(chan uint64), + responses: make(map[uint64]*http.Response), + cookies: jar, + } + + return &Client{rpc.NewClientWithCodec(&codec)}, nil +} diff --git a/vendor/github.com/kolo/xmlrpc/decoder.go b/vendor/github.com/kolo/xmlrpc/decoder.go new file mode 100644 index 000000000..b73955978 --- /dev/null +++ b/vendor/github.com/kolo/xmlrpc/decoder.go @@ -0,0 +1,449 @@ +package xmlrpc + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +const iso8601 = "20060102T15:04:05" + +var ( + // CharsetReader is a function to generate reader which converts a non UTF-8 + // charset into UTF-8. 
+ CharsetReader func(string, io.Reader) (io.Reader, error) + + invalidXmlError = errors.New("invalid xml") +) + +type TypeMismatchError string + +func (e TypeMismatchError) Error() string { return string(e) } + +type decoder struct { + *xml.Decoder +} + +func unmarshal(data []byte, v interface{}) (err error) { + dec := &decoder{xml.NewDecoder(bytes.NewBuffer(data))} + + if CharsetReader != nil { + dec.CharsetReader = CharsetReader + } + + var tok xml.Token + for { + if tok, err = dec.Token(); err != nil { + return err + } + + if t, ok := tok.(xml.StartElement); ok { + if t.Name.Local == "value" { + val := reflect.ValueOf(v) + if val.Kind() != reflect.Ptr { + return errors.New("non-pointer value passed to unmarshal") + } + if err = dec.decodeValue(val.Elem()); err != nil { + return err + } + + break + } + } + } + + // read until end of document + err = dec.Skip() + if err != nil && err != io.EOF { + return err + } + + return nil +} + +func (dec *decoder) decodeValue(val reflect.Value) error { + var tok xml.Token + var err error + + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + var typeName string + for { + if tok, err = dec.Token(); err != nil { + return err + } + + if t, ok := tok.(xml.EndElement); ok { + if t.Name.Local == "value" { + return nil + } else { + return invalidXmlError + } + } + + if t, ok := tok.(xml.StartElement); ok { + typeName = t.Name.Local + break + } + + // Treat value data without type identifier as string + if t, ok := tok.(xml.CharData); ok { + if value := strings.TrimSpace(string(t)); value != "" { + if err = checkType(val, reflect.String); err != nil { + return err + } + + val.SetString(value) + return nil + } + } + } + + switch typeName { + case "struct": + ismap := false + pmap := val + valType := val.Type() + + if err = checkType(val, reflect.Struct); err != nil { + if checkType(val, reflect.Map) == nil { + if valType.Key().Kind() != reflect.String { + return fmt.Errorf("only maps with string key type can be unmarshalled") + } + ismap = true + } else if checkType(val, reflect.Interface) == nil && val.IsNil() { + var dummy map[string]interface{} + pmap = reflect.New(reflect.TypeOf(dummy)).Elem() + valType = pmap.Type() + ismap = true + } else { + return err + } + } + + var fields map[string]reflect.Value + + if !ismap { + fields = make(map[string]reflect.Value) + + for i := 0; i < valType.NumField(); i++ { + field := valType.Field(i) + fieldVal := val.FieldByName(field.Name) + + if fieldVal.CanSet() { + if fn := field.Tag.Get("xmlrpc"); fn != "" { + fields[fn] = fieldVal + } else { + fields[field.Name] = fieldVal + } + } + } + } else { + // Create initial empty map + pmap.Set(reflect.MakeMap(valType)) + } + + // Process struct members. 
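+	// Each member arrives as <member><name>...</name><value>...</value></member>.
+	// For struct targets a member is matched against the field's xmlrpc tag (or
+	// the field name when no tag is set) and unmatched members are skipped; for
+	// map targets every member is stored under its name.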
+ StructLoop: + for { + if tok, err = dec.Token(); err != nil { + return err + } + switch t := tok.(type) { + case xml.StartElement: + if t.Name.Local != "member" { + return invalidXmlError + } + + tagName, fieldName, err := dec.readTag() + if err != nil { + return err + } + if tagName != "name" { + return invalidXmlError + } + + var fv reflect.Value + ok := true + + if !ismap { + fv, ok = fields[string(fieldName)] + } else { + fv = reflect.New(valType.Elem()) + } + + if ok { + for { + if tok, err = dec.Token(); err != nil { + return err + } + if t, ok := tok.(xml.StartElement); ok && t.Name.Local == "value" { + if err = dec.decodeValue(fv); err != nil { + return err + } + + // + if err = dec.Skip(); err != nil { + return err + } + + break + } + } + } + + // + if err = dec.Skip(); err != nil { + return err + } + + if ismap { + pmap.SetMapIndex(reflect.ValueOf(string(fieldName)), reflect.Indirect(fv)) + val.Set(pmap) + } + case xml.EndElement: + break StructLoop + } + } + case "array": + pslice := val + if checkType(val, reflect.Interface) == nil && val.IsNil() { + var dummy []interface{} + pslice = reflect.New(reflect.TypeOf(dummy)).Elem() + } else if err = checkType(val, reflect.Slice); err != nil { + return err + } + + ArrayLoop: + for { + if tok, err = dec.Token(); err != nil { + return err + } + + switch t := tok.(type) { + case xml.StartElement: + if t.Name.Local != "data" { + return invalidXmlError + } + + slice := reflect.MakeSlice(pslice.Type(), 0, 0) + + DataLoop: + for { + if tok, err = dec.Token(); err != nil { + return err + } + + switch tt := tok.(type) { + case xml.StartElement: + if tt.Name.Local != "value" { + return invalidXmlError + } + + v := reflect.New(pslice.Type().Elem()) + if err = dec.decodeValue(v); err != nil { + return err + } + + slice = reflect.Append(slice, v.Elem()) + + // + if err = dec.Skip(); err != nil { + return err + } + case xml.EndElement: + pslice.Set(slice) + val.Set(pslice) + break DataLoop + } + } + case xml.EndElement: + break ArrayLoop + } + } + default: + if tok, err = dec.Token(); err != nil { + return err + } + + var data []byte + + switch t := tok.(type) { + case xml.EndElement: + return nil + case xml.CharData: + data = []byte(t.Copy()) + default: + return invalidXmlError + } + + switch typeName { + case "int", "i4", "i8": + if checkType(val, reflect.Interface) == nil && val.IsNil() { + i, err := strconv.ParseInt(string(data), 10, 64) + if err != nil { + return err + } + + pi := reflect.New(reflect.TypeOf(i)).Elem() + pi.SetInt(i) + val.Set(pi) + } else if err = checkType(val, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64); err != nil { + return err + } else { + i, err := strconv.ParseInt(string(data), 10, val.Type().Bits()) + if err != nil { + return err + } + + val.SetInt(i) + } + case "string", "base64": + str := string(data) + if checkType(val, reflect.Interface) == nil && val.IsNil() { + pstr := reflect.New(reflect.TypeOf(str)).Elem() + pstr.SetString(str) + val.Set(pstr) + } else if err = checkType(val, reflect.String); err != nil { + return err + } else { + val.SetString(str) + } + case "dateTime.iso8601": + t, err := time.Parse(iso8601, string(data)) + if err != nil { + return err + } + + if checkType(val, reflect.Interface) == nil && val.IsNil() { + ptime := reflect.New(reflect.TypeOf(t)).Elem() + ptime.Set(reflect.ValueOf(t)) + val.Set(ptime) + } else if _, ok := val.Interface().(time.Time); !ok { + return TypeMismatchError(fmt.Sprintf("error: type mismatch error - can't decode %v to time", val.Kind())) + 
} else { + val.Set(reflect.ValueOf(t)) + } + case "boolean": + v, err := strconv.ParseBool(string(data)) + if err != nil { + return err + } + + if checkType(val, reflect.Interface) == nil && val.IsNil() { + pv := reflect.New(reflect.TypeOf(v)).Elem() + pv.SetBool(v) + val.Set(pv) + } else if err = checkType(val, reflect.Bool); err != nil { + return err + } else { + val.SetBool(v) + } + case "double": + if checkType(val, reflect.Interface) == nil && val.IsNil() { + i, err := strconv.ParseFloat(string(data), 64) + if err != nil { + return err + } + + pdouble := reflect.New(reflect.TypeOf(i)).Elem() + pdouble.SetFloat(i) + val.Set(pdouble) + } else if err = checkType(val, reflect.Float32, reflect.Float64); err != nil { + return err + } else { + i, err := strconv.ParseFloat(string(data), val.Type().Bits()) + if err != nil { + return err + } + + val.SetFloat(i) + } + default: + return errors.New("unsupported type") + } + + // + if err = dec.Skip(); err != nil { + return err + } + } + + return nil +} + +func (dec *decoder) readTag() (string, []byte, error) { + var tok xml.Token + var err error + + var name string + for { + if tok, err = dec.Token(); err != nil { + return "", nil, err + } + + if t, ok := tok.(xml.StartElement); ok { + name = t.Name.Local + break + } + } + + value, err := dec.readCharData() + if err != nil { + return "", nil, err + } + + return name, value, dec.Skip() +} + +func (dec *decoder) readCharData() ([]byte, error) { + var tok xml.Token + var err error + + if tok, err = dec.Token(); err != nil { + return nil, err + } + + if t, ok := tok.(xml.CharData); ok { + return []byte(t.Copy()), nil + } else { + return nil, invalidXmlError + } +} + +func checkType(val reflect.Value, kinds ...reflect.Kind) error { + if len(kinds) == 0 { + return nil + } + + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + + match := false + + for _, kind := range kinds { + if val.Kind() == kind { + match = true + break + } + } + + if !match { + return TypeMismatchError(fmt.Sprintf("error: type mismatch - can't unmarshal %v to %v", + val.Kind(), kinds[0])) + } + + return nil +} diff --git a/vendor/github.com/kolo/xmlrpc/encoder.go b/vendor/github.com/kolo/xmlrpc/encoder.go new file mode 100644 index 000000000..bb1285ff7 --- /dev/null +++ b/vendor/github.com/kolo/xmlrpc/encoder.go @@ -0,0 +1,164 @@ +package xmlrpc + +import ( + "bytes" + "encoding/xml" + "fmt" + "reflect" + "strconv" + "time" +) + +type encodeFunc func(reflect.Value) ([]byte, error) + +func marshal(v interface{}) ([]byte, error) { + if v == nil { + return []byte{}, nil + } + + val := reflect.ValueOf(v) + return encodeValue(val) +} + +func encodeValue(val reflect.Value) ([]byte, error) { + var b []byte + var err error + + if val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface { + if val.IsNil() { + return []byte(""), nil + } + + val = val.Elem() + } + + switch val.Kind() { + case reflect.Struct: + switch val.Interface().(type) { + case time.Time: + t := val.Interface().(time.Time) + b = []byte(fmt.Sprintf("%s", t.Format(iso8601))) + default: + b, err = encodeStruct(val) + } + case reflect.Map: + b, err = encodeMap(val) + case reflect.Slice: + b, err = encodeSlice(val) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + b = []byte(fmt.Sprintf("%s", strconv.FormatInt(val.Int(), 10))) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + b = []byte(fmt.Sprintf("%s", strconv.FormatUint(val.Uint(), 10))) + case reflect.Float32, reflect.Float64: + b = 
[]byte(fmt.Sprintf("%s", + strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()))) + case reflect.Bool: + if val.Bool() { + b = []byte("1") + } else { + b = []byte("0") + } + case reflect.String: + var buf bytes.Buffer + + xml.Escape(&buf, []byte(val.String())) + + if _, ok := val.Interface().(Base64); ok { + b = []byte(fmt.Sprintf("%s", buf.String())) + } else { + b = []byte(fmt.Sprintf("%s", buf.String())) + } + default: + return nil, fmt.Errorf("xmlrpc encode error: unsupported type") + } + + if err != nil { + return nil, err + } + + return []byte(fmt.Sprintf("%s", string(b))), nil +} + +func encodeStruct(val reflect.Value) ([]byte, error) { + var b bytes.Buffer + + b.WriteString("") + + t := val.Type() + for i := 0; i < t.NumField(); i++ { + b.WriteString("") + f := t.Field(i) + + name := f.Tag.Get("xmlrpc") + if name == "" { + name = f.Name + } + b.WriteString(fmt.Sprintf("%s", name)) + + p, err := encodeValue(val.FieldByName(f.Name)) + if err != nil { + return nil, err + } + b.Write(p) + + b.WriteString("") + } + + b.WriteString("") + + return b.Bytes(), nil +} + +func encodeMap(val reflect.Value) ([]byte, error) { + var t = val.Type() + + if t.Key().Kind() != reflect.String { + return nil, fmt.Errorf("xmlrpc encode error: only maps with string keys are supported") + } + + var b bytes.Buffer + + b.WriteString("") + + keys := val.MapKeys() + + for i := 0; i < val.Len(); i++ { + key := keys[i] + kval := val.MapIndex(key) + + b.WriteString("") + b.WriteString(fmt.Sprintf("%s", key.String())) + + p, err := encodeValue(kval) + + if err != nil { + return nil, err + } + + b.Write(p) + b.WriteString("") + } + + b.WriteString("") + + return b.Bytes(), nil +} + +func encodeSlice(val reflect.Value) ([]byte, error) { + var b bytes.Buffer + + b.WriteString("") + + for i := 0; i < val.Len(); i++ { + p, err := encodeValue(val.Index(i)) + if err != nil { + return nil, err + } + + b.Write(p) + } + + b.WriteString("") + + return b.Bytes(), nil +} diff --git a/vendor/github.com/kolo/xmlrpc/request.go b/vendor/github.com/kolo/xmlrpc/request.go new file mode 100644 index 000000000..acb8251b2 --- /dev/null +++ b/vendor/github.com/kolo/xmlrpc/request.go @@ -0,0 +1,57 @@ +package xmlrpc + +import ( + "bytes" + "fmt" + "net/http" +) + +func NewRequest(url string, method string, args interface{}) (*http.Request, error) { + var t []interface{} + var ok bool + if t, ok = args.([]interface{}); !ok { + if args != nil { + t = []interface{}{args} + } + } + + body, err := EncodeMethodCall(method, t...) 
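+	// A single non-slice argument was wrapped in a one-element []interface{}
+	// above; each element of t becomes a separate parameter of the encoded
+	// method call.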
+ if err != nil { + return nil, err + } + + request, err := http.NewRequest("POST", url, bytes.NewReader(body)) + if err != nil { + return nil, err + } + + request.Header.Set("Content-Type", "text/xml") + request.Header.Set("Content-Length", fmt.Sprintf("%d", len(body))) + + return request, nil +} + +func EncodeMethodCall(method string, args ...interface{}) ([]byte, error) { + var b bytes.Buffer + b.WriteString(``) + b.WriteString(fmt.Sprintf("%s", method)) + + if args != nil { + b.WriteString("") + + for _, arg := range args { + p, err := marshal(arg) + if err != nil { + return nil, err + } + + b.WriteString(fmt.Sprintf("%s", string(p))) + } + + b.WriteString("") + } + + b.WriteString("") + + return b.Bytes(), nil +} diff --git a/vendor/github.com/kolo/xmlrpc/response.go b/vendor/github.com/kolo/xmlrpc/response.go new file mode 100644 index 000000000..6742a1c74 --- /dev/null +++ b/vendor/github.com/kolo/xmlrpc/response.go @@ -0,0 +1,52 @@ +package xmlrpc + +import ( + "regexp" +) + +var ( + faultRx = regexp.MustCompile(`(\s|\S)+`) +) + +type failedResponse struct { + Code int `xmlrpc:"faultCode"` + Error string `xmlrpc:"faultString"` +} + +func (r *failedResponse) err() error { + return &xmlrpcError{ + code: r.Code, + err: r.Error, + } +} + +type Response struct { + data []byte +} + +func NewResponse(data []byte) *Response { + return &Response{ + data: data, + } +} + +func (r *Response) Failed() bool { + return faultRx.Match(r.data) +} + +func (r *Response) Err() error { + failedResp := new(failedResponse) + if err := unmarshal(r.data, failedResp); err != nil { + return err + } + + return failedResp.err() +} + +func (r *Response) Unmarshal(v interface{}) error { + if err := unmarshal(r.data, v); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/kolo/xmlrpc/test_server.rb b/vendor/github.com/kolo/xmlrpc/test_server.rb new file mode 100644 index 000000000..1b1ff8760 --- /dev/null +++ b/vendor/github.com/kolo/xmlrpc/test_server.rb @@ -0,0 +1,25 @@ +# encoding: utf-8 + +require "xmlrpc/server" + +class Service + def time + Time.now + end + + def upcase(s) + s.upcase + end + + def sum(x, y) + x + y + end + + def error + raise XMLRPC::FaultException.new(500, "Server error") + end +end + +server = XMLRPC::Server.new 5001, 'localhost' +server.add_handler "service", Service.new +server.serve diff --git a/vendor/github.com/kolo/xmlrpc/xmlrpc.go b/vendor/github.com/kolo/xmlrpc/xmlrpc.go new file mode 100644 index 000000000..8766403af --- /dev/null +++ b/vendor/github.com/kolo/xmlrpc/xmlrpc.go @@ -0,0 +1,19 @@ +package xmlrpc + +import ( + "fmt" +) + +// xmlrpcError represents errors returned on xmlrpc request. +type xmlrpcError struct { + code int + err string +} + +// Error() method implements Error interface +func (e *xmlrpcError) Error() string { + return fmt.Sprintf("error: \"%s\" code: %d", e.err, e.code) +} + +// Base64 represents value in base64 encoding +type Base64 string diff --git a/vendor/github.com/miekg/dns/AUTHORS b/vendor/github.com/miekg/dns/AUTHORS new file mode 100644 index 000000000..196568352 --- /dev/null +++ b/vendor/github.com/miekg/dns/AUTHORS @@ -0,0 +1 @@ +Miek Gieben diff --git a/vendor/github.com/miekg/dns/CONTRIBUTORS b/vendor/github.com/miekg/dns/CONTRIBUTORS new file mode 100644 index 000000000..f77e8a895 --- /dev/null +++ b/vendor/github.com/miekg/dns/CONTRIBUTORS @@ -0,0 +1,9 @@ +Alex A. 
Skinner +Andrew Tunnell-Jones +Ask Bjørn Hansen +Dave Cheney +Dusty Wilson +Marek Majkowski +Peter van Dijk +Omri Bahumi +Alex Sergeyev diff --git a/vendor/github.com/miekg/dns/COPYRIGHT b/vendor/github.com/miekg/dns/COPYRIGHT new file mode 100644 index 000000000..35702b10e --- /dev/null +++ b/vendor/github.com/miekg/dns/COPYRIGHT @@ -0,0 +1,9 @@ +Copyright 2009 The Go Authors. All rights reserved. Use of this source code +is governed by a BSD-style license that can be found in the LICENSE file. +Extensions of the original work are copyright (c) 2011 Miek Gieben + +Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. + +Copyright 2014 CloudFlare. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/miekg/dns/LICENSE new file mode 100644 index 000000000..5763fa7fe --- /dev/null +++ b/vendor/github.com/miekg/dns/LICENSE @@ -0,0 +1,32 @@ +Extensions of the original work are copyright (c) 2011 Miek Gieben + +As this is fork of the official Go code the same license applies: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md new file mode 100644 index 000000000..83b4183eb --- /dev/null +++ b/vendor/github.com/miekg/dns/README.md @@ -0,0 +1,151 @@ +[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns) + +# Alternative (more granular) approach to a DNS library + +> Less is more. + +Complete and usable DNS library. All widely used Resource Records are +supported, including the DNSSEC types. It follows a lean and mean philosophy. +If there is stuff you should know as a DNS programmer there isn't a convenience +function for it. Server side and client side programming is supported, i.e. you +can build servers and resolvers with it. 
+ +We try to keep the "master" branch as sane as possible and at the bleeding edge +of standards, avoiding breaking changes wherever reasonable. We support the last +two versions of Go, currently: 1.5 and 1.6. + +# Goals + +* KISS; +* Fast; +* Small API, if its easy to code in Go, don't make a function for it. + +# Users + +A not-so-up-to-date-list-that-may-be-actually-current: + +* https://cloudflare.com +* https://github.com/abh/geodns +* http://www.statdns.com/ +* http://www.dnsinspect.com/ +* https://github.com/chuangbo/jianbing-dictionary-dns +* http://www.dns-lg.com/ +* https://github.com/fcambus/rrda +* https://github.com/kenshinx/godns +* https://github.com/skynetservices/skydns +* https://github.com/hashicorp/consul +* https://github.com/DevelopersPL/godnsagent +* https://github.com/duedil-ltd/discodns +* https://github.com/StalkR/dns-reverse-proxy +* https://github.com/tianon/rawdns +* https://mesosphere.github.io/mesos-dns/ +* https://pulse.turbobytes.com/ +* https://play.google.com/store/apps/details?id=com.turbobytes.dig +* https://github.com/fcambus/statzone +* https://github.com/benschw/dns-clb-go +* https://github.com/corny/dnscheck for http://public-dns.info/ +* https://namesmith.io +* https://github.com/miekg/unbound +* https://github.com/miekg/exdns +* https://dnslookup.org +* https://github.com/looterz/grimd +* https://github.com/phamhongviet/serf-dns + +Send pull request if you want to be listed here. + +# Features + +* UDP/TCP queries, IPv4 and IPv6; +* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported; +* Fast: + * Reply speed around ~ 80K qps (faster hardware results in more qps); + * Parsing RRs ~ 100K RR/s, that's 5M records in about 50 seconds; +* Server side programming (mimicking the net/http package); +* Client side programming; +* DNSSEC: signing, validating and key generation for DSA, RSA and ECDSA; +* EDNS0, NSID, Cookies; +* AXFR/IXFR; +* TSIG, SIG(0); +* DNS over TLS: optional encrypted connection between client and server; +* DNS name compression; +* Depends only on the standard library. + +Have fun! + +Miek Gieben - 2010-2012 - + +# Building + +Building is done with the `go` tool. If you have setup your GOPATH +correctly, the following should work: + + go get github.com/miekg/dns + go build github.com/miekg/dns + +## Examples + +A short "how to use the API" is at the beginning of doc.go (this also will show +when you call `godoc github.com/miekg/dns`). + +Example programs can be found in the `github.com/miekg/exdns` repository. 
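For orientation, a minimal sketch of a one-shot lookup with this client API (the resolver address and the MX record type below are placeholder choices, not prescribed by the library):

    package main

    import (
        "fmt"

        "github.com/miekg/dns"
    )

    func main() {
        m := new(dns.Msg)
        m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX) // qname must be fully qualified
        c := new(dns.Client)
        in, rtt, err := c.Exchange(m, "127.0.0.1:53") // no retry, no TCP fallback on truncation
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(rtt, in.Answer)
    }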
+ +## Supported RFCs + +*all of them* + +* 103{4,5} - DNS standard +* 1348 - NSAP record (removed the record) +* 1982 - Serial Arithmetic +* 1876 - LOC record +* 1995 - IXFR +* 1996 - DNS notify +* 2136 - DNS Update (dynamic updates) +* 2181 - RRset definition - there is no RRset type though, just []RR +* 2537 - RSAMD5 DNS keys +* 2065 - DNSSEC (updated in later RFCs) +* 2671 - EDNS record +* 2782 - SRV record +* 2845 - TSIG record +* 2915 - NAPTR record +* 2929 - DNS IANA Considerations +* 3110 - RSASHA1 DNS keys +* 3225 - DO bit (DNSSEC OK) +* 340{1,2,3} - NAPTR record +* 3445 - Limiting the scope of (DNS)KEY +* 3597 - Unknown RRs +* 403{3,4,5} - DNSSEC + validation functions +* 4255 - SSHFP record +* 4343 - Case insensitivity +* 4408 - SPF record +* 4509 - SHA256 Hash in DS +* 4592 - Wildcards in the DNS +* 4635 - HMAC SHA TSIG +* 4701 - DHCID +* 4892 - id.server +* 5001 - NSID +* 5155 - NSEC3 record +* 5205 - HIP record +* 5702 - SHA2 in the DNS +* 5936 - AXFR +* 5966 - TCP implementation recommendations +* 6605 - ECDSA +* 6725 - IANA Registry Update +* 6742 - ILNP DNS +* 6840 - Clarifications and Implementation Notes for DNS Security +* 6844 - CAA record +* 6891 - EDNS0 update +* 6895 - DNS IANA considerations +* 6975 - Algorithm Understanding in DNSSEC +* 7043 - EUI48/EUI64 records +* 7314 - DNS (EDNS) EXPIRE Option +* 7553 - URI record +* 7858 - DNS over TLS: Initiation and Performance Considerations (draft) +* 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies) +* xxxx - EDNS0 DNS Update Lease (draft) + +## Loosely based upon + +* `ldns` +* `NSD` +* `Net::DNS` +* `GRONG` diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go new file mode 100644 index 000000000..1302e4e04 --- /dev/null +++ b/vendor/github.com/miekg/dns/client.go @@ -0,0 +1,455 @@ +package dns + +// A client implementation. + +import ( + "bytes" + "crypto/tls" + "encoding/binary" + "io" + "net" + "time" +) + +const dnsTimeout time.Duration = 2 * time.Second +const tcpIdleTimeout time.Duration = 8 * time.Second + +// A Conn represents a connection to a DNS server. +type Conn struct { + net.Conn // a net.Conn holding the connection + UDPSize uint16 // minimum receive buffer for UDP messages + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be fully qualified + rtt time.Duration + t time.Time + tsigRequestMAC string +} + +// A Client defines parameters for a DNS client. 
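// A zero Client is usable as-is: an empty Net means plain UDP, and unset
// timeouts fall back to the package-level two-second dnsTimeout via the
// dialTimeout/readTimeout/writeTimeout helpers later in this file.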
+type Client struct { + Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP) + UDPSize uint16 // minimum receive buffer for UDP messages + TLSConfig *tls.Config // TLS connection configuration + Timeout time.Duration // a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout and WriteTimeout when non-zero + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - overridden by Timeout when that value is non-zero + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be fully qualified + SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass + group singleflight +} + +// Exchange performs a synchronous UDP query. It sends the message m to the address +// contained in a and waits for an reply. Exchange does not retry a failed query, nor +// will it fall back to TCP in case of truncation. +// See client.Exchange for more information on setting larger buffer sizes. +func Exchange(m *Msg, a string) (r *Msg, err error) { + var co *Conn + co, err = DialTimeout("udp", a, dnsTimeout) + if err != nil { + return nil, err + } + + defer co.Close() + + opt := m.IsEdns0() + // If EDNS0 is used use that for size. + if opt != nil && opt.UDPSize() >= MinMsgSize { + co.UDPSize = opt.UDPSize() + } + + co.SetWriteDeadline(time.Now().Add(dnsTimeout)) + if err = co.WriteMsg(m); err != nil { + return nil, err + } + + co.SetReadDeadline(time.Now().Add(dnsTimeout)) + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + return r, err +} + +// ExchangeConn performs a synchronous query. It sends the message m via the connection +// c and waits for a reply. The connection c is not closed by ExchangeConn. +// This function is going away, but can easily be mimicked: +// +// co := &dns.Conn{Conn: c} // c is your net.Conn +// co.WriteMsg(m) +// in, _ := co.ReadMsg() +// co.Close() +// +func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { + println("dns: this function is deprecated") + co := new(Conn) + co.Conn = c + if err = co.WriteMsg(m); err != nil { + return nil, err + } + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + return r, err +} + +// Exchange performs an synchronous query. It sends the message m to the address +// contained in a and waits for an reply. Basic use pattern with a *dns.Client: +// +// c := new(dns.Client) +// in, rtt, err := c.Exchange(message, "127.0.0.1:53") +// +// Exchange does not retry a failed query, nor will it fall back to TCP in +// case of truncation. +// It is up to the caller to create a message that allows for larger responses to be +// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger +// buffer, see SetEdns0. Messsages without an OPT RR will fallback to the historic limit +// of 512 bytes. +func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + if !c.SingleInflight { + return c.exchange(m, a) + } + // This adds a bunch of garbage, TODO(miek). 
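// The singleflight key built below is qname+qtype+qclass, so concurrent
// identical queries share one wire exchange; callers that merely joined an
// in-flight query receive a Copy of the shared reply.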
+ t := "nop" + if t1, ok := TypeToString[m.Question[0].Qtype]; ok { + t = t1 + } + cl := "nop" + if cl1, ok := ClassToString[m.Question[0].Qclass]; ok { + cl = cl1 + } + r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) { + return c.exchange(m, a) + }) + if err != nil { + return r, rtt, err + } + if shared { + return r.Copy(), rtt, nil + } + return r, rtt, nil +} + +func (c *Client) dialTimeout() time.Duration { + if c.Timeout != 0 { + return c.Timeout + } + if c.DialTimeout != 0 { + return c.DialTimeout + } + return dnsTimeout +} + +func (c *Client) readTimeout() time.Duration { + if c.ReadTimeout != 0 { + return c.ReadTimeout + } + return dnsTimeout +} + +func (c *Client) writeTimeout() time.Duration { + if c.WriteTimeout != 0 { + return c.WriteTimeout + } + return dnsTimeout +} + +func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + var co *Conn + network := "udp" + tls := false + + switch c.Net { + case "tcp-tls": + network = "tcp" + tls = true + case "tcp4-tls": + network = "tcp4" + tls = true + case "tcp6-tls": + network = "tcp6" + tls = true + default: + if c.Net != "" { + network = c.Net + } + } + + var deadline time.Time + if c.Timeout != 0 { + deadline = time.Now().Add(c.Timeout) + } + + if tls { + co, err = DialTimeoutWithTLS(network, a, c.TLSConfig, c.dialTimeout()) + } else { + co, err = DialTimeout(network, a, c.dialTimeout()) + } + + if err != nil { + return nil, 0, err + } + defer co.Close() + + opt := m.IsEdns0() + // If EDNS0 is used use that for size. + if opt != nil && opt.UDPSize() >= MinMsgSize { + co.UDPSize = opt.UDPSize() + } + // Otherwise use the client's configured UDP size. + if opt == nil && c.UDPSize >= MinMsgSize { + co.UDPSize = c.UDPSize + } + + co.TsigSecret = c.TsigSecret + co.SetWriteDeadline(deadlineOrTimeout(deadline, c.writeTimeout())) + if err = co.WriteMsg(m); err != nil { + return nil, 0, err + } + + co.SetReadDeadline(deadlineOrTimeout(deadline, c.readTimeout())) + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + return r, co.rtt, err +} + +// ReadMsg reads a message from the connection co. +// If the received message contains a TSIG record the transaction +// signature is verified. +func (co *Conn) ReadMsg() (*Msg, error) { + p, err := co.ReadMsgHeader(nil) + if err != nil { + return nil, err + } + + m := new(Msg) + if err := m.Unpack(p); err != nil { + // If ErrTruncated was returned, we still want to allow the user to use + // the message, but naively they can just check err if they don't want + // to use a truncated message + if err == ErrTruncated { + return m, err + } + return nil, err + } + if t := m.IsTsig(); t != nil { + if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { + return m, ErrSecret + } + // Need to work on the original message p, as that was used to calculate the tsig. + err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) + } + return m, err +} + +// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil). +// Returns message as a byte slice to be parsed with Msg.Unpack later on. +// Note that error handling on the message body is not possible as only the header is parsed. +func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) { + var ( + p []byte + n int + err error + ) + + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + r := t.(io.Reader) + + // First two bytes specify the length of the entire message. 
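// (RFC 1035 section 4.2.2 framing: a 300-octet reply, for instance, arrives
// as the big-endian length bytes 0x01 0x2c followed by the 300 message
// octets; tcpMsgLen below reads and decodes that prefix.)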
+ l, err := tcpMsgLen(r) + if err != nil { + return nil, err + } + p = make([]byte, l) + n, err = tcpRead(r, p) + co.rtt = time.Since(co.t) + default: + if co.UDPSize > MinMsgSize { + p = make([]byte, co.UDPSize) + } else { + p = make([]byte, MinMsgSize) + } + n, err = co.Read(p) + co.rtt = time.Since(co.t) + } + + if err != nil { + return nil, err + } else if n < headerSize { + return nil, ErrShortRead + } + + p = p[:n] + if hdr != nil { + dh, _, err := unpackMsgHdr(p, 0) + if err != nil { + return nil, err + } + *hdr = dh + } + return p, err +} + +// tcpMsgLen is a helper func to read first two bytes of stream as uint16 packet length. +func tcpMsgLen(t io.Reader) (int, error) { + p := []byte{0, 0} + n, err := t.Read(p) + if err != nil { + return 0, err + } + if n != 2 { + return 0, ErrShortRead + } + l := binary.BigEndian.Uint16(p) + if l == 0 { + return 0, ErrShortRead + } + return int(l), nil +} + +// tcpRead calls TCPConn.Read enough times to fill allocated buffer. +func tcpRead(t io.Reader, p []byte) (int, error) { + n, err := t.Read(p) + if err != nil { + return n, err + } + for n < len(p) { + j, err := t.Read(p[n:]) + if err != nil { + return n, err + } + n += j + } + return n, err +} + +// Read implements the net.Conn read method. +func (co *Conn) Read(p []byte) (n int, err error) { + if co.Conn == nil { + return 0, ErrConnEmpty + } + if len(p) < 2 { + return 0, io.ErrShortBuffer + } + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + r := t.(io.Reader) + + l, err := tcpMsgLen(r) + if err != nil { + return 0, err + } + if l > len(p) { + return int(l), io.ErrShortBuffer + } + return tcpRead(r, p[:l]) + } + // UDP connection + n, err = co.Conn.Read(p) + if err != nil { + return n, err + } + return n, err +} + +// WriteMsg sends a message through the connection co. +// If the message m contains a TSIG record the transaction +// signature is calculated. +func (co *Conn) WriteMsg(m *Msg) (err error) { + var out []byte + if t := m.IsTsig(); t != nil { + mac := "" + if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { + return ErrSecret + } + out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) + // Set for the next read, although only used in zone transfers + co.tsigRequestMAC = mac + } else { + out, err = m.Pack() + } + if err != nil { + return err + } + co.t = time.Now() + if _, err = co.Write(out); err != nil { + return err + } + return nil +} + +// Write implements the net.Conn Write method. +func (co *Conn) Write(p []byte) (n int, err error) { + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + w := t.(io.Writer) + + lp := len(p) + if lp < 2 { + return 0, io.ErrShortBuffer + } + if lp > MaxMsgSize { + return 0, &Error{err: "message too large"} + } + l := make([]byte, 2, lp+2) + binary.BigEndian.PutUint16(l, uint16(lp)) + p = append(l, p...) + n, err := io.Copy(w, bytes.NewReader(p)) + return int(n), err + } + n, err = co.Conn.(*net.UDPConn).Write(p) + return n, err +} + +// Dial connects to the address on the named network. +func Dial(network, address string) (conn *Conn, err error) { + conn = new(Conn) + conn.Conn, err = net.Dial(network, address) + if err != nil { + return nil, err + } + return conn, nil +} + +// DialTimeout acts like Dial but takes a timeout. 
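// Illustrative call (the address is a placeholder):
//
//	co, err := DialTimeout("udp", "127.0.0.1:53", 2*time.Second)
//
// The timeout bounds only the dial itself; read and write deadlines must
// still be set on the returned Conn.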
+func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) { + conn = new(Conn) + conn.Conn, err = net.DialTimeout(network, address, timeout) + if err != nil { + return nil, err + } + return conn, nil +} + +// DialWithTLS connects to the address on the named network with TLS. +func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) { + conn = new(Conn) + conn.Conn, err = tls.Dial(network, address, tlsConfig) + if err != nil { + return nil, err + } + return conn, nil +} + +// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout. +func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) { + var dialer net.Dialer + dialer.Timeout = timeout + + conn = new(Conn) + conn.Conn, err = tls.DialWithDialer(&dialer, network, address, tlsConfig) + if err != nil { + return nil, err + } + return conn, nil +} + +func deadlineOrTimeout(deadline time.Time, timeout time.Duration) time.Time { + if deadline.IsZero() { + return time.Now().Add(timeout) + } + return deadline +} diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go new file mode 100644 index 000000000..cfa9ad0b2 --- /dev/null +++ b/vendor/github.com/miekg/dns/clientconfig.go @@ -0,0 +1,99 @@ +package dns + +import ( + "bufio" + "os" + "strconv" + "strings" +) + +// ClientConfig wraps the contents of the /etc/resolv.conf file. +type ClientConfig struct { + Servers []string // servers to use + Search []string // suffixes to append to local name + Port string // what port to use + Ndots int // number of dots in name to trigger absolute lookup + Timeout int // seconds before giving up on packet + Attempts int // lost packets before giving up on server, not used in the package dns +} + +// ClientConfigFromFile parses a resolv.conf(5) like file and returns +// a *ClientConfig. +func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) { + file, err := os.Open(resolvconf) + if err != nil { + return nil, err + } + defer file.Close() + c := new(ClientConfig) + scanner := bufio.NewScanner(file) + c.Servers = make([]string, 0) + c.Search = make([]string, 0) + c.Port = "53" + c.Ndots = 1 + c.Timeout = 5 + c.Attempts = 2 + + for scanner.Scan() { + if err := scanner.Err(); err != nil { + return nil, err + } + line := scanner.Text() + f := strings.Fields(line) + if len(f) < 1 { + continue + } + switch f[0] { + case "nameserver": // add one name server + if len(f) > 1 { + // One more check: make sure server name is + // just an IP address. Otherwise we need DNS + // to look it up. 
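// (No such check is actually performed here: the token after "nameserver"
// is appended as-is, whether or not it parses as an IP address.)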
+ name := f[1] + c.Servers = append(c.Servers, name) + } + + case "domain": // set search path to just this domain + if len(f) > 1 { + c.Search = make([]string, 1) + c.Search[0] = f[1] + } else { + c.Search = make([]string, 0) + } + + case "search": // set search path to given servers + c.Search = make([]string, len(f)-1) + for i := 0; i < len(c.Search); i++ { + c.Search[i] = f[i+1] + } + + case "options": // magic options + for i := 1; i < len(f); i++ { + s := f[i] + switch { + case len(s) >= 6 && s[:6] == "ndots:": + n, _ := strconv.Atoi(s[6:]) + if n < 1 { + n = 1 + } + c.Ndots = n + case len(s) >= 8 && s[:8] == "timeout:": + n, _ := strconv.Atoi(s[8:]) + if n < 1 { + n = 1 + } + c.Timeout = n + case len(s) >= 8 && s[:9] == "attempts:": + n, _ := strconv.Atoi(s[9:]) + if n < 1 { + n = 1 + } + c.Attempts = n + case s == "rotate": + /* not imp */ + } + } + } + } + return c, nil +} diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go new file mode 100644 index 000000000..cf456165f --- /dev/null +++ b/vendor/github.com/miekg/dns/defaults.go @@ -0,0 +1,282 @@ +package dns + +import ( + "errors" + "net" + "strconv" +) + +const hexDigit = "0123456789abcdef" + +// Everything is assumed in ClassINET. + +// SetReply creates a reply message from a request message. +func (dns *Msg) SetReply(request *Msg) *Msg { + dns.Id = request.Id + dns.RecursionDesired = request.RecursionDesired // Copy rd bit + dns.Response = true + dns.Opcode = OpcodeQuery + dns.Rcode = RcodeSuccess + if len(request.Question) > 0 { + dns.Question = make([]Question, 1) + dns.Question[0] = request.Question[0] + } + return dns +} + +// SetQuestion creates a question message, it sets the Question +// section, generates an Id and sets the RecursionDesired (RD) +// bit to true. +func (dns *Msg) SetQuestion(z string, t uint16) *Msg { + dns.Id = Id() + dns.RecursionDesired = true + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, t, ClassINET} + return dns +} + +// SetNotify creates a notify message, it sets the Question +// section, generates an Id and sets the Authoritative (AA) +// bit to true. +func (dns *Msg) SetNotify(z string) *Msg { + dns.Opcode = OpcodeNotify + dns.Authoritative = true + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeSOA, ClassINET} + return dns +} + +// SetRcode creates an error message suitable for the request. +func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg { + dns.SetReply(request) + dns.Rcode = rcode + return dns +} + +// SetRcodeFormatError creates a message with FormError set. +func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg { + dns.Rcode = RcodeFormatError + dns.Opcode = OpcodeQuery + dns.Response = true + dns.Authoritative = false + dns.Id = request.Id + return dns +} + +// SetUpdate makes the message a dynamic update message. It +// sets the ZONE section to: z, TypeSOA, ClassINET. +func (dns *Msg) SetUpdate(z string) *Msg { + dns.Id = Id() + dns.Response = false + dns.Opcode = OpcodeUpdate + dns.Compress = false // BIND9 cannot handle compression + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeSOA, ClassINET} + return dns +} + +// SetIxfr creates message for requesting an IXFR. 
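// A usage sketch (values are illustrative): pass the zone, the serial of the
// last SOA you hold, and that SOA's Ns and Mbox:
//
//	m := new(Msg)
//	m.SetIxfr("example.org.", 2016082201, "ns1.example.org.", "hostmaster.example.org.")
//
// The SOA built from these values is placed in the authority section so the
// server can compute the incremental delta.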
+func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg { + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Ns = make([]RR, 1) + s := new(SOA) + s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0} + s.Serial = serial + s.Ns = ns + s.Mbox = mbox + dns.Question[0] = Question{z, TypeIXFR, ClassINET} + dns.Ns[0] = s + return dns +} + +// SetAxfr creates message for requesting an AXFR. +func (dns *Msg) SetAxfr(z string) *Msg { + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeAXFR, ClassINET} + return dns +} + +// SetTsig appends a TSIG RR to the message. +// This is only a skeleton TSIG RR that is added as the last RR in the +// additional section. The Tsig is calculated when the message is being send. +func (dns *Msg) SetTsig(z, algo string, fudge, timesigned int64) *Msg { + t := new(TSIG) + t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0} + t.Algorithm = algo + t.Fudge = 300 + t.TimeSigned = uint64(timesigned) + t.OrigId = dns.Id + dns.Extra = append(dns.Extra, t) + return dns +} + +// SetEdns0 appends a EDNS0 OPT RR to the message. +// TSIG should always the last RR in a message. +func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg { + e := new(OPT) + e.Hdr.Name = "." + e.Hdr.Rrtype = TypeOPT + e.SetUDPSize(udpsize) + if do { + e.SetDo() + } + dns.Extra = append(dns.Extra, e) + return dns +} + +// IsTsig checks if the message has a TSIG record as the last record +// in the additional section. It returns the TSIG record found or nil. +func (dns *Msg) IsTsig() *TSIG { + if len(dns.Extra) > 0 { + if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG { + return dns.Extra[len(dns.Extra)-1].(*TSIG) + } + } + return nil +} + +// IsEdns0 checks if the message has a EDNS0 (OPT) record, any EDNS0 +// record in the additional section will do. It returns the OPT record +// found or nil. +func (dns *Msg) IsEdns0() *OPT { + // EDNS0 is at the end of the additional section, start there. + // We might want to change this to *only* look at the last two + // records. So we see TSIG and/or OPT - this a slightly bigger + // change though. + for i := len(dns.Extra) - 1; i >= 0; i-- { + if dns.Extra[i].Header().Rrtype == TypeOPT { + return dns.Extra[i].(*OPT) + } + } + return nil +} + +// IsDomainName checks if s is a valid domain name, it returns the number of +// labels and true, when a domain name is valid. Note that non fully qualified +// domain name is considered valid, in this case the last label is counted in +// the number of labels. When false is returned the number of labels is not +// defined. Also note that this function is extremely liberal; almost any +// string is a valid domain name as the DNS is 8 bit protocol. It checks if each +// label fits in 63 characters, but there is no length check for the entire +// string s. I.e. a domain name longer than 255 characters is considered valid. +func IsDomainName(s string) (labels int, ok bool) { + _, labels, err := packDomainName(s, nil, 0, nil, false) + return labels, err == nil +} + +// IsSubDomain checks if child is indeed a child of the parent. If child and parent +// are the same domain true is returned as well. +func IsSubDomain(parent, child string) bool { + // Entire child is contained in parent + return CompareDomainName(parent, child) == CountLabel(parent) +} + +// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet. +// The checking is performed on the binary payload. 
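// (The only check below is the fixed 12-octet header length: ID, flags and
// the four section counts. Anything shorter cannot be a DNS message.)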
+func IsMsg(buf []byte) error { + // Header + if len(buf) < 12 { + return errors.New("dns: bad message header") + } + // Header: Opcode + // TODO(miek): more checks here, e.g. check all header bits. + return nil +} + +// IsFqdn checks if a domain name is fully qualified. +func IsFqdn(s string) bool { + l := len(s) + if l == 0 { + return false + } + return s[l-1] == '.' +} + +// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181. +// This means the RRs need to have the same type, name, and class. Returns true +// if the RR set is valid, otherwise false. +func IsRRset(rrset []RR) bool { + if len(rrset) == 0 { + return false + } + if len(rrset) == 1 { + return true + } + rrHeader := rrset[0].Header() + rrType := rrHeader.Rrtype + rrClass := rrHeader.Class + rrName := rrHeader.Name + + for _, rr := range rrset[1:] { + curRRHeader := rr.Header() + if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName { + // Mismatch between the records, so this is not a valid rrset for + //signing/verifying + return false + } + } + + return true +} + +// Fqdn return the fully qualified domain name from s. +// If s is already fully qualified, it behaves as the identity function. +func Fqdn(s string) string { + if IsFqdn(s) { + return s + } + return s + "." +} + +// Copied from the official Go code. + +// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP +// address suitable for reverse DNS (PTR) record lookups or an error if it fails +// to parse the IP address. +func ReverseAddr(addr string) (arpa string, err error) { + ip := net.ParseIP(addr) + if ip == nil { + return "", &Error{err: "unrecognized address: " + addr} + } + if ip.To4() != nil { + return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." + + strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil + } + // Must be IPv6 + buf := make([]byte, 0, len(ip)*4+len("ip6.arpa.")) + // Add it, in reverse, to the buffer + for i := len(ip) - 1; i >= 0; i-- { + v := ip[i] + buf = append(buf, hexDigit[v&0xF]) + buf = append(buf, '.') + buf = append(buf, hexDigit[v>>4]) + buf = append(buf, '.') + } + // Append "ip6.arpa." and return (buf already has the final .) + buf = append(buf, "ip6.arpa."...) + return string(buf), nil +} + +// String returns the string representation for the type t. +func (t Type) String() string { + if t1, ok := TypeToString[uint16(t)]; ok { + return t1 + } + return "TYPE" + strconv.Itoa(int(t)) +} + +// String returns the string representation for the class c. +func (c Class) String() string { + if c1, ok := ClassToString[uint16(c)]; ok { + return c1 + } + return "CLASS" + strconv.Itoa(int(c)) +} + +// String returns the string representation for the name n. +func (n Name) String() string { + return sprintName(string(n)) +} diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go new file mode 100644 index 000000000..b3292287c --- /dev/null +++ b/vendor/github.com/miekg/dns/dns.go @@ -0,0 +1,104 @@ +package dns + +import "strconv" + +const ( + year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits. + defaultTtl = 3600 // Default internal TTL. + + DefaultMsgSize = 4096 // DefaultMsgSize is the standard default for messages larger than 512 bytes. + MinMsgSize = 512 // MinMsgSize is the minimal size of a DNS packet. + MaxMsgSize = 65535 // MaxMsgSize is the largest possible DNS packet. +) + +// Error represents a DNS error. 
+type Error struct{ err string } + +func (e *Error) Error() string { + if e == nil { + return "dns: " + } + return "dns: " + e.err +} + +// An RR represents a resource record. +type RR interface { + // Header returns the header of an resource record. The header contains + // everything up to the rdata. + Header() *RR_Header + // String returns the text representation of the resource record. + String() string + + // copy returns a copy of the RR + copy() RR + // len returns the length (in octets) of the uncompressed RR in wire format. + len() int + // pack packs an RR into wire format. + pack([]byte, int, map[string]int, bool) (int, error) +} + +// RR_Header is the header all DNS resource records share. +type RR_Header struct { + Name string `dns:"cdomain-name"` + Rrtype uint16 + Class uint16 + Ttl uint32 + Rdlength uint16 // Length of data after header. +} + +// Header returns itself. This is here to make RR_Header implements the RR interface. +func (h *RR_Header) Header() *RR_Header { return h } + +// Just to implement the RR interface. +func (h *RR_Header) copy() RR { return nil } + +func (h *RR_Header) copyHeader() *RR_Header { + r := new(RR_Header) + r.Name = h.Name + r.Rrtype = h.Rrtype + r.Class = h.Class + r.Ttl = h.Ttl + r.Rdlength = h.Rdlength + return r +} + +func (h *RR_Header) String() string { + var s string + + if h.Rrtype == TypeOPT { + s = ";" + // and maybe other things + } + + s += sprintName(h.Name) + "\t" + s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" + s += Class(h.Class).String() + "\t" + s += Type(h.Rrtype).String() + "\t" + return s +} + +func (h *RR_Header) len() int { + l := len(h.Name) + 1 + l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2) + return l +} + +// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597. +func (rr *RFC3597) ToRFC3597(r RR) error { + buf := make([]byte, r.len()*2) + off, err := PackRR(r, buf, 0, nil, false) + if err != nil { + return err + } + buf = buf[:off] + if int(r.Header().Rdlength) > off { + return ErrBuf + } + + rfc3597, _, err := unpackRFC3597(*r.Header(), buf, off-int(r.Header().Rdlength)) + if err != nil { + return err + } + *rr = *rfc3597.(*RFC3597) + return nil +} diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go new file mode 100644 index 000000000..f5f3fbdd8 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -0,0 +1,721 @@ +package dns + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + _ "crypto/md5" + "crypto/rand" + "crypto/rsa" + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/asn1" + "encoding/binary" + "encoding/hex" + "math/big" + "sort" + "strings" + "time" +) + +// DNSSEC encryption algorithm codes. +const ( + _ uint8 = iota + RSAMD5 + DH + DSA + _ // Skip 4, RFC 6725, section 2.1 + RSASHA1 + DSANSEC3SHA1 + RSASHA1NSEC3SHA1 + RSASHA256 + _ // Skip 9, RFC 6725, section 2.1 + RSASHA512 + _ // Skip 11, RFC 6725, section 2.1 + ECCGOST + ECDSAP256SHA256 + ECDSAP384SHA384 + INDIRECT uint8 = 252 + PRIVATEDNS uint8 = 253 // Private (experimental keys) + PRIVATEOID uint8 = 254 +) + +// Map for algorithm names. 
+var AlgorithmToString = map[uint8]string{ + RSAMD5: "RSAMD5", + DH: "DH", + DSA: "DSA", + RSASHA1: "RSASHA1", + DSANSEC3SHA1: "DSA-NSEC3-SHA1", + RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1", + RSASHA256: "RSASHA256", + RSASHA512: "RSASHA512", + ECCGOST: "ECC-GOST", + ECDSAP256SHA256: "ECDSAP256SHA256", + ECDSAP384SHA384: "ECDSAP384SHA384", + INDIRECT: "INDIRECT", + PRIVATEDNS: "PRIVATEDNS", + PRIVATEOID: "PRIVATEOID", +} + +// Map of algorithm strings. +var StringToAlgorithm = reverseInt8(AlgorithmToString) + +// Map of algorithm crypto hashes. +var AlgorithmToHash = map[uint8]crypto.Hash{ + RSAMD5: crypto.MD5, // Deprecated in RFC 6725 + RSASHA1: crypto.SHA1, + RSASHA1NSEC3SHA1: crypto.SHA1, + RSASHA256: crypto.SHA256, + ECDSAP256SHA256: crypto.SHA256, + ECDSAP384SHA384: crypto.SHA384, + RSASHA512: crypto.SHA512, +} + +// DNSSEC hashing algorithm codes. +const ( + _ uint8 = iota + SHA1 // RFC 4034 + SHA256 // RFC 4509 + GOST94 // RFC 5933 + SHA384 // Experimental + SHA512 // Experimental +) + +// Map for hash names. +var HashToString = map[uint8]string{ + SHA1: "SHA1", + SHA256: "SHA256", + GOST94: "GOST94", + SHA384: "SHA384", + SHA512: "SHA512", +} + +// Map of hash strings. +var StringToHash = reverseInt8(HashToString) + +// DNSKEY flag values. +const ( + SEP = 1 + REVOKE = 1 << 7 + ZONE = 1 << 8 +) + +// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing. +type rrsigWireFmt struct { + TypeCovered uint16 + Algorithm uint8 + Labels uint8 + OrigTtl uint32 + Expiration uint32 + Inception uint32 + KeyTag uint16 + SignerName string `dns:"domain-name"` + /* No Signature */ +} + +// Used for converting DNSKEY's rdata to wirefmt. +type dnskeyWireFmt struct { + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` + /* Nothing is left out */ +} + +func divRoundUp(a, b int) int { + return (a + b - 1) / b +} + +// KeyTag calculates the keytag (or key-id) of the DNSKEY. +func (k *DNSKEY) KeyTag() uint16 { + if k == nil { + return 0 + } + var keytag int + switch k.Algorithm { + case RSAMD5: + // Look at the bottom two bytes of the modules, which the last + // item in the pubkey. We could do this faster by looking directly + // at the base64 values. But I'm lazy. + modulus, _ := fromBase64([]byte(k.PublicKey)) + if len(modulus) > 1 { + x := binary.BigEndian.Uint16(modulus[len(modulus)-2:]) + keytag = int(x) + } + default: + keywire := new(dnskeyWireFmt) + keywire.Flags = k.Flags + keywire.Protocol = k.Protocol + keywire.Algorithm = k.Algorithm + keywire.PublicKey = k.PublicKey + wire := make([]byte, DefaultMsgSize) + n, err := packKeyWire(keywire, wire) + if err != nil { + return 0 + } + wire = wire[:n] + for i, v := range wire { + if i&1 != 0 { + keytag += int(v) // must be larger than uint32 + } else { + keytag += int(v) << 8 + } + } + keytag += (keytag >> 16) & 0xFFFF + keytag &= 0xFFFF + } + return uint16(keytag) +} + +// ToDS converts a DNSKEY record to a DS record. 
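// A sketch of typical use (the digest type is the caller's choice):
//
//	ds := key.ToDS(SHA256) // key is a *DNSKEY; nil is returned on any error
//
// Per RFC 4034 the digest covers the lower-cased owner name concatenated with
// the DNSKEY RDATA (Flags | Protocol | Algorithm | Public Key).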
+func (k *DNSKEY) ToDS(h uint8) *DS { + if k == nil { + return nil + } + ds := new(DS) + ds.Hdr.Name = k.Hdr.Name + ds.Hdr.Class = k.Hdr.Class + ds.Hdr.Rrtype = TypeDS + ds.Hdr.Ttl = k.Hdr.Ttl + ds.Algorithm = k.Algorithm + ds.DigestType = h + ds.KeyTag = k.KeyTag() + + keywire := new(dnskeyWireFmt) + keywire.Flags = k.Flags + keywire.Protocol = k.Protocol + keywire.Algorithm = k.Algorithm + keywire.PublicKey = k.PublicKey + wire := make([]byte, DefaultMsgSize) + n, err := packKeyWire(keywire, wire) + if err != nil { + return nil + } + wire = wire[:n] + + owner := make([]byte, 255) + off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false) + if err1 != nil { + return nil + } + owner = owner[:off] + // RFC4034: + // digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA); + // "|" denotes concatenation + // DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key. + + // digest buffer + digest := append(owner, wire...) // another copy + + var hash crypto.Hash + switch h { + case SHA1: + hash = crypto.SHA1 + case SHA256: + hash = crypto.SHA256 + case SHA384: + hash = crypto.SHA384 + case SHA512: + hash = crypto.SHA512 + default: + return nil + } + + s := hash.New() + s.Write(digest) + ds.Digest = hex.EncodeToString(s.Sum(nil)) + return ds +} + +// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record. +func (k *DNSKEY) ToCDNSKEY() *CDNSKEY { + c := &CDNSKEY{DNSKEY: *k} + c.Hdr = *k.Hdr.copyHeader() + c.Hdr.Rrtype = TypeCDNSKEY + return c +} + +// ToCDS converts a DS record to a CDS record. +func (d *DS) ToCDS() *CDS { + c := &CDS{DS: *d} + c.Hdr = *d.Hdr.copyHeader() + c.Hdr.Rrtype = TypeCDS + return c +} + +// Sign signs an RRSet. The signature needs to be filled in with the values: +// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied +// from the RRset. Sign returns a non-nill error when the signing went OK. +// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non +// zero, it is used as-is, otherwise the TTL of the RRset is used as the +// OrigTTL. +func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { + if k == nil { + return ErrPrivKey + } + // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + + rr.Hdr.Rrtype = TypeRRSIG + rr.Hdr.Name = rrset[0].Header().Name + rr.Hdr.Class = rrset[0].Header().Class + if rr.OrigTtl == 0 { // If set don't override + rr.OrigTtl = rrset[0].Header().Ttl + } + rr.TypeCovered = rrset[0].Header().Rrtype + rr.Labels = uint8(CountLabel(rrset[0].Header().Name)) + + if strings.HasPrefix(rrset[0].Header().Name, "*") { + rr.Labels-- // wildcard, remove from label count + } + + sigwire := new(rrsigWireFmt) + sigwire.TypeCovered = rr.TypeCovered + sigwire.Algorithm = rr.Algorithm + sigwire.Labels = rr.Labels + sigwire.OrigTtl = rr.OrigTtl + sigwire.Expiration = rr.Expiration + sigwire.Inception = rr.Inception + sigwire.KeyTag = rr.KeyTag + // For signing, lowercase this name + sigwire.SignerName = strings.ToLower(rr.SignerName) + + // Create the desired binary blob + signdata := make([]byte, DefaultMsgSize) + n, err := packSigWire(sigwire, signdata) + if err != nil { + return err + } + signdata = signdata[:n] + wire, err := rawSignatureData(rrset, rr) + if err != nil { + return err + } + signdata = append(signdata, wire...) 
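// At this point signdata holds the RRSIG RDATA without the signature field,
// followed by the canonicalised RRset in wire format: exactly the blob that
// RFC 4034 requires to be hashed and signed.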
+ + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return ErrAlg + } + + h := hash.New() + h.Write(signdata) + + signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm) + if err != nil { + return err + } + + rr.Signature = toBase64(signature) + + return nil +} + +func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) { + signature, err := k.Sign(rand.Reader, hashed, hash) + if err != nil { + return nil, err + } + + switch alg { + case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512: + return signature, nil + + case ECDSAP256SHA256, ECDSAP384SHA384: + ecdsaSignature := &struct { + R, S *big.Int + }{} + if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil { + return nil, err + } + + var intlen int + switch alg { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + + signature := intToBytes(ecdsaSignature.R, intlen) + signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...) + return signature, nil + + // There is no defined interface for what a DSA backed crypto.Signer returns + case DSA, DSANSEC3SHA1: + // t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8) + // signature := []byte{byte(t)} + // signature = append(signature, intToBytes(r1, 20)...) + // signature = append(signature, intToBytes(s1, 20)...) + // rr.Signature = signature + } + + return nil, ErrAlg +} + +// Verify validates an RRSet with the signature and key. This is only the +// cryptographic test, the signature validity period must be checked separately. +// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work. +func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { + // First the easy checks + if !IsRRset(rrset) { + return ErrRRset + } + if rr.KeyTag != k.KeyTag() { + return ErrKey + } + if rr.Hdr.Class != k.Hdr.Class { + return ErrKey + } + if rr.Algorithm != k.Algorithm { + return ErrKey + } + if strings.ToLower(rr.SignerName) != strings.ToLower(k.Hdr.Name) { + return ErrKey + } + if k.Protocol != 3 { + return ErrKey + } + + // IsRRset checked that we have at least one RR and that the RRs in + // the set have consistent type, class, and name. Also check that type and + // class matches the RRSIG record. + if rrset[0].Header().Class != rr.Hdr.Class { + return ErrRRset + } + if rrset[0].Header().Rrtype != rr.TypeCovered { + return ErrRRset + } + + // RFC 4035 5.3.2. Reconstructing the Signed Data + // Copy the sig, except the rrsig data + sigwire := new(rrsigWireFmt) + sigwire.TypeCovered = rr.TypeCovered + sigwire.Algorithm = rr.Algorithm + sigwire.Labels = rr.Labels + sigwire.OrigTtl = rr.OrigTtl + sigwire.Expiration = rr.Expiration + sigwire.Inception = rr.Inception + sigwire.KeyTag = rr.KeyTag + sigwire.SignerName = strings.ToLower(rr.SignerName) + // Create the desired binary blob + signeddata := make([]byte, DefaultMsgSize) + n, err := packSigWire(sigwire, signeddata) + if err != nil { + return err + } + signeddata = signeddata[:n] + wire, err := rawSignatureData(rrset, rr) + if err != nil { + return err + } + signeddata = append(signeddata, wire...) + + sigbuf := rr.sigBuf() // Get the binary signature data + if rr.Algorithm == PRIVATEDNS { // PRIVATEOID + // TODO(miek) + // remove the domain name and assume its ours? + } + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return ErrAlg + } + + switch rr.Algorithm { + case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5: + // TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere?? 
+ pubkey := k.publicKeyRSA() // Get the key + if pubkey == nil { + return ErrKey + } + + h := hash.New() + h.Write(signeddata) + return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf) + + case ECDSAP256SHA256, ECDSAP384SHA384: + pubkey := k.publicKeyECDSA() + if pubkey == nil { + return ErrKey + } + + // Split sigbuf into the r and s coordinates + r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2]) + s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:]) + + h := hash.New() + h.Write(signeddata) + if ecdsa.Verify(pubkey, h.Sum(nil), r, s) { + return nil + } + return ErrSig + + default: + return ErrAlg + } +} + +// ValidityPeriod uses RFC1982 serial arithmetic to calculate +// if a signature period is valid. If t is the zero time, the +// current time is taken other t is. Returns true if the signature +// is valid at the given time, otherwise returns false. +func (rr *RRSIG) ValidityPeriod(t time.Time) bool { + var utc int64 + if t.IsZero() { + utc = time.Now().UTC().Unix() + } else { + utc = t.UTC().Unix() + } + modi := (int64(rr.Inception) - utc) / year68 + mode := (int64(rr.Expiration) - utc) / year68 + ti := int64(rr.Inception) + (modi * year68) + te := int64(rr.Expiration) + (mode * year68) + return ti <= utc && utc <= te +} + +// Return the signatures base64 encodedig sigdata as a byte slice. +func (rr *RRSIG) sigBuf() []byte { + sigbuf, err := fromBase64([]byte(rr.Signature)) + if err != nil { + return nil + } + return sigbuf +} + +// publicKeyRSA returns the RSA public key from a DNSKEY record. +func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + + // RFC 2537/3110, section 2. RSA Public KEY Resource Records + // Length is in the 0th byte, unless its zero, then it + // it in bytes 1 and 2 and its a 16 bit number + explen := uint16(keybuf[0]) + keyoff := 1 + if explen == 0 { + explen = uint16(keybuf[1])<<8 | uint16(keybuf[2]) + keyoff = 3 + } + pubkey := new(rsa.PublicKey) + + pubkey.N = big.NewInt(0) + shift := uint64((explen - 1) * 8) + expo := uint64(0) + for i := int(explen - 1); i > 0; i-- { + expo += uint64(keybuf[keyoff+i]) << shift + shift -= 8 + } + // Remainder + expo += uint64(keybuf[keyoff]) + if expo > 2<<31 { + // Larger expo than supported. + // println("dns: F5 primes (or larger) are not supported") + return nil + } + pubkey.E = int(expo) + + pubkey.N.SetBytes(keybuf[keyoff+int(explen):]) + return pubkey +} + +// publicKeyECDSA returns the Curve public key from the DNSKEY record. 
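// (For these algorithms the DNSKEY RDATA is the bare X||Y concatenation,
// 64 octets for P-256 and 96 for P-384, which is why the switch below rejects
// any other length before splitting the buffer in half.)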
+func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + pubkey := new(ecdsa.PublicKey) + switch k.Algorithm { + case ECDSAP256SHA256: + pubkey.Curve = elliptic.P256() + if len(keybuf) != 64 { + // wrongly encoded key + return nil + } + case ECDSAP384SHA384: + pubkey.Curve = elliptic.P384() + if len(keybuf) != 96 { + // Wrongly encoded key + return nil + } + } + pubkey.X = big.NewInt(0) + pubkey.X.SetBytes(keybuf[:len(keybuf)/2]) + pubkey.Y = big.NewInt(0) + pubkey.Y.SetBytes(keybuf[len(keybuf)/2:]) + return pubkey +} + +func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + if len(keybuf) < 22 { + return nil + } + t, keybuf := int(keybuf[0]), keybuf[1:] + size := 64 + t*8 + q, keybuf := keybuf[:20], keybuf[20:] + if len(keybuf) != 3*size { + return nil + } + p, keybuf := keybuf[:size], keybuf[size:] + g, y := keybuf[:size], keybuf[size:] + pubkey := new(dsa.PublicKey) + pubkey.Parameters.Q = big.NewInt(0).SetBytes(q) + pubkey.Parameters.P = big.NewInt(0).SetBytes(p) + pubkey.Parameters.G = big.NewInt(0).SetBytes(g) + pubkey.Y = big.NewInt(0).SetBytes(y) + return pubkey +} + +type wireSlice [][]byte + +func (p wireSlice) Len() int { return len(p) } +func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p wireSlice) Less(i, j int) bool { + _, ioff, _ := UnpackDomainName(p[i], 0) + _, joff, _ := UnpackDomainName(p[j], 0) + return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0 +} + +// Return the raw signature data. +func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) { + wires := make(wireSlice, len(rrset)) + for i, r := range rrset { + r1 := r.copy() + r1.Header().Ttl = s.OrigTtl + labels := SplitDomainName(r1.Header().Name) + // 6.2. Canonical RR Form. (4) - wildcards + if len(labels) > int(s.Labels) { + // Wildcard + r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "." + } + // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase + r1.Header().Name = strings.ToLower(r1.Header().Name) + // 6.2. Canonical RR Form. (3) - domain rdata to lowercase. + // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR, + // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX, + // SRV, DNAME, A6 + // + // RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC): + // Section 6.2 of [RFC4034] also erroneously lists HINFO as a record + // that needs conversion to lowercase, and twice at that. Since HINFO + // records contain no domain names, they are not subject to case + // conversion. + switch x := r1.(type) { + case *NS: + x.Ns = strings.ToLower(x.Ns) + case *CNAME: + x.Target = strings.ToLower(x.Target) + case *SOA: + x.Ns = strings.ToLower(x.Ns) + x.Mbox = strings.ToLower(x.Mbox) + case *MB: + x.Mb = strings.ToLower(x.Mb) + case *MG: + x.Mg = strings.ToLower(x.Mg) + case *MR: + x.Mr = strings.ToLower(x.Mr) + case *PTR: + x.Ptr = strings.ToLower(x.Ptr) + case *MINFO: + x.Rmail = strings.ToLower(x.Rmail) + x.Email = strings.ToLower(x.Email) + case *MX: + x.Mx = strings.ToLower(x.Mx) + case *NAPTR: + x.Replacement = strings.ToLower(x.Replacement) + case *KX: + x.Exchanger = strings.ToLower(x.Exchanger) + case *SRV: + x.Target = strings.ToLower(x.Target) + case *DNAME: + x.Target = strings.ToLower(x.Target) + } + // 6.2. Canonical RR Form. 
(5) - origTTL + wire := make([]byte, r1.len()+1) // +1 to be safe(r) + off, err1 := PackRR(r1, wire, 0, nil, false) + if err1 != nil { + return nil, err1 + } + wire = wire[:off] + wires[i] = wire + } + sort.Sort(wires) + for i, wire := range wires { + if i > 0 && bytes.Equal(wire, wires[i-1]) { + continue + } + buf = append(buf, wire...) + } + return buf, nil +} + +func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) { + // copied from zmsg.go RRSIG packing + off, err := packUint16(sw.TypeCovered, msg, 0) + if err != nil { + return off, err + } + off, err = packUint8(sw.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(sw.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(sw.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(sw.SignerName, msg, off, nil, false) + if err != nil { + return off, err + } + return off, nil +} + +func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) { + // copied from zmsg.go DNSKEY packing + off, err := packUint16(dw.Flags, msg, 0) + if err != nil { + return off, err + } + off, err = packUint8(dw.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(dw.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(dw.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go new file mode 100644 index 000000000..229a07937 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_keygen.go @@ -0,0 +1,156 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "math/big" +) + +// Generate generates a DNSKEY of the given bit size. +// The public part is put inside the DNSKEY record. +// The Algorithm in the key must be set as this will define +// what kind of DNSKEY will be generated. +// The ECDSA algorithms imply a fixed keysize, in that case +// bits should be set to the size of the algorithm. 
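// A sketch (owner name and flags are illustrative; Protocol must be 3 per RFC 4034):
//
//	k := new(DNSKEY)
//	k.Hdr = RR_Header{Name: "example.org.", Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 3600}
//	k.Flags, k.Protocol, k.Algorithm = 257, 3, ECDSAP256SHA256
//	priv, err := k.Generate(256) // P-256 accepts only 256 here
//
// On success priv is the concrete private key (*ecdsa.PrivateKey in this case)
// and k.PublicKey has been filled in as base64.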
+func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) { + switch k.Algorithm { + case DSA, DSANSEC3SHA1: + if bits != 1024 { + return nil, ErrKeySize + } + case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1: + if bits < 512 || bits > 4096 { + return nil, ErrKeySize + } + case RSASHA512: + if bits < 1024 || bits > 4096 { + return nil, ErrKeySize + } + case ECDSAP256SHA256: + if bits != 256 { + return nil, ErrKeySize + } + case ECDSAP384SHA384: + if bits != 384 { + return nil, ErrKeySize + } + } + + switch k.Algorithm { + case DSA, DSANSEC3SHA1: + params := new(dsa.Parameters) + if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil { + return nil, err + } + priv := new(dsa.PrivateKey) + priv.PublicKey.Parameters = *params + err := dsa.GenerateKey(priv, rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y) + return priv, nil + case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1: + priv, err := rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N) + return priv, nil + case ECDSAP256SHA256, ECDSAP384SHA384: + var c elliptic.Curve + switch k.Algorithm { + case ECDSAP256SHA256: + c = elliptic.P256() + case ECDSAP384SHA384: + c = elliptic.P384() + } + priv, err := ecdsa.GenerateKey(c, rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y) + return priv, nil + default: + return nil, ErrAlg + } +} + +// Set the public key (the value E and N) +func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool { + if _E == 0 || _N == nil { + return false + } + buf := exponentToBuf(_E) + buf = append(buf, _N.Bytes()...) + k.PublicKey = toBase64(buf) + return true +} + +// Set the public key for Elliptic Curves +func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool { + if _X == nil || _Y == nil { + return false + } + var intlen int + switch k.Algorithm { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen)) + return true +} + +// Set the public key for DSA +func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool { + if _Q == nil || _P == nil || _G == nil || _Y == nil { + return false + } + buf := dsaToBuf(_Q, _P, _G, _Y) + k.PublicKey = toBase64(buf) + return true +} + +// Set the public key (the values E and N) for RSA +// RFC 3110: Section 2. RSA Public KEY Resource Records +func exponentToBuf(_E int) []byte { + var buf []byte + i := big.NewInt(int64(_E)) + if len(i.Bytes()) < 256 { + buf = make([]byte, 1) + buf[0] = uint8(len(i.Bytes())) + } else { + buf = make([]byte, 3) + buf[0] = 0 + buf[1] = uint8(len(i.Bytes()) >> 8) + buf[2] = uint8(len(i.Bytes())) + } + buf = append(buf, i.Bytes()...) + return buf +} + +// Set the public key for X and Y for Curve. The two +// values are just concatenated. +func curveToBuf(_X, _Y *big.Int, intlen int) []byte { + buf := intToBytes(_X, intlen) + buf = append(buf, intToBytes(_Y, intlen)...) + return buf +} + +// Set the public key for X and Y for Curve. The two +// values are just concatenated. +func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte { + t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8) + buf := []byte{byte(t)} + buf = append(buf, intToBytes(_Q, 20)...) + buf = append(buf, intToBytes(_P, 64+t*8)...) + buf = append(buf, intToBytes(_G, 64+t*8)...) + buf = append(buf, intToBytes(_Y, 64+t*8)...) 
+ return buf +} diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go new file mode 100644 index 000000000..c0b54dc76 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go @@ -0,0 +1,249 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "io" + "math/big" + "strconv" + "strings" +) + +// NewPrivateKey returns a PrivateKey by parsing the string s. +// s should be in the same form of the BIND private key files. +func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) { + if s[len(s)-1] != '\n' { // We need a closing newline + return k.ReadPrivateKey(strings.NewReader(s+"\n"), "") + } + return k.ReadPrivateKey(strings.NewReader(s), "") +} + +// ReadPrivateKey reads a private key from the io.Reader q. The string file is +// only used in error reporting. +// The public key must be known, because some cryptographic algorithms embed +// the public inside the privatekey. +func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) { + m, err := parseKey(q, file) + if m == nil { + return nil, err + } + if _, ok := m["private-key-format"]; !ok { + return nil, ErrPrivKey + } + if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" { + return nil, ErrPrivKey + } + // TODO(mg): check if the pubkey matches the private key + algo, err := strconv.Atoi(strings.SplitN(m["algorithm"], " ", 2)[0]) + if err != nil { + return nil, ErrPrivKey + } + switch uint8(algo) { + case DSA: + priv, err := readPrivateKeyDSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyDSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case RSAMD5: + fallthrough + case RSASHA1: + fallthrough + case RSASHA1NSEC3SHA1: + fallthrough + case RSASHA256: + fallthrough + case RSASHA512: + priv, err := readPrivateKeyRSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyRSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case ECCGOST: + return nil, ErrPrivKey + case ECDSAP256SHA256: + fallthrough + case ECDSAP384SHA384: + priv, err := readPrivateKeyECDSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyECDSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + default: + return nil, ErrPrivKey + } +} + +// Read a private key (file) string and create a public key. Return the private key. 
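// (m is the lower-cased key/value map produced by parseKey from a BIND
// private-key file; the public fields are rebuilt from its "modulus" and
// "publicexponent" entries, the private ones from "privateexponent",
// "prime1" and "prime2".)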
+func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) { + p := new(rsa.PrivateKey) + p.Primes = []*big.Int{nil, nil} + for k, v := range m { + switch k { + case "modulus", "publicexponent", "privateexponent", "prime1", "prime2": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + switch k { + case "modulus": + p.PublicKey.N = big.NewInt(0) + p.PublicKey.N.SetBytes(v1) + case "publicexponent": + i := big.NewInt(0) + i.SetBytes(v1) + p.PublicKey.E = int(i.Int64()) // int64 should be large enough + case "privateexponent": + p.D = big.NewInt(0) + p.D.SetBytes(v1) + case "prime1": + p.Primes[0] = big.NewInt(0) + p.Primes[0].SetBytes(v1) + case "prime2": + p.Primes[1] = big.NewInt(0) + p.Primes[1].SetBytes(v1) + } + case "exponent1", "exponent2", "coefficient": + // not used in Go (yet) + case "created", "publish", "activate": + // not used in Go (yet) + } + } + return p, nil +} + +func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) { + p := new(dsa.PrivateKey) + p.X = big.NewInt(0) + for k, v := range m { + switch k { + case "private_value(x)": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + p.X.SetBytes(v1) + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) { + p := new(ecdsa.PrivateKey) + p.D = big.NewInt(0) + // TODO: validate that the required flags are present + for k, v := range m { + switch k { + case "privatekey": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + p.D.SetBytes(v1) + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +// parseKey reads a private key from r. It returns a map[string]string, +// with the key-value pairs, or an error when the file is not correct. +func parseKey(r io.Reader, file string) (map[string]string, error) { + s := scanInit(r) + m := make(map[string]string) + c := make(chan lex) + k := "" + // Start the lexer + go klexer(s, c) + for l := range c { + // It should alternate + switch l.value { + case zKey: + k = l.token + case zValue: + if k == "" { + return nil, &ParseError{file, "no private key seen", l} + } + //println("Setting", strings.ToLower(k), "to", l.token, "b") + m[strings.ToLower(k)] = l.token + k = "" + } + } + return m, nil +} + +// klexer scans the sourcefile and returns tokens on the channel c. 
+func klexer(s *scan, c chan lex) { + var l lex + str := "" // Hold the current read text + commt := false + key := true + x, err := s.tokenText() + defer close(c) + for err == nil { + l.column = s.position.Column + l.line = s.position.Line + switch x { + case ':': + if commt { + break + } + l.token = str + if key { + l.value = zKey + c <- l + // Next token is a space, eat it + s.tokenText() + key = false + str = "" + } else { + l.value = zValue + } + case ';': + commt = true + case '\n': + if commt { + // Reset a comment + commt = false + } + l.value = zValue + l.token = str + c <- l + str = "" + commt = false + key = true + default: + if commt { + break + } + str += string(x) + } + x, err = s.tokenText() + } + if len(str) > 0 { + // Send remainder + l.token = str + l.value = zValue + c <- l + } +} diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go new file mode 100644 index 000000000..56f3ea934 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_privkey.go @@ -0,0 +1,85 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "math/big" + "strconv" +) + +const format = "Private-key-format: v1.3\n" + +// PrivateKeyString converts a PrivateKey to a string. This string has the same +// format as the private-key-file of BIND9 (Private-key-format: v1.3). +// It needs some info from the key (the algorithm), so its a method of the DNSKEY +// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey +func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string { + algorithm := strconv.Itoa(int(r.Algorithm)) + algorithm += " (" + AlgorithmToString[r.Algorithm] + ")" + + switch p := p.(type) { + case *rsa.PrivateKey: + modulus := toBase64(p.PublicKey.N.Bytes()) + e := big.NewInt(int64(p.PublicKey.E)) + publicExponent := toBase64(e.Bytes()) + privateExponent := toBase64(p.D.Bytes()) + prime1 := toBase64(p.Primes[0].Bytes()) + prime2 := toBase64(p.Primes[1].Bytes()) + // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm + // and from: http://code.google.com/p/go/issues/detail?id=987 + one := big.NewInt(1) + p1 := big.NewInt(0).Sub(p.Primes[0], one) + q1 := big.NewInt(0).Sub(p.Primes[1], one) + exp1 := big.NewInt(0).Mod(p.D, p1) + exp2 := big.NewInt(0).Mod(p.D, q1) + coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0]) + + exponent1 := toBase64(exp1.Bytes()) + exponent2 := toBase64(exp2.Bytes()) + coefficient := toBase64(coeff.Bytes()) + + return format + + "Algorithm: " + algorithm + "\n" + + "Modulus: " + modulus + "\n" + + "PublicExponent: " + publicExponent + "\n" + + "PrivateExponent: " + privateExponent + "\n" + + "Prime1: " + prime1 + "\n" + + "Prime2: " + prime2 + "\n" + + "Exponent1: " + exponent1 + "\n" + + "Exponent2: " + exponent2 + "\n" + + "Coefficient: " + coefficient + "\n" + + case *ecdsa.PrivateKey: + var intlen int + switch r.Algorithm { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + private := toBase64(intToBytes(p.D, intlen)) + return format + + "Algorithm: " + algorithm + "\n" + + "PrivateKey: " + private + "\n" + + case *dsa.PrivateKey: + T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8) + prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8)) + subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20)) + base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8)) + priv := toBase64(intToBytes(p.X, 20)) + pub := 
toBase64(intToBytes(p.PublicKey.Y, 64+T*8)) + return format + + "Algorithm: " + algorithm + "\n" + + "Prime(p): " + prime + "\n" + + "Subprime(q): " + subprime + "\n" + + "Base(g): " + base + "\n" + + "Private_value(x): " + priv + "\n" + + "Public_value(y): " + pub + "\n" + + default: + return "" + } +} diff --git a/vendor/github.com/miekg/dns/dnsutil/util.go b/vendor/github.com/miekg/dns/dnsutil/util.go new file mode 100644 index 000000000..9ed03f296 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnsutil/util.go @@ -0,0 +1,79 @@ +// Package dnsutil contains higher-level methods useful with the dns +// package. While package dns implements the DNS protocols itself, +// these functions are related but not directly required for protocol +// processing. They are often useful in preparing input/output of the +// functions in package dns. +package dnsutil + +import ( + "strings" + + "github.com/miekg/dns" +) + +// AddDomain adds origin to s if s is not already a FQDN. +// Note that the result may not be a FQDN. If origin does not end +// with a ".", the result won't either. +// This implements the zonefile convention (specified in RFC 1035, +// Section "5.1. Format") that "@" represents the +// apex (bare) domain. i.e. AddOrigin("@", "foo.com.") returns "foo.com.". +func AddOrigin(s, origin string) string { + // ("foo.", "origin.") -> "foo." (already a FQDN) + // ("foo", "origin.") -> "foo.origin." + // ("foo"), "origin" -> "foo.origin" + // ("@", "origin.") -> "origin." (@ represents the apex (bare) domain) + // ("", "origin.") -> "origin." (not obvious) + // ("foo", "") -> "foo" (not obvious) + + if dns.IsFqdn(s) { + return s // s is already a FQDN, no need to mess with it. + } + if len(origin) == 0 { + return s // Nothing to append. + } + if s == "@" || len(s) == 0 { + return origin // Expand apex. + } + + if origin == "." { + return s + origin // AddOrigin(s, ".") is an expensive way to add a ".". + } + + return s + "." + origin // The simple case. +} + +// TrimDomainName trims origin from s if s is a subdomain. +// This function will never return "", but returns "@" instead (@ represents the apex (bare) domain). +func TrimDomainName(s, origin string) string { + // An apex (bare) domain is always returned as "@". + // If the return value ends in a ".", the domain was not the suffix. + // origin can end in "." or not. Either way the results should be the same. + + if len(s) == 0 { + return "@" // Return the apex (@) rather than "". + } + // Someone is using TrimDomainName(s, ".") to remove a dot if it exists. + if origin == "." { + return strings.TrimSuffix(s, origin) + } + + // Dude, you aren't even if the right subdomain! + if !dns.IsSubDomain(origin, s) { + return s + } + + slabels := dns.Split(s) + olabels := dns.Split(origin) + m := dns.CompareDomainName(s, origin) + if len(olabels) == m { + if len(olabels) == len(slabels) { + return "@" // origin == s + } + if (s[0] == '.') && (len(slabels) == (len(olabels) + 1)) { + return "@" // TrimDomainName(".foo.", "foo.") + } + } + + // Return the first (len-m) labels: + return s[:slabels[len(slabels)-m]-1] +} diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go new file mode 100644 index 000000000..f3555e433 --- /dev/null +++ b/vendor/github.com/miekg/dns/doc.go @@ -0,0 +1,251 @@ +/* +Package dns implements a full featured interface to the Domain Name System. +Server- and client-side programming is supported. +The package allows complete control over what is send out to the DNS. 
The package +API follows the less-is-more principle by presenting a small, clean interface. + +The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers, +TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing. +Note that domain names MUST be fully qualified before sending them; unqualified +names in a message will result in a packing failure. + +Resource records are native types. They are not stored in wire format. +Basic usage pattern for creating a new resource record: + + r := new(dns.MX) + r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, + Class: dns.ClassINET, Ttl: 3600} + r.Preference = 10 + r.Mx = "mx.miek.nl." + +Or directly from a string: + + mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") + +Or when the default TTL (3600) and class (IN) suit you: + + mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.") + +Or even: + + mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") + +In the DNS, messages are exchanged; these messages contain resource +records (sets). Basic use pattern for creating a message: + + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) + +Or when not certain if the domain name is fully qualified: + + m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX) + +The message m is now a message with the question section set to ask +the MX records for the miek.nl. zone. + +The following is slightly more verbose, but more flexible: + + m1 := new(dns.Msg) + m1.Id = dns.Id() + m1.RecursionDesired = true + m1.Question = make([]dns.Question, 1) + m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} + +After creating a message it can be sent. +Basic use pattern for synchronously querying the DNS at a +server configured on 127.0.0.1 and port 53: + + c := new(dns.Client) + in, rtt, err := c.Exchange(m1, "127.0.0.1:53") + +Suppressing multiple outstanding queries (with the same question, type and +class) is as easy as setting: + + c.SingleInflight = true + +If these "advanced" features are not needed, a simple UDP query can be sent +with: + + in, err := dns.Exchange(m1, "127.0.0.1:53") + +When this function returns you will get a dns message. A dns message consists +of four sections. +The question section: in.Question, the answer section: in.Answer, +the authority section: in.Ns and the additional section: in.Extra. + +Each of these sections (except the Question section) contains a []RR. Basic +use pattern for accessing the rdata of a TXT RR as the first RR in +the Answer section: + + if t, ok := in.Answer[0].(*dns.TXT); ok { + // do something with t.Txt + } + +Domain Name and TXT Character String Representations + +Both domain names and TXT character strings are converted to presentation +form both when unpacked and when converted to strings. + +For TXT character strings, tabs, carriage returns and line feeds will be +converted to \t, \r and \n respectively. Backslashes and quotation marks +will be escaped. Bytes below 32 and above 127 will be converted to \DDD +form. + +For domain names, in addition to the above rules, brackets, periods, +spaces, semicolons and the at symbol are escaped. + +DNSSEC + +DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It +uses public key cryptography to sign resource records. The +public keys are stored in DNSKEY records and the signatures in RRSIG records. + +Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit +to a request.
+ + m := new(dns.Msg) + m.SetEdns0(4096, true) + +Signature generation, signature verification and key generation are all supported. + +DYNAMIC UPDATES + +Dynamic updates reuse the DNS message format, but rename three of +the sections. Question is Zone, Answer is Prerequisite, Authority is +Update; only the Additional section is not renamed. See RFC 2136 for the gory details. + +You can set a rather complex set of rules for the existence or absence of +certain resource records or names in a zone to specify if resource records +should be added or removed. The table from RFC 2136, supplemented with the Go +DNS function names, shows which functions exist to specify the prerequisites. + + 3.2.4 - Table Of Metavalues Used In Prerequisite Section + + CLASS TYPE RDATA Meaning Function + -------------------------------------------------------------- + ANY ANY empty Name is in use dns.NameUsed + ANY rrset empty RRset exists (value indep) dns.RRsetUsed + NONE ANY empty Name is not in use dns.NameNotUsed + NONE rrset empty RRset does not exist dns.RRsetNotUsed + zone rrset rr RRset exists (value dep) dns.Used + +The prerequisite section can also be left empty. +If you have decided on the prerequisites you can tell what RRs should +be added or deleted. The next table shows the options you have and +what functions to call. + + 3.4.2.6 - Table Of Metavalues Used In Update Section + + CLASS TYPE RDATA Meaning Function + --------------------------------------------------------------- + ANY ANY empty Delete all RRsets from name dns.RemoveName + ANY rrset empty Delete an RRset dns.RemoveRRset + NONE rrset rr Delete an RR from RRset dns.Remove + zone rrset rr Add to an RRset dns.Insert + +TRANSACTION SIGNATURE + +A TSIG or transaction signature adds an HMAC TSIG record to each message sent. +The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512. + +Basic use pattern when querying with a TSIG name "axfr." (note that these key names +must be fully qualified - as they are domain names) and the base64 secret +"so6ZGir4GPAqINNh9U5c3A==": + + c := new(dns.Client) + c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + ... + // When sending, the TSIG RR is calculated and filled in before sending + +When requesting a zone transfer with TSIG (almost all TSIG usage is when requesting zone transfers), +this is the basic use pattern. In this example we request an AXFR for +miek.nl. with the TSIG key named "axfr." and secret "so6ZGir4GPAqINNh9U5c3A==" +and using the server 176.58.119.54: + + t := new(dns.Transfer) + m := new(dns.Msg) + t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + m.SetAxfr("miek.nl.") + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + c, err := t.In(m, "176.58.119.54:53") + for r := range c { ... } + +You can now read the records from the transfer as they come in. Each envelope is checked with TSIG. +If something is not correct, an error is returned. + +Basic use pattern validating and replying to a message that has TSIG set.
+ + server := &dns.Server{Addr: ":53", Net: "udp"} + server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + go server.ListenAndServe() + dns.HandleFunc(".", handleRequest) + + func handleRequest(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + if r.IsTsig() != nil { + if w.TsigStatus() == nil { + // *Msg r has a TSIG record and it was validated + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + } else { + // *Msg r has a TSIG record and it was not validated + } + } + w.WriteMsg(m) + } + +PRIVATE RRS + +RFC 6895 sets aside a range of type codes for private use. This range +is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records, these +can be used before requesting an official type code from IANA. + +See http://miek.nl/posts/2014/Sep/21/Private%20RRs%20and%20IDN%20in%20Go%20DNS/ for more +information. + +EDNS0 + +EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated +by RFC 6891. It defines a new RR type, the OPT RR, which is then completely +abused. +Basic use pattern for creating an (empty) OPT RR: + + o := new(dns.OPT) + o.Hdr.Name = "." // MUST be the root zone, per definition. + o.Hdr.Rrtype = dns.TypeOPT + +The rdata of an OPT RR consists of a slice of EDNS0 (RFC 6891) +interfaces. Currently only a few have been standardized: EDNS0_NSID +(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note +that these options may be combined in an OPT RR. +Basic use pattern for a server to check if (and which) options are set: + + // o is a dns.OPT + for _, s := range o.Option { + switch e := s.(type) { + case *dns.EDNS0_NSID: + // do stuff with e.Nsid + case *dns.EDNS0_SUBNET: + // access e.Family, e.Address, etc. + } + } + +SIG(0) + +From RFC 2931: + + SIG(0) provides protection for DNS transactions and requests .... + ... protection for glue records, DNS requests, protection for message headers + on requests and responses, and protection of the overall integrity of a response. + +It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared +secret approach in TSIG. +Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and +RSASHA512. + +Signing subsequent messages in multi-message sessions is not implemented. +*/ +package dns diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go new file mode 100644 index 000000000..7a58aa9b1 --- /dev/null +++ b/vendor/github.com/miekg/dns/edns.go @@ -0,0 +1,532 @@ +package dns + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "net" + "strconv" +) + +// EDNS0 Option codes. +const ( + EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 + EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt + EDNS0NSID = 0x3 // nsid (RFC5001) + EDNS0DAU = 0x5 // DNSSEC Algorithm Understood + EDNS0DHU = 0x6 // DS Hash Understood + EDNS0N3U = 0x7 // NSEC3 Hash Understood + EDNS0SUBNET = 0x8 // client-subnet (RFC6891) + EDNS0EXPIRE = 0x9 // EDNS0 expire + EDNS0COOKIE = 0xa // EDNS0 Cookie + EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET + EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891) + EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891) + _DO = 1 << 15 // dnssec ok +) + +// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. +// See RFC 6891.
+type OPT struct { + Hdr RR_Header + Option []EDNS0 `dns:"opt"` +} + +func (rr *OPT) String() string { + s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; " + if rr.Do() { + s += "flags: do; " + } else { + s += "flags: ; " + } + s += "udp: " + strconv.Itoa(int(rr.UDPSize())) + + for _, o := range rr.Option { + switch o.(type) { + case *EDNS0_NSID: + s += "\n; NSID: " + o.String() + h, e := o.pack() + var r string + if e == nil { + for _, c := range h { + r += "(" + string(c) + ")" + } + s += " " + r + } + case *EDNS0_SUBNET: + s += "\n; SUBNET: " + o.String() + if o.(*EDNS0_SUBNET).DraftOption { + s += " (draft)" + } + case *EDNS0_COOKIE: + s += "\n; COOKIE: " + o.String() + case *EDNS0_UL: + s += "\n; UPDATE LEASE: " + o.String() + case *EDNS0_LLQ: + s += "\n; LONG LIVED QUERIES: " + o.String() + case *EDNS0_DAU: + s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String() + case *EDNS0_DHU: + s += "\n; DS HASH UNDERSTOOD: " + o.String() + case *EDNS0_N3U: + s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String() + case *EDNS0_LOCAL: + s += "\n; LOCAL OPT: " + o.String() + } + } + return s +} + +func (rr *OPT) len() int { + l := rr.Hdr.len() + for i := 0; i < len(rr.Option); i++ { + l += 4 // Account for 2-byte option code and 2-byte option length. + lo, _ := rr.Option[i].pack() + l += len(lo) + } + return l +} + +// return the old value -> delete SetVersion? + +// Version returns the EDNS version used. Only zero is defined. +func (rr *OPT) Version() uint8 { + return uint8((rr.Hdr.Ttl & 0x00FF0000) >> 16) +} + +// SetVersion sets the version of EDNS. This is usually zero. +func (rr *OPT) SetVersion(v uint8) { + rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | (uint32(v) << 16) +} + +// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL). +func (rr *OPT) ExtendedRcode() int { + return int((rr.Hdr.Ttl&0xFF000000)>>24) + 15 +} + +// SetExtendedRcode sets the EDNS extended RCODE field. +func (rr *OPT) SetExtendedRcode(v uint8) { + if v < RcodeBadVers { // Smaller than 16.. Use the 4 bits you have! + return + } + rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v-15) << 24) +} + +// UDPSize returns the UDP buffer size. +func (rr *OPT) UDPSize() uint16 { + return rr.Hdr.Class +} + +// SetUDPSize sets the UDP buffer size. +func (rr *OPT) SetUDPSize(size uint16) { + rr.Hdr.Class = size +} + +// Do returns the value of the DO (DNSSEC OK) bit. +func (rr *OPT) Do() bool { + return rr.Hdr.Ttl&_DO == _DO +} + +// SetDo sets the DO (DNSSEC OK) bit. +func (rr *OPT) SetDo() { + rr.Hdr.Ttl |= _DO +} + +// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it. +type EDNS0 interface { + // Option returns the option code for the option. + Option() uint16 + // pack returns the bytes of the option data. + pack() ([]byte, error) + // unpack sets the data as found in the buffer. Is also sets + // the length of the slice as the length of the option data. + unpack([]byte) error + // String returns the string representation of the option. + String() string +} + +// The nsid EDNS0 option is used to retrieve a nameserver +// identifier. When sending a request Nsid must be set to the empty string +// The identifier is an opaque string encoded as hex. +// Basic use pattern for creating an nsid option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." 
+// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_NSID) +// e.Code = dns.EDNS0NSID +// e.Nsid = "AA" +// o.Option = append(o.Option, e) +type EDNS0_NSID struct { + Code uint16 // Always EDNS0NSID + Nsid string // This string needs to be hex encoded +} + +func (e *EDNS0_NSID) pack() ([]byte, error) { + h, err := hex.DecodeString(e.Nsid) + if err != nil { + return nil, err + } + return h, nil +} + +func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } +func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil } +func (e *EDNS0_NSID) String() string { return string(e.Nsid) } + +// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver +// an idea of where the client lives. It can then give back a different +// answer depending on the location or network topology. +// Basic use pattern for creating an subnet option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_SUBNET) +// e.Code = dns.EDNS0SUBNET +// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 +// e.NetMask = 32 // 32 for IPV4, 128 for IPv6 +// e.SourceScope = 0 +// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4 +// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6 +// o.Option = append(o.Option, e) +// +// Note: the spec (draft-ietf-dnsop-edns-client-subnet-00) has some insane logic +// for which netmask applies to the address. This code will parse all the +// available bits when unpacking (up to optlen). When packing it will apply +// SourceNetmask. If you need more advanced logic, patches welcome and good luck. +type EDNS0_SUBNET struct { + Code uint16 // Always EDNS0SUBNET + Family uint16 // 1 for IP, 2 for IP6 + SourceNetmask uint8 + SourceScope uint8 + Address net.IP + DraftOption bool // Set to true if using the old (0x50fa) option code +} + +func (e *EDNS0_SUBNET) Option() uint16 { + if e.DraftOption { + return EDNS0SUBNETDRAFT + } + return EDNS0SUBNET +} + +func (e *EDNS0_SUBNET) pack() ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint16(b[0:], e.Family) + b[2] = e.SourceNetmask + b[3] = e.SourceScope + switch e.Family { + case 1: + if e.SourceNetmask > net.IPv4len*8 { + return nil, errors.New("dns: bad netmask") + } + if len(e.Address.To4()) != net.IPv4len { + return nil, errors.New("dns: bad address") + } + ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8)) + needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up + b = append(b, ip[:needLength]...) + case 2: + if e.SourceNetmask > net.IPv6len*8 { + return nil, errors.New("dns: bad netmask") + } + if len(e.Address) != net.IPv6len { + return nil, errors.New("dns: bad address") + } + ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8)) + needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up + b = append(b, ip[:needLength]...) 
+ default: + return nil, errors.New("dns: bad address family") + } + return b, nil +} + +func (e *EDNS0_SUBNET) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Family = binary.BigEndian.Uint16(b) + e.SourceNetmask = b[2] + e.SourceScope = b[3] + switch e.Family { + case 1: + if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 { + return errors.New("dns: bad netmask") + } + addr := make([]byte, net.IPv4len) + for i := 0; i < net.IPv4len && 4+i < len(b); i++ { + addr[i] = b[4+i] + } + e.Address = net.IPv4(addr[0], addr[1], addr[2], addr[3]) + case 2: + if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 { + return errors.New("dns: bad netmask") + } + addr := make([]byte, net.IPv6len) + for i := 0; i < net.IPv6len && 4+i < len(b); i++ { + addr[i] = b[4+i] + } + e.Address = net.IP{addr[0], addr[1], addr[2], addr[3], addr[4], + addr[5], addr[6], addr[7], addr[8], addr[9], addr[10], + addr[11], addr[12], addr[13], addr[14], addr[15]} + default: + return errors.New("dns: bad address family") + } + return nil +} + +func (e *EDNS0_SUBNET) String() (s string) { + if e.Address == nil { + s = "" + } else if e.Address.To4() != nil { + s = e.Address.String() + } else { + s = "[" + e.Address.String() + "]" + } + s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope)) + return +} + +// The Cookie EDNS0 option +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_COOKIE) +// e.Code = dns.EDNS0COOKIE +// e.Cookie = "24a5ac.." +// o.Option = append(o.Option, e) +// +// The Cookie field consists out of a client cookie (RFC 7873 Section 4), that is +// always 8 bytes. It may then optionally be followed by the server cookie. The server +// cookie is of variable length, 8 to a maximum of 32 bytes. In other words: +// +// cCookie := o.Cookie[:16] +// sCookie := o.Cookie[16:] +// +// There is no guarantee that the Cookie string has a specific length. +type EDNS0_COOKIE struct { + Code uint16 // Always EDNS0COOKIE + Cookie string // Hex-encoded cookie data +} + +func (e *EDNS0_COOKIE) pack() ([]byte, error) { + h, err := hex.DecodeString(e.Cookie) + if err != nil { + return nil, err + } + return h, nil +} + +func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE } +func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil } +func (e *EDNS0_COOKIE) String() string { return e.Cookie } + +// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set +// an expiration on an update RR. This is helpful for clients that cannot clean +// up after themselves. This is a draft RFC and more information can be found at +// http://files.dns-sd.org/draft-sekar-dns-ul.txt +// +// o := new(dns.OPT) +// o.Hdr.Name = "." 
+// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_UL) +// e.Code = dns.EDNS0UL +// e.Lease = 120 // in seconds +// o.Option = append(o.Option, e) +type EDNS0_UL struct { + Code uint16 // Always EDNS0UL + Lease uint32 +} + +func (e *EDNS0_UL) Option() uint16 { return EDNS0UL } +func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) } + +// Copied: http://golang.org/src/pkg/net/dnsmsg.go +func (e *EDNS0_UL) pack() ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, e.Lease) + return b, nil +} + +func (e *EDNS0_UL) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Lease = binary.BigEndian.Uint32(b) + return nil +} + +// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 +// Implemented for completeness, as the EDNS0 type code is assigned. +type EDNS0_LLQ struct { + Code uint16 // Always EDNS0LLQ + Version uint16 + Opcode uint16 + Error uint16 + Id uint64 + LeaseLife uint32 +} + +func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ } + +func (e *EDNS0_LLQ) pack() ([]byte, error) { + b := make([]byte, 18) + binary.BigEndian.PutUint16(b[0:], e.Version) + binary.BigEndian.PutUint16(b[2:], e.Opcode) + binary.BigEndian.PutUint16(b[4:], e.Error) + binary.BigEndian.PutUint64(b[6:], e.Id) + binary.BigEndian.PutUint32(b[14:], e.LeaseLife) + return b, nil +} + +func (e *EDNS0_LLQ) unpack(b []byte) error { + if len(b) < 18 { + return ErrBuf + } + e.Version = binary.BigEndian.Uint16(b[0:]) + e.Opcode = binary.BigEndian.Uint16(b[2:]) + e.Error = binary.BigEndian.Uint16(b[4:]) + e.Id = binary.BigEndian.Uint64(b[6:]) + e.LeaseLife = binary.BigEndian.Uint32(b[14:]) + return nil +} + +func (e *EDNS0_LLQ) String() string { + s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) + + " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) + + " " + strconv.FormatUint(uint64(e.LeaseLife), 10) + return s +} + +type EDNS0_DAU struct { + Code uint16 // Always EDNS0DAU + AlgCode []uint8 +} + +func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU } +func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_DAU) String() string { + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := AlgorithmToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) + } + } + return s +} + +type EDNS0_DHU struct { + Code uint16 // Always EDNS0DHU + AlgCode []uint8 +} + +func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU } +func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_DHU) String() string { + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := HashToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) + } + } + return s +} + +type EDNS0_N3U struct { + Code uint16 // Always EDNS0N3U + AlgCode []uint8 +} + +func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U } +func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_N3U) String() string { + // Re-use the hash map + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := HashToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) 
+ } + } + return s +} + +type EDNS0_EXPIRE struct { + Code uint16 // Always EDNS0EXPIRE + Expire uint32 +} + +func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE } +func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) } + +func (e *EDNS0_EXPIRE) pack() ([]byte, error) { + b := make([]byte, 4) + b[0] = byte(e.Expire >> 24) + b[1] = byte(e.Expire >> 16) + b[2] = byte(e.Expire >> 8) + b[3] = byte(e.Expire) + return b, nil +} + +func (e *EDNS0_EXPIRE) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Expire = binary.BigEndian.Uint32(b) + return nil +} + +// The EDNS0_LOCAL option is used for local/experimental purposes. The option +// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND] +// (RFC6891), although any unassigned code can actually be used. The content of +// the option is made available in Data, unaltered. +// Basic use pattern for creating a local option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_LOCAL) +// e.Code = dns.EDNS0LOCALSTART +// e.Data = []byte{72, 82, 74} +// o.Option = append(o.Option, e) +type EDNS0_LOCAL struct { + Code uint16 + Data []byte +} + +func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } +func (e *EDNS0_LOCAL) String() string { + return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) +} + +func (e *EDNS0_LOCAL) pack() ([]byte, error) { + b := make([]byte, len(e.Data)) + copied := copy(b, e.Data) + if copied != len(e.Data) { + return nil, ErrBuf + } + return b, nil +} + +func (e *EDNS0_LOCAL) unpack(b []byte) error { + e.Data = make([]byte, len(b)) + copied := copy(e.Data, b) + if copied != len(b) { + return ErrBuf + } + return nil +} diff --git a/vendor/github.com/miekg/dns/format.go b/vendor/github.com/miekg/dns/format.go new file mode 100644 index 000000000..3f5303c20 --- /dev/null +++ b/vendor/github.com/miekg/dns/format.go @@ -0,0 +1,87 @@ +package dns + +import ( + "net" + "reflect" + "strconv" +) + +// NumField returns the number of rdata fields r has. +func NumField(r RR) int { + return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header +} + +// Field returns the rdata field i as a string. Fields are indexed starting from 1. +// RR types that holds slice data, for instance the NSEC type bitmap will return a single +// string where the types are concatenated using a space. +// Accessing non existing fields will cause a panic. 
+func Field(r RR, i int) string { + if i == 0 { + return "" + } + d := reflect.ValueOf(r).Elem().Field(i) + switch k := d.Kind(); k { + case reflect.String: + return d.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(d.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return strconv.FormatUint(d.Uint(), 10) + case reflect.Slice: + switch reflect.ValueOf(r).Elem().Type().Field(i).Tag { + case `dns:"a"`: + // TODO(miek): Hmm store this as 16 bytes + if d.Len() < net.IPv6len { + return net.IPv4(byte(d.Index(0).Uint()), + byte(d.Index(1).Uint()), + byte(d.Index(2).Uint()), + byte(d.Index(3).Uint())).String() + } + return net.IPv4(byte(d.Index(12).Uint()), + byte(d.Index(13).Uint()), + byte(d.Index(14).Uint()), + byte(d.Index(15).Uint())).String() + case `dns:"aaaa"`: + return net.IP{ + byte(d.Index(0).Uint()), + byte(d.Index(1).Uint()), + byte(d.Index(2).Uint()), + byte(d.Index(3).Uint()), + byte(d.Index(4).Uint()), + byte(d.Index(5).Uint()), + byte(d.Index(6).Uint()), + byte(d.Index(7).Uint()), + byte(d.Index(8).Uint()), + byte(d.Index(9).Uint()), + byte(d.Index(10).Uint()), + byte(d.Index(11).Uint()), + byte(d.Index(12).Uint()), + byte(d.Index(13).Uint()), + byte(d.Index(14).Uint()), + byte(d.Index(15).Uint()), + }.String() + case `dns:"nsec"`: + if d.Len() == 0 { + return "" + } + s := Type(d.Index(0).Uint()).String() + for i := 1; i < d.Len(); i++ { + s += " " + Type(d.Index(i).Uint()).String() + } + return s + default: + // if it does not have a tag its a string slice + fallthrough + case `dns:"txt"`: + if d.Len() == 0 { + return "" + } + s := d.Index(0).String() + for i := 1; i < d.Len(); i++ { + s += " " + d.Index(i).String() + } + return s + } + } + return "" +} diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go new file mode 100644 index 000000000..e4481a4b0 --- /dev/null +++ b/vendor/github.com/miekg/dns/generate.go @@ -0,0 +1,159 @@ +package dns + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" +) + +// Parse the $GENERATE statement as used in BIND9 zones. +// See http://www.zytrax.com/books/dns/ch8/generate.html for instance. +// We are called after '$GENERATE '. After which we expect: +// * the range (12-24/2) +// * lhs (ownername) +// * [[ttl][class]] +// * type +// * rhs (rdata) +// But we are lazy here, only the range is parsed *all* occurrences +// of $ after that are interpreted. +// Any error are returned as a string value, the empty string signals +// "no error". +func generate(l lex, c chan lex, t chan *Token, o string) string { + step := 1 + if i := strings.IndexAny(l.token, "/"); i != -1 { + if i+1 == len(l.token) { + return "bad step in $GENERATE range" + } + if s, err := strconv.Atoi(l.token[i+1:]); err == nil { + if s < 0 { + return "bad step in $GENERATE range" + } + step = s + } else { + return "bad step in $GENERATE range" + } + l.token = l.token[:i] + } + sx := strings.SplitN(l.token, "-", 2) + if len(sx) != 2 { + return "bad start-stop in $GENERATE range" + } + start, err := strconv.Atoi(sx[0]) + if err != nil { + return "bad start in $GENERATE range" + } + end, err := strconv.Atoi(sx[1]) + if err != nil { + return "bad stop in $GENERATE range" + } + if end < 0 || start < 0 || end < start { + return "bad range in $GENERATE range" + } + + <-c // _BLANK + // Create a complete new string, which we then parse again. 
+ s := "" +BuildRR: + l = <-c + if l.value != zNewline && l.value != zEOF { + s += l.token + goto BuildRR + } + for i := start; i <= end; i += step { + var ( + escape bool + dom bytes.Buffer + mod string + err error + offset int + ) + + for j := 0; j < len(s); j++ { // No 'range' because we need to jump around + switch s[j] { + case '\\': + if escape { + dom.WriteByte('\\') + escape = false + continue + } + escape = true + case '$': + mod = "%d" + offset = 0 + if escape { + dom.WriteByte('$') + escape = false + continue + } + escape = false + if j+1 >= len(s) { // End of the string + dom.WriteString(fmt.Sprintf(mod, i+offset)) + continue + } else { + if s[j+1] == '$' { + dom.WriteByte('$') + j++ + continue + } + } + // Search for { and } + if s[j+1] == '{' { // Modifier block + sep := strings.Index(s[j+2:], "}") + if sep == -1 { + return "bad modifier in $GENERATE" + } + mod, offset, err = modToPrintf(s[j+2 : j+2+sep]) + if err != nil { + return err.Error() + } + j += 2 + sep // Jump to it + } + dom.WriteString(fmt.Sprintf(mod, i+offset)) + default: + if escape { // Pretty useless here + escape = false + continue + } + dom.WriteByte(s[j]) + } + } + // Re-parse the RR and send it on the current channel t + rx, err := NewRR("$ORIGIN " + o + "\n" + dom.String()) + if err != nil { + return err.Error() + } + t <- &Token{RR: rx} + // Its more efficient to first built the rrlist and then parse it in + // one go! But is this a problem? + } + return "" +} + +// Convert a $GENERATE modifier 0,0,d to something Printf can deal with. +func modToPrintf(s string) (string, int, error) { + xs := strings.SplitN(s, ",", 3) + if len(xs) != 3 { + return "", 0, errors.New("bad modifier in $GENERATE") + } + // xs[0] is offset, xs[1] is width, xs[2] is base + if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" { + return "", 0, errors.New("bad base in $GENERATE") + } + offset, err := strconv.Atoi(xs[0]) + if err != nil || offset > 255 { + return "", 0, errors.New("bad offset in $GENERATE") + } + width, err := strconv.Atoi(xs[1]) + if err != nil || width > 255 { + return "", offset, errors.New("bad width in $GENERATE") + } + switch { + case width < 0: + return "", offset, errors.New("bad width in $GENERATE") + case width == 0: + return "%" + xs[1] + xs[2], offset, nil + } + return "%0" + xs[1] + xs[2], offset, nil +} diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go new file mode 100644 index 000000000..fca5c7dd2 --- /dev/null +++ b/vendor/github.com/miekg/dns/labels.go @@ -0,0 +1,168 @@ +package dns + +// Holds a bunch of helper functions for dealing with labels. + +// SplitDomainName splits a name string into it's labels. +// www.miek.nl. returns []string{"www", "miek", "nl"} +// .www.miek.nl. returns []string{"", "www", "miek", "nl"}, +// The root label (.) returns nil. Note that using +// strings.Split(s) will work in most cases, but does not handle +// escaped dots (\.) for instance. +// s must be a syntactically valid domain name, see IsDomainName. +func SplitDomainName(s string) (labels []string) { + if len(s) == 0 { + return nil + } + fqdnEnd := 0 // offset of the final '.' or the length of the name + idx := Split(s) + begin := 0 + if s[len(s)-1] == '.' 
{ + fqdnEnd = len(s) - 1 + } else { + fqdnEnd = len(s) + } + + switch len(idx) { + case 0: + return nil + case 1: + // no-op + default: + end := 0 + for i := 1; i < len(idx); i++ { + end = idx[i] + labels = append(labels, s[begin:end-1]) + begin = end + } + } + + labels = append(labels, s[begin:fqdnEnd]) + return labels +} + +// CompareDomainName compares the names s1 and s2 and +// returns how many labels they have in common starting from the *right*. +// The comparison stops at the first inequality. The names are not downcased +// before the comparison. +// +// www.miek.nl. and miek.nl. have two labels in common: miek and nl +// www.miek.nl. and www.bla.nl. have one label in common: nl +// +// s1 and s2 must be syntactically valid domain names. +func CompareDomainName(s1, s2 string) (n int) { + s1 = Fqdn(s1) + s2 = Fqdn(s2) + l1 := Split(s1) + l2 := Split(s2) + + // the first check: root label + if l1 == nil || l2 == nil { + return + } + + j1 := len(l1) - 1 // end + i1 := len(l1) - 2 // start + j2 := len(l2) - 1 + i2 := len(l2) - 2 + // the second check can be done here: last/only label + // before we fall through into the for-loop below + if s1[l1[j1]:] == s2[l2[j2]:] { + n++ + } else { + return + } + for { + if i1 < 0 || i2 < 0 { + break + } + if s1[l1[i1]:l1[j1]] == s2[l2[i2]:l2[j2]] { + n++ + } else { + break + } + j1-- + i1-- + j2-- + i2-- + } + return +} + +// CountLabel counts the the number of labels in the string s. +// s must be a syntactically valid domain name. +func CountLabel(s string) (labels int) { + if s == "." { + return + } + off := 0 + end := false + for { + off, end = NextLabel(s, off) + labels++ + if end { + return + } + } +} + +// Split splits a name s into its label indexes. +// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}. +// The root name (.) returns nil. Also see SplitDomainName. +// s must be a syntactically valid domain name. +func Split(s string) []int { + if s == "." { + return nil + } + idx := make([]int, 1, 3) + off := 0 + end := false + + for { + off, end = NextLabel(s, off) + if end { + return idx + } + idx = append(idx, off) + } +} + +// NextLabel returns the index of the start of the next label in the +// string s starting at offset. +// The bool end is true when the end of the string has been reached. +// Also see PrevLabel. +func NextLabel(s string, offset int) (i int, end bool) { + quote := false + for i = offset; i < len(s)-1; i++ { + switch s[i] { + case '\\': + quote = !quote + default: + quote = false + case '.': + if quote { + quote = !quote + continue + } + return i + 1, false + } + } + return i + 1, true +} + +// PrevLabel returns the index of the label when starting from the right and +// jumping n labels to the left. +// The bool start is true when the start of the string has been overshot. +// Also see NextLabel. +func PrevLabel(s string, n int) (i int, start bool) { + if n == 0 { + return len(s), false + } + lab := Split(s) + if lab == nil { + return 0, true + } + if n > len(lab) { + return 0, true + } + return lab[len(lab)-n], false +} diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go new file mode 100644 index 000000000..ec2f7ab7b --- /dev/null +++ b/vendor/github.com/miekg/dns/msg.go @@ -0,0 +1,1231 @@ +// DNS packet assembly, see RFC 1035. Converting from - Unpack() - +// and to - Pack() - wire format. +// All the packers and unpackers take a (msg []byte, off int) +// and return (off1 int, ok bool). 
If they return ok==false, they +// also return off1==len(msg), so that the next unpacker will +// also fail. This lets us avoid checks of ok until the end of a +// packing sequence. + +package dns + +//go:generate go run msg_generate.go + +import ( + crand "crypto/rand" + "encoding/binary" + "math/big" + "math/rand" + "strconv" +) + +func init() { + // Initialize default math/rand source using crypto/rand to provide better + // security without the performance trade-off. + buf := make([]byte, 8) + _, err := crand.Read(buf) + if err != nil { + // Failed to read from cryptographic source, fallback to default initial + // seed (1) by returning early + return + } + seed := binary.BigEndian.Uint64(buf) + rand.Seed(int64(seed)) +} + +const maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer + +var ( + ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm. + ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication. + ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used it too small for the message. + ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being uses before it is initialized. + ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode ... + ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot. + ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID. + ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid. + ErrKey error = &Error{err: "bad key"} + ErrKeySize error = &Error{err: "bad key size"} + ErrNoSig error = &Error{err: "no signature found"} + ErrPrivKey error = &Error{err: "bad private key"} + ErrRcode error = &Error{err: "bad rcode"} + ErrRdata error = &Error{err: "bad rdata"} + ErrRRset error = &Error{err: "bad rrset"} + ErrSecret error = &Error{err: "no secrets defined"} + ErrShortRead error = &Error{err: "short read"} + ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated. + ErrSoa error = &Error{err: "no SOA"} // ErrSOA indicates that no SOA RR was seen when doing zone transfers. + ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication. + ErrTruncated error = &Error{err: "failed to unpack truncated message"} // ErrTruncated indicates that we failed to unpack a truncated message. We unpacked as much as we had so Msg can still be used, if desired. +) + +// Id, by default, returns a 16 bits random number to be used as a +// message id. The random provided should be good enough. This being a +// variable the function can be reassigned to a custom function. +// For instance, to make it return a static value: +// +// dns.Id = func() uint16 { return 3 } +var Id func() uint16 = id + +// id returns a 16 bits random number to be used as a +// message id. The random provided should be good enough. +func id() uint16 { + id32 := rand.Uint32() + return uint16(id32) +} + +// MsgHdr is a a manually-unpacked version of (id, bits). 
+type MsgHdr struct { + Id uint16 + Response bool + Opcode int + Authoritative bool + Truncated bool + RecursionDesired bool + RecursionAvailable bool + Zero bool + AuthenticatedData bool + CheckingDisabled bool + Rcode int +} + +// Msg contains the layout of a DNS message. +type Msg struct { + MsgHdr + Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format. + Question []Question // Holds the RR(s) of the question section. + Answer []RR // Holds the RR(s) of the answer section. + Ns []RR // Holds the RR(s) of the authority section. + Extra []RR // Holds the RR(s) of the additional section. +} + +// ClassToString is a maps Classes to strings for each CLASS wire type. +var ClassToString = map[uint16]string{ + ClassINET: "IN", + ClassCSNET: "CS", + ClassCHAOS: "CH", + ClassHESIOD: "HS", + ClassNONE: "NONE", + ClassANY: "ANY", +} + +// OpcodeToString maps Opcodes to strings. +var OpcodeToString = map[int]string{ + OpcodeQuery: "QUERY", + OpcodeIQuery: "IQUERY", + OpcodeStatus: "STATUS", + OpcodeNotify: "NOTIFY", + OpcodeUpdate: "UPDATE", +} + +// RcodeToString maps Rcodes to strings. +var RcodeToString = map[int]string{ + RcodeSuccess: "NOERROR", + RcodeFormatError: "FORMERR", + RcodeServerFailure: "SERVFAIL", + RcodeNameError: "NXDOMAIN", + RcodeNotImplemented: "NOTIMPL", + RcodeRefused: "REFUSED", + RcodeYXDomain: "YXDOMAIN", // See RFC 2136 + RcodeYXRrset: "YXRRSET", + RcodeNXRrset: "NXRRSET", + RcodeNotAuth: "NOTAUTH", + RcodeNotZone: "NOTZONE", + RcodeBadSig: "BADSIG", // Also known as RcodeBadVers, see RFC 6891 + // RcodeBadVers: "BADVERS", + RcodeBadKey: "BADKEY", + RcodeBadTime: "BADTIME", + RcodeBadMode: "BADMODE", + RcodeBadName: "BADNAME", + RcodeBadAlg: "BADALG", + RcodeBadTrunc: "BADTRUNC", + RcodeBadCookie: "BADCOOKIE", +} + +// Domain names are a sequence of counted strings +// split at the dots. They end with a zero-length string. + +// PackDomainName packs a domain name s into msg[off:]. +// If compression is wanted compress must be true and the compression +// map needs to hold a mapping between domain names and offsets +// pointing into msg. +func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + off1, _, err = packDomainName(s, msg, off, compression, compress) + return +} + +func packDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, labels int, err error) { + // special case if msg == nil + lenmsg := 256 + if msg != nil { + lenmsg = len(msg) + } + ls := len(s) + if ls == 0 { // Ok, for instance when dealing with update RR without any rdata. + return off, 0, nil + } + // If not fully qualified, error out, but only if msg == nil #ugly + switch { + case msg == nil: + if s[ls-1] != '.' { + s += "." + ls++ + } + case msg != nil: + if s[ls-1] != '.' { + return lenmsg, 0, ErrFqdn + } + } + // Each dot ends a segment of the name. + // We trade each dot byte for a length byte. + // Except for escaped dots (\.), which are normal dots. + // There is also a trailing zero. + + // Compression + nameoffset := -1 + pointer := -1 + // Emit sequence of counted strings, chopping at dots. 
+ begin := 0 + bs := []byte(s) + roBs, bsFresh, escapedDot := s, true, false + for i := 0; i < ls; i++ { + if bs[i] == '\\' { + for j := i; j < ls-1; j++ { + bs[j] = bs[j+1] + } + ls-- + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + // check for \DDD + if i+2 < ls && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + bs[i] = dddToByte(bs[i:]) + for j := i + 1; j < ls-2; j++ { + bs[j] = bs[j+2] + } + ls -= 2 + } else if bs[i] == 't' { + bs[i] = '\t' + } else if bs[i] == 'r' { + bs[i] = '\r' + } else if bs[i] == 'n' { + bs[i] = '\n' + } + escapedDot = bs[i] == '.' + bsFresh = false + continue + } + + if bs[i] == '.' { + if i > 0 && bs[i-1] == '.' && !escapedDot { + // two dots back to back is not legal + return lenmsg, labels, ErrRdata + } + if i-begin >= 1<<6 { // top two bits of length must be clear + return lenmsg, labels, ErrRdata + } + // off can already (we're in a loop) be bigger than len(msg) + // this happens when a name isn't fully qualified + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + if msg != nil { + msg[off] = byte(i - begin) + } + offset := off + off++ + for j := begin; j < i; j++ { + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + if msg != nil { + msg[off] = bs[j] + } + off++ + } + if compress && !bsFresh { + roBs = string(bs) + bsFresh = true + } + // Don't try to compress '.' + if compress && roBs[begin:] != "." { + if p, ok := compression[roBs[begin:]]; !ok { + // Only offsets smaller than this can be used. + if offset < maxCompressionOffset { + compression[roBs[begin:]] = offset + } + } else { + // The first hit is the longest matching dname + // keep the pointer offset we get back and store + // the offset of the current name, because that's + // where we need to insert the pointer later + + // If compress is true, we're allowed to compress this dname + if pointer == -1 && compress { + pointer = p // Where to point to + nameoffset = offset // Where to point from + break + } + } + } + labels++ + begin = i + 1 + } + escapedDot = false + } + // Root label is special + if len(bs) == 1 && bs[0] == '.' { + return off, labels, nil + } + // If we did compression and we find something add the pointer here + if pointer != -1 { + // We have two bytes (14 bits) to put the pointer in + // if msg == nil, we will never do compression + binary.BigEndian.PutUint16(msg[nameoffset:], uint16(pointer^0xC000)) + off = nameoffset + 1 + goto End + } + if msg != nil && off < len(msg) { + msg[off] = 0 + } +End: + off++ + return off, labels, nil +} + +// Unpack a domain name. +// In addition to the simple sequences of counted strings above, +// domain names are allowed to refer to strings elsewhere in the +// packet, to avoid repeating common suffixes when returning +// many entries in a single domain. The pointers are marked +// by a length byte with the top two bits set. Ignoring those +// two bits, that byte and the next give a 14 bit offset from msg[0] +// where we should pick up the trail. +// Note that if we jump elsewhere in the packet, +// we return off1 == the offset after the first pointer we found, +// which is where the next record will start. +// In theory, the pointers are only allowed to jump backward. +// We let them jump anywhere and stop jumping after a while. + +// UnpackDomainName unpacks a domain name into a string. 
+func UnpackDomainName(msg []byte, off int) (string, int, error) { + s := make([]byte, 0, 64) + off1 := 0 + lenmsg := len(msg) + ptr := 0 // number of pointers followed +Loop: + for { + if off >= lenmsg { + return "", lenmsg, ErrBuf + } + c := int(msg[off]) + off++ + switch c & 0xC0 { + case 0x00: + if c == 0x00 { + // end of name + break Loop + } + // literal string + if off+c > lenmsg { + return "", lenmsg, ErrBuf + } + for j := off; j < off+c; j++ { + switch b := msg[j]; b { + case '.', '(', ')', ';', ' ', '@': + fallthrough + case '"', '\\': + s = append(s, '\\', b) + case '\t': + s = append(s, '\\', 't') + case '\r': + s = append(s, '\\', 'r') + default: + if b < 32 || b >= 127 { // unprintable use \DDD + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + } else { + s = append(s, b) + } + } + } + s = append(s, '.') + off += c + case 0xC0: + // pointer to somewhere else in msg. + // remember location after first ptr, + // since that's how many bytes we consumed. + // also, don't follow too many pointers -- + // maybe there's a loop. + if off >= lenmsg { + return "", lenmsg, ErrBuf + } + c1 := msg[off] + off++ + if ptr == 0 { + off1 = off + } + if ptr++; ptr > 10 { + return "", lenmsg, &Error{err: "too many compression pointers"} + } + off = (c^0xC0)<<8 | int(c1) + default: + // 0x80 and 0x40 are reserved + return "", lenmsg, ErrRdata + } + } + if ptr == 0 { + off1 = off + } + if len(s) == 0 { + s = []byte(".") + } + return string(s), off1, nil +} + +func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { + if len(txt) == 0 { + if offset >= len(msg) { + return offset, ErrBuf + } + msg[offset] = 0 + return offset, nil + } + var err error + for i := range txt { + if len(txt[i]) > len(tmp) { + return offset, ErrBuf + } + offset, err = packTxtString(txt[i], msg, offset, tmp) + if err != nil { + return offset, err + } + } + return offset, nil +} + +func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { + lenByteOffset := offset + if offset >= len(msg) || len(s) > len(tmp) { + return offset, ErrBuf + } + offset++ + bs := tmp[:len(s)] + copy(bs, s) + for i := 0; i < len(bs); i++ { + if len(msg) <= offset { + return offset, ErrBuf + } + if bs[i] == '\\' { + i++ + if i == len(bs) { + break + } + // check for \DDD + if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + msg[offset] = dddToByte(bs[i:]) + i += 2 + } else if bs[i] == 't' { + msg[offset] = '\t' + } else if bs[i] == 'r' { + msg[offset] = '\r' + } else if bs[i] == 'n' { + msg[offset] = '\n' + } else { + msg[offset] = bs[i] + } + } else { + msg[offset] = bs[i] + } + offset++ + } + l := offset - lenByteOffset - 1 + if l > 255 { + return offset, &Error{err: "string exceeded 255 bytes in txt"} + } + msg[lenByteOffset] = byte(l) + return offset, nil +} + +func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) { + if offset >= len(msg) || len(s) > len(tmp) { + return offset, ErrBuf + } + bs := tmp[:len(s)] + copy(bs, s) + for i := 0; i < len(bs); i++ { + if len(msg) <= offset { + return offset, ErrBuf + } + if bs[i] == '\\' { + i++ + if i == len(bs) { + break + } + // check for \DDD + if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + msg[offset] = dddToByte(bs[i:]) + i += 2 + } else { + msg[offset] = bs[i] + } + } else { + msg[offset] = bs[i] + } + offset++ + } + return 
offset, nil +} + +func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) { + off = off0 + var s string + for off < len(msg) && err == nil { + s, off, err = unpackTxtString(msg, off) + if err == nil { + ss = append(ss, s) + } + } + return +} + +func unpackTxtString(msg []byte, offset int) (string, int, error) { + if offset+1 > len(msg) { + return "", offset, &Error{err: "overflow unpacking txt"} + } + l := int(msg[offset]) + if offset+l+1 > len(msg) { + return "", offset, &Error{err: "overflow unpacking txt"} + } + s := make([]byte, 0, l) + for _, b := range msg[offset+1 : offset+1+l] { + switch b { + case '"', '\\': + s = append(s, '\\', b) + case '\t': + s = append(s, `\t`...) + case '\r': + s = append(s, `\r`...) + case '\n': + s = append(s, `\n`...) + default: + if b < 32 || b > 127 { // unprintable + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + } else { + s = append(s, b) + } + } + } + offset += 1 + l + return string(s), offset, nil +} + +// Helpers for dealing with escaped bytes +func isDigit(b byte) bool { return b >= '0' && b <= '9' } + +func dddToByte(s []byte) byte { + return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) +} + +// Helper function for packing and unpacking +func intToBytes(i *big.Int, length int) []byte { + buf := i.Bytes() + if len(buf) < length { + b := make([]byte, length) + copy(b[length-len(buf):], buf) + return b + } + return buf +} + +// PackRR packs a resource record rr into msg[off:]. +// See PackDomainName for documentation about the compression. +func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + if rr == nil { + return len(msg), &Error{err: "nil rr"} + } + + off1, err = rr.pack(msg, off, compression, compress) + if err != nil { + return len(msg), err + } + // TODO(miek): Not sure if this is needed? If removed we can remove rawmsg.go as well. + if rawSetRdlength(msg, off, off1) { + return off1, nil + } + return off, ErrRdata +} + +// UnpackRR unpacks msg[off:] into an RR. +func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) { + h, off, msg, err := unpackHeader(msg, off) + if err != nil { + return nil, len(msg), err + } + end := off + int(h.Rdlength) + + if fn, known := typeToUnpack[h.Rrtype]; !known { + rr, off, err = unpackRFC3597(h, msg, off) + } else { + rr, off, err = fn(h, msg, off) + } + if off != end { + return &h, end, &Error{err: "bad rdlength"} + } + return rr, off, err +} + +// unpackRRslice unpacks msg[off:] into an []RR. 
+// If we cannot unpack the whole array, then it will return nil +func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) { + var r RR + // Optimistically make dst be the length that was sent + dst := make([]RR, 0, l) + for i := 0; i < l; i++ { + off1 := off + r, off, err = UnpackRR(msg, off) + if err != nil { + off = len(msg) + break + } + // If offset does not increase anymore, l is a lie + if off1 == off { + l = i + break + } + dst = append(dst, r) + } + if err != nil && off == len(msg) { + dst = nil + } + return dst, off, err +} + +// Convert a MsgHdr to a string, with dig-like headers: +// +//;; opcode: QUERY, status: NOERROR, id: 48404 +// +//;; flags: qr aa rd ra; +func (h *MsgHdr) String() string { + if h == nil { + return " MsgHdr" + } + + s := ";; opcode: " + OpcodeToString[h.Opcode] + s += ", status: " + RcodeToString[h.Rcode] + s += ", id: " + strconv.Itoa(int(h.Id)) + "\n" + + s += ";; flags:" + if h.Response { + s += " qr" + } + if h.Authoritative { + s += " aa" + } + if h.Truncated { + s += " tc" + } + if h.RecursionDesired { + s += " rd" + } + if h.RecursionAvailable { + s += " ra" + } + if h.Zero { // Hmm + s += " z" + } + if h.AuthenticatedData { + s += " ad" + } + if h.CheckingDisabled { + s += " cd" + } + + s += ";" + return s +} + +// Pack packs a Msg: it is converted to to wire format. +// If the dns.Compress is true the message will be in compressed wire format. +func (dns *Msg) Pack() (msg []byte, err error) { + return dns.PackBuffer(nil) +} + +// PackBuffer packs a Msg, using the given buffer buf. If buf is too small +// a new buffer is allocated. +func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) { + // We use a similar function in tsig.go's stripTsig. + var ( + dh Header + compression map[string]int + ) + + if dns.Compress { + compression = make(map[string]int) // Compression pointer mappings + } + + if dns.Rcode < 0 || dns.Rcode > 0xFFF { + return nil, ErrRcode + } + if dns.Rcode > 0xF { + // Regular RCODE field is 4 bits + opt := dns.IsEdns0() + if opt == nil { + return nil, ErrExtendedRcode + } + opt.SetExtendedRcode(uint8(dns.Rcode >> 4)) + dns.Rcode &= 0xF + } + + // Convert convenient Msg into wire-like Header. + dh.Id = dns.Id + dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode) + if dns.Response { + dh.Bits |= _QR + } + if dns.Authoritative { + dh.Bits |= _AA + } + if dns.Truncated { + dh.Bits |= _TC + } + if dns.RecursionDesired { + dh.Bits |= _RD + } + if dns.RecursionAvailable { + dh.Bits |= _RA + } + if dns.Zero { + dh.Bits |= _Z + } + if dns.AuthenticatedData { + dh.Bits |= _AD + } + if dns.CheckingDisabled { + dh.Bits |= _CD + } + + // Prepare variable sized arrays. + question := dns.Question + answer := dns.Answer + ns := dns.Ns + extra := dns.Extra + + dh.Qdcount = uint16(len(question)) + dh.Ancount = uint16(len(answer)) + dh.Nscount = uint16(len(ns)) + dh.Arcount = uint16(len(extra)) + + // We need the uncompressed length here, because we first pack it and then compress it. + msg = buf + compress := dns.Compress + dns.Compress = false + if packLen := dns.Len() + 1; len(msg) < packLen { + msg = make([]byte, packLen) + } + dns.Compress = compress + + // Pack it in: header and then the pieces. 
+ off := 0 + off, err = dh.pack(msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + for i := 0; i < len(question); i++ { + off, err = question[i].pack(msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(answer); i++ { + off, err = PackRR(answer[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(ns); i++ { + off, err = PackRR(ns[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(extra); i++ { + off, err = PackRR(extra[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + return msg[:off], nil +} + +// Unpack unpacks a binary message to a Msg structure. +func (dns *Msg) Unpack(msg []byte) (err error) { + var ( + dh Header + off int + ) + if dh, off, err = unpackMsgHdr(msg, off); err != nil { + return err + } + if off == len(msg) { + return ErrTruncated + } + + dns.Id = dh.Id + dns.Response = (dh.Bits & _QR) != 0 + dns.Opcode = int(dh.Bits>>11) & 0xF + dns.Authoritative = (dh.Bits & _AA) != 0 + dns.Truncated = (dh.Bits & _TC) != 0 + dns.RecursionDesired = (dh.Bits & _RD) != 0 + dns.RecursionAvailable = (dh.Bits & _RA) != 0 + dns.Zero = (dh.Bits & _Z) != 0 + dns.AuthenticatedData = (dh.Bits & _AD) != 0 + dns.CheckingDisabled = (dh.Bits & _CD) != 0 + dns.Rcode = int(dh.Bits & 0xF) + + // Optimistically use the count given to us in the header + dns.Question = make([]Question, 0, int(dh.Qdcount)) + + for i := 0; i < int(dh.Qdcount); i++ { + off1 := off + var q Question + q, off, err = unpackQuestion(msg, off) + if err != nil { + // Even if Truncated is set, we only will set ErrTruncated if we + // actually got the questions + return err + } + if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie! + dh.Qdcount = uint16(i) + break + } + dns.Question = append(dns.Question, q) + } + + dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off) + // The header counts might have been wrong so we need to update it + dh.Ancount = uint16(len(dns.Answer)) + if err == nil { + dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off) + } + // The header counts might have been wrong so we need to update it + dh.Nscount = uint16(len(dns.Ns)) + if err == nil { + dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off) + } + // The header counts might have been wrong so we need to update it + dh.Arcount = uint16(len(dns.Extra)) + + if off != len(msg) { + // TODO(miek) make this an error? + // use PackOpt to let people tell how detailed the error reporting should be? + // println("dns: extra bytes in dns packet", off, "<", len(msg)) + } else if dns.Truncated { + // Whether we ran into a an error or not, we want to return that it + // was truncated + err = ErrTruncated + } + return err +} + +// Convert a complete message to a string with dig-like output. 
+func (dns *Msg) String() string { + if dns == nil { + return " MsgHdr" + } + s := dns.MsgHdr.String() + " " + s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " + s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " + s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " + s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" + if len(dns.Question) > 0 { + s += "\n;; QUESTION SECTION:\n" + for i := 0; i < len(dns.Question); i++ { + s += dns.Question[i].String() + "\n" + } + } + if len(dns.Answer) > 0 { + s += "\n;; ANSWER SECTION:\n" + for i := 0; i < len(dns.Answer); i++ { + if dns.Answer[i] != nil { + s += dns.Answer[i].String() + "\n" + } + } + } + if len(dns.Ns) > 0 { + s += "\n;; AUTHORITY SECTION:\n" + for i := 0; i < len(dns.Ns); i++ { + if dns.Ns[i] != nil { + s += dns.Ns[i].String() + "\n" + } + } + } + if len(dns.Extra) > 0 { + s += "\n;; ADDITIONAL SECTION:\n" + for i := 0; i < len(dns.Extra); i++ { + if dns.Extra[i] != nil { + s += dns.Extra[i].String() + "\n" + } + } + } + return s +} + +// Len returns the message length when in (un)compressed wire format. +// If dns.Compress is true compression it is taken into account. Len() +// is provided to be a faster way to get the size of the resulting packet, +// than packing it, measuring the size and discarding the buffer. +func (dns *Msg) Len() int { + // We always return one more than needed. + l := 12 // Message header is always 12 bytes + var compression map[string]int + if dns.Compress { + compression = make(map[string]int) + } + for i := 0; i < len(dns.Question); i++ { + l += dns.Question[i].len() + if dns.Compress { + compressionLenHelper(compression, dns.Question[i].Name) + } + } + for i := 0; i < len(dns.Answer); i++ { + if dns.Answer[i] == nil { + continue + } + l += dns.Answer[i].len() + if dns.Compress { + k, ok := compressionLenSearch(compression, dns.Answer[i].Header().Name) + if ok { + l += 1 - k + } + compressionLenHelper(compression, dns.Answer[i].Header().Name) + k, ok = compressionLenSearchType(compression, dns.Answer[i]) + if ok { + l += 1 - k + } + compressionLenHelperType(compression, dns.Answer[i]) + } + } + for i := 0; i < len(dns.Ns); i++ { + if dns.Ns[i] == nil { + continue + } + l += dns.Ns[i].len() + if dns.Compress { + k, ok := compressionLenSearch(compression, dns.Ns[i].Header().Name) + if ok { + l += 1 - k + } + compressionLenHelper(compression, dns.Ns[i].Header().Name) + k, ok = compressionLenSearchType(compression, dns.Ns[i]) + if ok { + l += 1 - k + } + compressionLenHelperType(compression, dns.Ns[i]) + } + } + for i := 0; i < len(dns.Extra); i++ { + if dns.Extra[i] == nil { + continue + } + l += dns.Extra[i].len() + if dns.Compress { + k, ok := compressionLenSearch(compression, dns.Extra[i].Header().Name) + if ok { + l += 1 - k + } + compressionLenHelper(compression, dns.Extra[i].Header().Name) + k, ok = compressionLenSearchType(compression, dns.Extra[i]) + if ok { + l += 1 - k + } + compressionLenHelperType(compression, dns.Extra[i]) + } + } + return l +} + +// Put the parts of the name in the compression map. +func compressionLenHelper(c map[string]int, s string) { + pref := "" + lbs := Split(s) + for j := len(lbs) - 1; j >= 0; j-- { + pref = s[lbs[j]:] + if _, ok := c[pref]; !ok { + c[pref] = len(pref) + } + } +} + +// Look for each part in the compression map and returns its length, +// keep on searching so we get the longest match. 
+func compressionLenSearch(c map[string]int, s string) (int, bool) { + off := 0 + end := false + if s == "" { // don't bork on bogus data + return 0, false + } + for { + if _, ok := c[s[off:]]; ok { + return len(s[off:]), true + } + if end { + break + } + off, end = NextLabel(s, off) + } + return 0, false +} + +// TODO(miek): should add all types, because the all can be *used* for compression. Autogenerate from msg_generate and put in zmsg.go +func compressionLenHelperType(c map[string]int, r RR) { + switch x := r.(type) { + case *NS: + compressionLenHelper(c, x.Ns) + case *MX: + compressionLenHelper(c, x.Mx) + case *CNAME: + compressionLenHelper(c, x.Target) + case *PTR: + compressionLenHelper(c, x.Ptr) + case *SOA: + compressionLenHelper(c, x.Ns) + compressionLenHelper(c, x.Mbox) + case *MB: + compressionLenHelper(c, x.Mb) + case *MG: + compressionLenHelper(c, x.Mg) + case *MR: + compressionLenHelper(c, x.Mr) + case *MF: + compressionLenHelper(c, x.Mf) + case *MD: + compressionLenHelper(c, x.Md) + case *RT: + compressionLenHelper(c, x.Host) + case *RP: + compressionLenHelper(c, x.Mbox) + compressionLenHelper(c, x.Txt) + case *MINFO: + compressionLenHelper(c, x.Rmail) + compressionLenHelper(c, x.Email) + case *AFSDB: + compressionLenHelper(c, x.Hostname) + case *SRV: + compressionLenHelper(c, x.Target) + case *NAPTR: + compressionLenHelper(c, x.Replacement) + case *RRSIG: + compressionLenHelper(c, x.SignerName) + case *NSEC: + compressionLenHelper(c, x.NextDomain) + // HIP? + } +} + +// Only search on compressing these types. +func compressionLenSearchType(c map[string]int, r RR) (int, bool) { + switch x := r.(type) { + case *NS: + return compressionLenSearch(c, x.Ns) + case *MX: + return compressionLenSearch(c, x.Mx) + case *CNAME: + return compressionLenSearch(c, x.Target) + case *DNAME: + return compressionLenSearch(c, x.Target) + case *PTR: + return compressionLenSearch(c, x.Ptr) + case *SOA: + k, ok := compressionLenSearch(c, x.Ns) + k1, ok1 := compressionLenSearch(c, x.Mbox) + if !ok && !ok1 { + return 0, false + } + return k + k1, true + case *MB: + return compressionLenSearch(c, x.Mb) + case *MG: + return compressionLenSearch(c, x.Mg) + case *MR: + return compressionLenSearch(c, x.Mr) + case *MF: + return compressionLenSearch(c, x.Mf) + case *MD: + return compressionLenSearch(c, x.Md) + case *RT: + return compressionLenSearch(c, x.Host) + case *MINFO: + k, ok := compressionLenSearch(c, x.Rmail) + k1, ok1 := compressionLenSearch(c, x.Email) + if !ok && !ok1 { + return 0, false + } + return k + k1, true + case *AFSDB: + return compressionLenSearch(c, x.Hostname) + } + return 0, false +} + +// Copy returns a new RR which is a deep-copy of r. +func Copy(r RR) RR { r1 := r.copy(); return r1 } + +// Len returns the length (in octets) of the uncompressed RR in wire format. +func Len(r RR) int { return r.len() } + +// Copy returns a new *Msg which is a deep-copy of dns. +func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) } + +// CopyTo copies the contents to the provided message using a deep-copy and returns the copy. 
+func (dns *Msg) CopyTo(r1 *Msg) *Msg { + r1.MsgHdr = dns.MsgHdr + r1.Compress = dns.Compress + + if len(dns.Question) > 0 { + r1.Question = make([]Question, len(dns.Question)) + copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy + } + + rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) + var rri int + + if len(dns.Answer) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Answer); i++ { + rrArr[rri] = dns.Answer[i].copy() + rri++ + } + r1.Answer = rrArr[rrbegin:rri:rri] + } + + if len(dns.Ns) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Ns); i++ { + rrArr[rri] = dns.Ns[i].copy() + rri++ + } + r1.Ns = rrArr[rrbegin:rri:rri] + } + + if len(dns.Extra) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Extra); i++ { + rrArr[rri] = dns.Extra[i].copy() + rri++ + } + r1.Extra = rrArr[rrbegin:rri:rri] + } + + return r1 +} + +func (q *Question) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := PackDomainName(q.Name, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint16(q.Qtype, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(q.Qclass, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func unpackQuestion(msg []byte, off int) (Question, int, error) { + var ( + q Question + err error + ) + q.Name, off, err = UnpackDomainName(msg, off) + if err != nil { + return q, off, err + } + if off == len(msg) { + return q, off, nil + } + q.Qtype, off, err = unpackUint16(msg, off) + if err != nil { + return q, off, err + } + if off == len(msg) { + return q, off, nil + } + q.Qclass, off, err = unpackUint16(msg, off) + if off == len(msg) { + return q, off, nil + } + return q, off, err +} + +func (dh *Header) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := packUint16(dh.Id, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Bits, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Qdcount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Ancount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Nscount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Arcount, msg, off) + return off, err +} + +func unpackMsgHdr(msg []byte, off int) (Header, int, error) { + var ( + dh Header + err error + ) + dh.Id, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Bits, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Qdcount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Ancount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Nscount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Arcount, off, err = unpackUint16(msg, off) + return dh, off, err +} diff --git a/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/miekg/dns/msg_generate.go new file mode 100644 index 000000000..35786f22c --- /dev/null +++ b/vendor/github.com/miekg/dns/msg_generate.go @@ -0,0 +1,340 @@ +//+build ignore + +// msg_generate.go is meant to run with go generate. It will use +// go/{importer,types} to track down all the RR struct types. Then for each type +// it will generate pack/unpack methods based on the struct tags. 
The generated source is +// written to zmsg.go, and is meant to be checked into git. +package main + +import ( + "bytes" + "fmt" + "go/format" + "go/importer" + "go/types" + "log" + "os" + "strings" +) + +var packageHdr = ` +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from msg_generate.go + +package dns + +` + +// getTypeStruct will take a type and the package scope, and return the +// (innermost) struct if the type is considered a RR type (currently defined as +// those structs beginning with a RR_Header, could be redefined as implementing +// the RR interface). The bool return value indicates if embedded structs were +// resolved. +func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { + st, ok := t.Underlying().(*types.Struct) + if !ok { + return nil, false + } + if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { + return st, false + } + if st.Field(0).Anonymous() { + st, _ := getTypeStruct(st.Field(0).Type(), scope) + return st, true + } + return nil, false +} + +func main() { + // Import and type-check the package + pkg, err := importer.Default().Import("github.com/miekg/dns") + fatalIfErr(err) + scope := pkg.Scope() + + // Collect actual types (*X) + var namedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + if st, _ := getTypeStruct(o.Type(), scope); st == nil { + continue + } + if name == "PrivateRR" { + continue + } + + // Check if corresponding TypeX exists + if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { + log.Fatalf("Constant Type%s does not exist.", o.Name()) + } + + namedTypes = append(namedTypes, o.Name()) + } + + b := &bytes.Buffer{} + b.WriteString(packageHdr) + + fmt.Fprint(b, "// pack*() functions\n\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, _ := getTypeStruct(o.Type(), scope) + + fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {\n", name) + fmt.Fprint(b, `off, err := rr.Hdr.pack(msg, off, compression, compress) +if err != nil { + return off, err +} +headerEnd := off +`) + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { + fmt.Fprintf(b, s, st.Field(i).Name()) + fmt.Fprint(b, `if err != nil { +return off, err +} +`) + } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"txt"`: + o("off, err = packStringTxt(rr.%s, msg, off)\n") + case `dns:"opt"`: + o("off, err = packDataOpt(rr.%s, msg, off)\n") + case `dns:"nsec"`: + o("off, err = packDataNsec(rr.%s, msg, off)\n") + case `dns:"domain-name"`: + o("off, err = packDataDomainNames(rr.%s, msg, off, compression, compress)\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch { + case st.Tag(i) == `dns:"-"`: // ignored + case st.Tag(i) == `dns:"cdomain-name"`: + fallthrough + case st.Tag(i) == `dns:"domain-name"`: + o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n") + case st.Tag(i) == `dns:"a"`: + o("off, err = packDataA(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"aaaa"`: + o("off, err = packDataAAAA(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"uint48"`: + o("off, err = packUint48(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"txt"`: + o("off, err = packString(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32 + fallthrough + case st.Tag(i) == `dns:"base32"`: + o("off, err 
= packStringBase32(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64 + fallthrough + case st.Tag(i) == `dns:"base64"`: + o("off, err = packStringBase64(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): // Hack to fix empty salt length for NSEC3 + o("if rr.%s == \"-\" { /* do nothing, empty salt */ }\n") + continue + case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex + fallthrough + case st.Tag(i) == `dns:"hex"`: + o("off, err = packStringHex(rr.%s, msg, off)\n") + + case st.Tag(i) == `dns:"octet"`: + o("off, err = packStringOctet(rr.%s, msg, off)\n") + case st.Tag(i) == "": + switch st.Field(i).Type().(*types.Basic).Kind() { + case types.Uint8: + o("off, err = packUint8(rr.%s, msg, off)\n") + case types.Uint16: + o("off, err = packUint16(rr.%s, msg, off)\n") + case types.Uint32: + o("off, err = packUint32(rr.%s, msg, off)\n") + case types.Uint64: + o("off, err = packUint64(rr.%s, msg, off)\n") + case types.String: + o("off, err = packString(rr.%s, msg, off)\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + } + // We have packed everything, only now we know the rdlength of this RR + fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off-headerEnd)") + fmt.Fprintln(b, "return off, nil }\n") + } + + fmt.Fprint(b, "// unpack*() functions\n\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, _ := getTypeStruct(o.Type(), scope) + + fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name) + fmt.Fprintf(b, "rr := new(%s)\n", name) + fmt.Fprint(b, "rr.Hdr = h\n") + fmt.Fprint(b, `if noRdata(h) { +return rr, off, nil + } +var err error +rdStart := off +_ = rdStart + +`) + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { + fmt.Fprintf(b, s, st.Field(i).Name()) + fmt.Fprint(b, `if err != nil { +return rr, off, err +} +`) + } + + // size-* are special, because they reference a struct member we should use for the length. 
+ if strings.HasPrefix(st.Tag(i), `dns:"size-`) { + structMember := structMember(st.Tag(i)) + structTag := structTag(st.Tag(i)) + switch structTag { + case "hex": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + case "base32": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + case "base64": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + fmt.Fprint(b, `if err != nil { +return rr, off, err +} +`) + continue + } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"txt"`: + o("rr.%s, off, err = unpackStringTxt(msg, off)\n") + case `dns:"opt"`: + o("rr.%s, off, err = unpackDataOpt(msg, off)\n") + case `dns:"nsec"`: + o("rr.%s, off, err = unpackDataNsec(msg, off)\n") + case `dns:"domain-name"`: + o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"cdomain-name"`: + fallthrough + case `dns:"domain-name"`: + o("rr.%s, off, err = UnpackDomainName(msg, off)\n") + case `dns:"a"`: + o("rr.%s, off, err = unpackDataA(msg, off)\n") + case `dns:"aaaa"`: + o("rr.%s, off, err = unpackDataAAAA(msg, off)\n") + case `dns:"uint48"`: + o("rr.%s, off, err = unpackUint48(msg, off)\n") + case `dns:"txt"`: + o("rr.%s, off, err = unpackString(msg, off)\n") + case `dns:"base32"`: + o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"base64"`: + o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"hex"`: + o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"octet"`: + o("rr.%s, off, err = unpackStringOctet(msg, off)\n") + case "": + switch st.Field(i).Type().(*types.Basic).Kind() { + case types.Uint8: + o("rr.%s, off, err = unpackUint8(msg, off)\n") + case types.Uint16: + o("rr.%s, off, err = unpackUint16(msg, off)\n") + case types.Uint32: + o("rr.%s, off, err = unpackUint32(msg, off)\n") + case types.Uint64: + o("rr.%s, off, err = unpackUint64(msg, off)\n") + case types.String: + o("rr.%s, off, err = unpackString(msg, off)\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + // If we've hit len(msg) we return without error. + if i < st.NumFields()-1 { + fmt.Fprintf(b, `if off == len(msg) { +return rr, off, nil + } +`) + } + } + fmt.Fprintf(b, "return rr, off, err }\n\n") + } + // Generate typeToUnpack map + fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){") + for _, name := range namedTypes { + if name == "RFC3597" { + continue + } + fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name) + } + fmt.Fprintln(b, "}\n") + + // gofmt + res, err := format.Source(b.Bytes()) + if err != nil { + b.WriteTo(os.Stderr) + log.Fatal(err) + } + + // write result + f, err := os.Create("zmsg.go") + fatalIfErr(err) + defer f.Close() + f.Write(res) +} + +// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string. 
+func structMember(s string) string { + fields := strings.Split(s, ":") + if len(fields) == 0 { + return "" + } + f := fields[len(fields)-1] + // f should have a closing " + if len(f) > 1 { + return f[:len(f)-1] + } + return f +} + +// structTag will take a tag like dns:"size-base32:SaltLength" and return base32. +func structTag(s string) string { + fields := strings.Split(s, ":") + if len(fields) < 2 { + return "" + } + return fields[1][len("\"size-"):] +} + +func fatalIfErr(err error) { + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go new file mode 100644 index 000000000..e7a9500cc --- /dev/null +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -0,0 +1,630 @@ +package dns + +import ( + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "net" + "strconv" +) + +// helper functions called from the generated zmsg.go + +// These function are named after the tag to help pack/unpack, if there is no tag it is the name +// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or +// packDataDomainName. + +func unpackDataA(msg []byte, off int) (net.IP, int, error) { + if off+net.IPv4len > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking a"} + } + a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...) + off += net.IPv4len + return a, off, nil +} + +func packDataA(a net.IP, msg []byte, off int) (int, error) { + // It must be a slice of 4, even if it is 16, we encode only the first 4 + if off+net.IPv4len > len(msg) { + return len(msg), &Error{err: "overflow packing a"} + } + switch len(a) { + case net.IPv4len, net.IPv6len: + copy(msg[off:], a.To4()) + off += net.IPv4len + case 0: + // Allowed, for dynamic updates. + default: + return len(msg), &Error{err: "overflow packing a"} + } + return off, nil +} + +func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) { + if off+net.IPv6len > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking aaaa"} + } + aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...) + off += net.IPv6len + return aaaa, off, nil +} + +func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) { + if off+net.IPv6len > len(msg) { + return len(msg), &Error{err: "overflow packing aaaa"} + } + + switch len(aaaa) { + case net.IPv6len: + copy(msg[off:], aaaa) + off += net.IPv6len + case 0: + // Allowed, dynamic updates. + default: + return len(msg), &Error{err: "overflow packing aaaa"} + } + return off, nil +} + +// unpackHeader unpacks an RR header, returning the offset to the end of the header and a +// re-sliced msg according to the expected length of the RR. 
+func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) {
+	hdr := RR_Header{}
+	if off == len(msg) {
+		return hdr, off, msg, nil
+	}
+
+	hdr.Name, off, err = UnpackDomainName(msg, off)
+	if err != nil {
+		return hdr, len(msg), msg, err
+	}
+	hdr.Rrtype, off, err = unpackUint16(msg, off)
+	if err != nil {
+		return hdr, len(msg), msg, err
+	}
+	hdr.Class, off, err = unpackUint16(msg, off)
+	if err != nil {
+		return hdr, len(msg), msg, err
+	}
+	hdr.Ttl, off, err = unpackUint32(msg, off)
+	if err != nil {
+		return hdr, len(msg), msg, err
+	}
+	hdr.Rdlength, off, err = unpackUint16(msg, off)
+	if err != nil {
+		return hdr, len(msg), msg, err
+	}
+	msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength)
+	return hdr, off, msg, err
+}
+
+// pack packs an RR header, returning the offset to the end of the header.
+// See PackDomainName for documentation about the compression.
+func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
+	if off == len(msg) {
+		return off, nil
+	}
+
+	off, err = PackDomainName(hdr.Name, msg, off, compression, compress)
+	if err != nil {
+		return len(msg), err
+	}
+	off, err = packUint16(hdr.Rrtype, msg, off)
+	if err != nil {
+		return len(msg), err
+	}
+	off, err = packUint16(hdr.Class, msg, off)
+	if err != nil {
+		return len(msg), err
+	}
+	off, err = packUint32(hdr.Ttl, msg, off)
+	if err != nil {
+		return len(msg), err
+	}
+	off, err = packUint16(hdr.Rdlength, msg, off)
+	if err != nil {
+		return len(msg), err
+	}
+	return off, nil
+}
+
+// helper helper functions.
+
+// truncateMsgFromRdlength truncates msg to match the expected length of the RR.
+// Returns an error if msg is smaller than the expected size.
+func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) {
+	lenrd := off + int(rdlength)
+	if lenrd > len(msg) {
+		return msg, &Error{err: "overflowing header size"}
+	}
+	return msg[:lenrd], nil
+}
+
+func fromBase32(s []byte) (buf []byte, err error) {
+	buflen := base32.HexEncoding.DecodedLen(len(s))
+	buf = make([]byte, buflen)
+	n, err := base32.HexEncoding.Decode(buf, s)
+	buf = buf[:n]
+	return
+}
+
+func toBase32(b []byte) string { return base32.HexEncoding.EncodeToString(b) }
+
+func fromBase64(s []byte) (buf []byte, err error) {
+	buflen := base64.StdEncoding.DecodedLen(len(s))
+	buf = make([]byte, buflen)
+	n, err := base64.StdEncoding.Decode(buf, s)
+	buf = buf[:n]
+	return
+}
+
+func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) }
+
+// noRdata returns true if the Rdlength is zero.
+func noRdata(h RR_Header) bool { return h.Rdlength == 0 } + +func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) { + if off+1 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint8"} + } + return uint8(msg[off]), off + 1, nil +} + +func packUint8(i uint8, msg []byte, off int) (off1 int, err error) { + if off+1 > len(msg) { + return len(msg), &Error{err: "overflow packing uint8"} + } + msg[off] = byte(i) + return off + 1, nil +} + +func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) { + if off+2 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint16"} + } + return binary.BigEndian.Uint16(msg[off:]), off + 2, nil +} + +func packUint16(i uint16, msg []byte, off int) (off1 int, err error) { + if off+2 > len(msg) { + return len(msg), &Error{err: "overflow packing uint16"} + } + binary.BigEndian.PutUint16(msg[off:], i) + return off + 2, nil +} + +func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) { + if off+4 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint32"} + } + return binary.BigEndian.Uint32(msg[off:]), off + 4, nil +} + +func packUint32(i uint32, msg []byte, off int) (off1 int, err error) { + if off+4 > len(msg) { + return len(msg), &Error{err: "overflow packing uint32"} + } + binary.BigEndian.PutUint32(msg[off:], i) + return off + 4, nil +} + +func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) { + if off+6 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"} + } + // Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes) + i = (uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 | + uint64(msg[off+4])<<8 | uint64(msg[off+5]))) + off += 6 + return i, off, nil +} + +func packUint48(i uint64, msg []byte, off int) (off1 int, err error) { + if off+6 > len(msg) { + return len(msg), &Error{err: "overflow packing uint64 as uint48"} + } + msg[off] = byte(i >> 40) + msg[off+1] = byte(i >> 32) + msg[off+2] = byte(i >> 24) + msg[off+3] = byte(i >> 16) + msg[off+4] = byte(i >> 8) + msg[off+5] = byte(i) + off += 6 + return off, nil +} + +func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) { + if off+8 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint64"} + } + return binary.BigEndian.Uint64(msg[off:]), off + 8, nil +} + +func packUint64(i uint64, msg []byte, off int) (off1 int, err error) { + if off+8 > len(msg) { + return len(msg), &Error{err: "overflow packing uint64"} + } + binary.BigEndian.PutUint64(msg[off:], i) + off += 8 + return off, nil +} + +func unpackString(msg []byte, off int) (string, int, error) { + if off+1 > len(msg) { + return "", off, &Error{err: "overflow unpacking txt"} + } + l := int(msg[off]) + if off+l+1 > len(msg) { + return "", off, &Error{err: "overflow unpacking txt"} + } + s := make([]byte, 0, l) + for _, b := range msg[off+1 : off+1+l] { + switch b { + case '"', '\\': + s = append(s, '\\', b) + case '\t', '\r', '\n': + s = append(s, b) + default: + if b < 32 || b > 127 { // unprintable + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + } else { + s = append(s, b) + } + } + } + off += 1 + l + return string(s), off, nil +} + +func packString(s string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) + off, err := 
packTxtString(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackStringBase32(msg []byte, off, end int) (string, int, error) { + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking base32"} + } + s := toBase32(msg[off:end]) + return s, end, nil +} + +func packStringBase32(s string, msg []byte, off int) (int, error) { + b32, err := fromBase32([]byte(s)) + if err != nil { + return len(msg), err + } + if off+len(b32) > len(msg) { + return len(msg), &Error{err: "overflow packing base32"} + } + copy(msg[off:off+len(b32)], b32) + off += len(b32) + return off, nil +} + +func unpackStringBase64(msg []byte, off, end int) (string, int, error) { + // Rest of the RR is base64 encoded value, so we don't need an explicit length + // to be set. Thus far all RR's that have base64 encoded fields have those as their + // last one. What we do need is the end of the RR! + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking base64"} + } + s := toBase64(msg[off:end]) + return s, end, nil +} + +func packStringBase64(s string, msg []byte, off int) (int, error) { + b64, err := fromBase64([]byte(s)) + if err != nil { + return len(msg), err + } + if off+len(b64) > len(msg) { + return len(msg), &Error{err: "overflow packing base64"} + } + copy(msg[off:off+len(b64)], b64) + off += len(b64) + return off, nil +} + +func unpackStringHex(msg []byte, off, end int) (string, int, error) { + // Rest of the RR is hex encoded value, so we don't need an explicit length + // to be set. NSEC and TSIG have hex fields with a length field. + // What we do need is the end of the RR! + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking hex"} + } + + s := hex.EncodeToString(msg[off:end]) + return s, end, nil +} + +func packStringHex(s string, msg []byte, off int) (int, error) { + h, err := hex.DecodeString(s) + if err != nil { + return len(msg), err + } + if off+(len(h)) > len(msg) { + return len(msg), &Error{err: "overflow packing hex"} + } + copy(msg[off:off+len(h)], h) + off += len(h) + return off, nil +} + +func unpackStringTxt(msg []byte, off int) ([]string, int, error) { + txt, off, err := unpackTxt(msg, off) + if err != nil { + return nil, len(msg), err + } + return txt, off, nil +} + +func packStringTxt(s []string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many. 
+ off, err := packTxt(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) { + var edns []EDNS0 +Option: + code := uint16(0) + if off+4 > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + code = binary.BigEndian.Uint16(msg[off:]) + off += 2 + optlen := binary.BigEndian.Uint16(msg[off:]) + off += 2 + if off+int(optlen) > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + switch code { + case EDNS0NSID: + e := new(EDNS0_NSID) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0SUBNET, EDNS0SUBNETDRAFT: + e := new(EDNS0_SUBNET) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + if code == EDNS0SUBNETDRAFT { + e.DraftOption = true + } + case EDNS0COOKIE: + e := new(EDNS0_COOKIE) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0UL: + e := new(EDNS0_UL) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0LLQ: + e := new(EDNS0_LLQ) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0DAU: + e := new(EDNS0_DAU) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0DHU: + e := new(EDNS0_DHU) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0N3U: + e := new(EDNS0_N3U) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + default: + e := new(EDNS0_LOCAL) + e.Code = code + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + } + + if off < len(msg) { + goto Option + } + + return edns, off, nil +} + +func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) { + for _, el := range options { + b, err := el.pack() + if err != nil || off+3 > len(msg) { + return len(msg), &Error{err: "overflow packing opt"} + } + binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code + binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length + off += 4 + if off+len(b) > len(msg) { + copy(msg[off:], b) + off = len(msg) + continue + } + // Actual data + copy(msg[off:off+len(b)], b) + off += len(b) + } + return off, nil +} + +func unpackStringOctet(msg []byte, off int) (string, int, error) { + s := string(msg[off:]) + return s, len(msg), nil +} + +func packStringOctet(s string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) + off, err := packOctetString(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) { + var nsec []uint16 + length, window, lastwindow := 0, 0, -1 + for off < len(msg) { + if off+2 > len(msg) { + return nsec, len(msg), &Error{err: "overflow unpacking nsecx"} + } + window = int(msg[off]) + length = int(msg[off+1]) + off += 2 + if window <= 
lastwindow { + // RFC 4034: Blocks are present in the NSEC RR RDATA in + // increasing numerical order. + return nsec, len(msg), &Error{err: "out of order NSEC block"} + } + if length == 0 { + // RFC 4034: Blocks with no types present MUST NOT be included. + return nsec, len(msg), &Error{err: "empty NSEC block"} + } + if length > 32 { + return nsec, len(msg), &Error{err: "NSEC block too long"} + } + if off+length > len(msg) { + return nsec, len(msg), &Error{err: "overflowing NSEC block"} + } + + // Walk the bytes in the window and extract the type bits + for j := 0; j < length; j++ { + b := msg[off+j] + // Check the bits one by one, and set the type + if b&0x80 == 0x80 { + nsec = append(nsec, uint16(window*256+j*8+0)) + } + if b&0x40 == 0x40 { + nsec = append(nsec, uint16(window*256+j*8+1)) + } + if b&0x20 == 0x20 { + nsec = append(nsec, uint16(window*256+j*8+2)) + } + if b&0x10 == 0x10 { + nsec = append(nsec, uint16(window*256+j*8+3)) + } + if b&0x8 == 0x8 { + nsec = append(nsec, uint16(window*256+j*8+4)) + } + if b&0x4 == 0x4 { + nsec = append(nsec, uint16(window*256+j*8+5)) + } + if b&0x2 == 0x2 { + nsec = append(nsec, uint16(window*256+j*8+6)) + } + if b&0x1 == 0x1 { + nsec = append(nsec, uint16(window*256+j*8+7)) + } + } + off += length + lastwindow = window + } + return nsec, off, nil +} + +func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) { + if len(bitmap) == 0 { + return off, nil + } + var lastwindow, lastlength uint16 + for j := 0; j < len(bitmap); j++ { + t := bitmap[j] + window := t / 256 + length := (t-window*256)/8 + 1 + if window > lastwindow && lastlength != 0 { // New window, jump to the new offset + off += int(lastlength) + 2 + lastlength = 0 + } + if window < lastwindow || length < lastlength { + return len(msg), &Error{err: "nsec bits out of order"} + } + if off+2+int(length) > len(msg) { + return len(msg), &Error{err: "overflow packing nsec"} + } + // Setting the window # + msg[off] = byte(window) + // Setting the octets length + msg[off+1] = byte(length) + // Setting the bit value for the type in the right octet + msg[off+1+int(length)] |= byte(1 << (7 - (t % 8))) + lastwindow, lastlength = window, length + } + off += int(lastlength) + 2 + return off, nil +} + +func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) { + var ( + servers []string + s string + err error + ) + if end > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking domain names"} + } + for off < end { + s, off, err = UnpackDomainName(msg, off) + if err != nil { + return servers, len(msg), err + } + servers = append(servers, s) + } + return servers, off, nil +} + +func packDataDomainNames(names []string, msg []byte, off int, compression map[string]int, compress bool) (int, error) { + var err error + for j := 0; j < len(names); j++ { + off, err = PackDomainName(names[j], msg, off, compression, false && compress) + if err != nil { + return len(msg), err + } + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go new file mode 100644 index 000000000..6f10f3e65 --- /dev/null +++ b/vendor/github.com/miekg/dns/nsecx.go @@ -0,0 +1,119 @@ +package dns + +import ( + "crypto/sha1" + "hash" + "io" + "strings" +) + +type saltWireFmt struct { + Salt string `dns:"size-hex"` +} + +// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase. 
+func HashName(label string, ha uint8, iter uint16, salt string) string {
+	saltwire := new(saltWireFmt)
+	saltwire.Salt = salt
+	wire := make([]byte, DefaultMsgSize)
+	n, err := packSaltWire(saltwire, wire)
+	if err != nil {
+		return ""
+	}
+	wire = wire[:n]
+	name := make([]byte, 255)
+	off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false)
+	if err != nil {
+		return ""
+	}
+	name = name[:off]
+	var s hash.Hash
+	switch ha {
+	case SHA1:
+		s = sha1.New()
+	default:
+		return ""
+	}
+
+	// k = 0
+	name = append(name, wire...)
+	io.WriteString(s, string(name))
+	nsec3 := s.Sum(nil)
+	// k > 0
+	for k := uint16(0); k < iter; k++ {
+		s.Reset()
+		nsec3 = append(nsec3, wire...)
+		io.WriteString(s, string(nsec3))
+		nsec3 = s.Sum(nil)
+	}
+	return toBase32(nsec3)
+}
+
+// Denialer is an interface that should be implemented by types that are used to deny
+// answers in DNSSEC.
+type Denialer interface {
+	// Cover will check if the (unhashed) name is being covered by this NSEC or NSEC3.
+	Cover(name string) bool
+	// Match will check if the ownername matches the (unhashed) name for this NSEC or NSEC3.
+	Match(name string) bool
+}
+
+// Cover implements the Denialer interface.
+func (rr *NSEC) Cover(name string) bool {
+	return true
+}
+
+// Match implements the Denialer interface.
+func (rr *NSEC) Match(name string) bool {
+	return true
+}
+
+// Cover implements the Denialer interface.
+func (rr *NSEC3) Cover(name string) bool {
+	// FIXME(miek): check if the zones match
+	// FIXME(miek): check if we're not dealing with parent nsec3
+	hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
+	labels := Split(rr.Hdr.Name)
+	if len(labels) < 2 {
+		return false
+	}
+	hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the dot
+	if hash == rr.NextDomain {
+		return false // empty interval
+	}
+	if hash > rr.NextDomain { // last name, points to apex
+		// hname > hash
+		// hname > rr.NextDomain
+		// TODO(miek)
+	}
+	if hname <= hash {
+		return false
+	}
+	if hname >= rr.NextDomain {
+		return false
+	}
+	return true
+}
+
+// Match implements the Denialer interface.
+func (rr *NSEC3) Match(name string) bool {
+	// FIXME(miek): Check if we are in the same zone
+	hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
+	labels := Split(rr.Hdr.Name)
+	if len(labels) < 2 {
+		return false
+	}
+	hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the .
+	if hash == hname {
+		return true
+	}
+	return false
+}
+
+func packSaltWire(sw *saltWireFmt, msg []byte) (int, error) {
+	off, err := packStringHex(sw.Salt, msg, 0)
+	if err != nil {
+		return off, err
+	}
+	return off, nil
+}
diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go
new file mode 100644
index 000000000..6b08e6e95
--- /dev/null
+++ b/vendor/github.com/miekg/dns/privaterr.go
@@ -0,0 +1,149 @@
+package dns
+
+import (
+	"fmt"
+	"strings"
+)
+
+// PrivateRdata is an interface used for implementing "Private Use" RR types, see
+// RFC 6895. This allows one to experiment with new RR types, without requesting an
+// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
+type PrivateRdata interface {
+	// String returns the text presentation of the Rdata of the Private RR.
+	String() string
+	// Parse parses the Rdata of the private RR.
+	Parse([]string) error
+	// Pack is used when packing a private RR into a buffer.
+	Pack([]byte) (int, error)
+	// Unpack is used when unpacking a private RR from a buffer.
+ // TODO(miek): diff. signature than Pack, see edns0.go for instance. + Unpack([]byte) (int, error) + // Copy copies the Rdata. + Copy(PrivateRdata) error + // Len returns the length in octets of the Rdata. + Len() int +} + +// PrivateRR represents an RR that uses a PrivateRdata user-defined type. +// It mocks normal RRs and implements dns.RR interface. +type PrivateRR struct { + Hdr RR_Header + Data PrivateRdata +} + +func mkPrivateRR(rrtype uint16) *PrivateRR { + // Panics if RR is not an instance of PrivateRR. + rrfunc, ok := TypeToRR[rrtype] + if !ok { + panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype)) + } + + anyrr := rrfunc() + switch rr := anyrr.(type) { + case *PrivateRR: + return rr + } + panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr)) +} + +// Header return the RR header of r. +func (r *PrivateRR) Header() *RR_Header { return &r.Hdr } + +func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() } + +// Private len and copy parts to satisfy RR interface. +func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() } +func (r *PrivateRR) copy() RR { + // make new RR like this: + rr := mkPrivateRR(r.Hdr.Rrtype) + newh := r.Hdr.copyHeader() + rr.Hdr = *newh + + err := r.Data.Copy(rr.Data) + if err != nil { + panic("dns: got value that could not be used to copy Private rdata") + } + return rr +} +func (r *PrivateRR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := r.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + n, err := r.Data.Pack(msg[off:]) + if err != nil { + return len(msg), err + } + off += n + r.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +// PrivateHandle registers a private resource record type. It requires +// string and numeric representation of private RR type and generator function as argument. +func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) { + rtypestr = strings.ToUpper(rtypestr) + + TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} } + TypeToString[rtype] = rtypestr + StringToType[rtypestr] = rtype + + typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) { + if noRdata(h) { + return &h, off, nil + } + var err error + + rr := mkPrivateRR(h.Rrtype) + rr.Hdr = h + + off1, err := rr.Data.Unpack(msg[off:]) + off += off1 + if err != nil { + return rr, off, err + } + return rr, off, err + } + + setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := mkPrivateRR(h.Rrtype) + rr.Hdr = h + + var l lex + text := make([]string, 0, 2) // could be 0..N elements, median is probably 1 + Fetch: + for { + // TODO(miek): we could also be returning _QUOTE, this might or might not + // be an issue (basically parsing TXT becomes hard) + switch l = <-c; l.value { + case zNewline, zEOF: + break Fetch + case zString: + text = append(text, l.token) + } + } + + err := rr.Data.Parse(text) + if err != nil { + return nil, &ParseError{f, err.Error(), l}, "" + } + + return rr, nil, "" + } + + typeToparserFunc[rtype] = parserFunc{setPrivateRR, true} +} + +// PrivateHandleRemove removes defenitions required to support private RR type. 
+func PrivateHandleRemove(rtype uint16) { + rtypestr, ok := TypeToString[rtype] + if ok { + delete(TypeToRR, rtype) + delete(TypeToString, rtype) + delete(typeToparserFunc, rtype) + delete(StringToType, rtypestr) + delete(typeToUnpack, rtype) + } + return +} diff --git a/vendor/github.com/miekg/dns/rawmsg.go b/vendor/github.com/miekg/dns/rawmsg.go new file mode 100644 index 000000000..6e21fba7e --- /dev/null +++ b/vendor/github.com/miekg/dns/rawmsg.go @@ -0,0 +1,49 @@ +package dns + +import "encoding/binary" + +// rawSetRdlength sets the rdlength in the header of +// the RR. The offset 'off' must be positioned at the +// start of the header of the RR, 'end' must be the +// end of the RR. +func rawSetRdlength(msg []byte, off, end int) bool { + l := len(msg) +Loop: + for { + if off+1 > l { + return false + } + c := int(msg[off]) + off++ + switch c & 0xC0 { + case 0x00: + if c == 0x00 { + // End of the domainname + break Loop + } + if off+c > l { + return false + } + off += c + + case 0xC0: + // pointer, next byte included, ends domainname + off++ + break Loop + } + } + // The domainname has been seen, we at the start of the fixed part in the header. + // Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length. + off += 2 + 2 + 4 + if off+2 > l { + return false + } + //off+1 is the end of the header, 'end' is the end of the rr + //so 'end' - 'off+2' is the length of the rdata + rdatalen := end - (off + 2) + if rdatalen > 0xFFFF { + return false + } + binary.BigEndian.PutUint16(msg[off:], uint16(rdatalen)) + return true +} diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/miekg/dns/reverse.go new file mode 100644 index 000000000..099dac948 --- /dev/null +++ b/vendor/github.com/miekg/dns/reverse.go @@ -0,0 +1,38 @@ +package dns + +// StringToType is the reverse of TypeToString, needed for string parsing. +var StringToType = reverseInt16(TypeToString) + +// StringToClass is the reverse of ClassToString, needed for string parsing. +var StringToClass = reverseInt16(ClassToString) + +// Map of opcodes strings. +var StringToOpcode = reverseInt(OpcodeToString) + +// Map of rcodes strings. +var StringToRcode = reverseInt(RcodeToString) + +// Reverse a map +func reverseInt8(m map[uint8]string) map[string]uint8 { + n := make(map[string]uint8, len(m)) + for u, s := range m { + n[s] = u + } + return n +} + +func reverseInt16(m map[uint16]string) map[string]uint16 { + n := make(map[string]uint16, len(m)) + for u, s := range m { + n[s] = u + } + return n +} + +func reverseInt(m map[int]string) map[string]int { + n := make(map[string]int, len(m)) + for u, s := range m { + n[s] = u + } + return n +} diff --git a/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/miekg/dns/sanitize.go new file mode 100644 index 000000000..b489f3f05 --- /dev/null +++ b/vendor/github.com/miekg/dns/sanitize.go @@ -0,0 +1,84 @@ +package dns + +// Dedup removes identical RRs from rrs. It preserves the original ordering. +// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies +// rrs. +// m is used to store the RRs temporay. If it is nil a new map will be allocated. +func Dedup(rrs []RR, m map[string]RR) []RR { + if m == nil { + m = make(map[string]RR) + } + // Save the keys, so we don't have to call normalizedString twice. + keys := make([]*string, 0, len(rrs)) + + for _, r := range rrs { + key := normalizedString(r) + keys = append(keys, &key) + if _, ok := m[key]; ok { + // Shortest TTL wins. 
+ if m[key].Header().Ttl > r.Header().Ttl { + m[key].Header().Ttl = r.Header().Ttl + } + continue + } + + m[key] = r + } + // If the length of the result map equals the amount of RRs we got, + // it means they were all different. We can then just return the original rrset. + if len(m) == len(rrs) { + return rrs + } + + j := 0 + for i, r := range rrs { + // If keys[i] lives in the map, we should copy and remove it. + if _, ok := m[*keys[i]]; ok { + delete(m, *keys[i]) + rrs[j] = r + j++ + } + + if len(m) == 0 { + break + } + } + + return rrs[:j] +} + +// normalizedString returns a normalized string from r. The TTL +// is removed and the domain name is lowercased. We go from this: +// DomainNameTTLCLASSTYPERDATA to: +// lowercasenameCLASSTYPE... +func normalizedString(r RR) string { + // A string Go DNS makes has: domainnameTTL... + b := []byte(r.String()) + + // find the first non-escaped tab, then another, so we capture where the TTL lives. + esc := false + ttlStart, ttlEnd := 0, 0 + for i := 0; i < len(b) && ttlEnd == 0; i++ { + switch { + case b[i] == '\\': + esc = !esc + case b[i] == '\t' && !esc: + if ttlStart == 0 { + ttlStart = i + continue + } + if ttlEnd == 0 { + ttlEnd = i + } + case b[i] >= 'A' && b[i] <= 'Z' && !esc: + b[i] += 32 + default: + esc = false + } + } + + // remove TTL. + copy(b[ttlStart:], b[ttlEnd:]) + cut := ttlEnd - ttlStart + return string(b[:len(b)-cut]) +} diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go new file mode 100644 index 000000000..0e83797fb --- /dev/null +++ b/vendor/github.com/miekg/dns/scan.go @@ -0,0 +1,974 @@ +package dns + +import ( + "io" + "log" + "os" + "strconv" + "strings" +) + +type debugging bool + +const debug debugging = false + +func (d debugging) Printf(format string, args ...interface{}) { + if d { + log.Printf(format, args...) + } +} + +const maxTok = 2048 // Largest token we can return. +const maxUint16 = 1<<16 - 1 + +// Tokinize a RFC 1035 zone file. The tokenizer will normalize it: +// * Add ownernames if they are left blank; +// * Suppress sequences of spaces; +// * Make each RR fit on one line (_NEWLINE is send as last) +// * Handle comments: ; +// * Handle braces - anywhere. +const ( + // Zonefile + zEOF = iota + zString + zBlank + zQuote + zNewline + zRrtpe + zOwner + zClass + zDirOrigin // $ORIGIN + zDirTtl // $TTL + zDirInclude // $INCLUDE + zDirGenerate // $GENERATE + + // Privatekey file + zValue + zKey + + zExpectOwnerDir // Ownername + zExpectOwnerBl // Whitespace after the ownername + zExpectAny // Expect rrtype, ttl or class + zExpectAnyNoClass // Expect rrtype or ttl + zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS + zExpectAnyNoTtl // Expect rrtype or class + zExpectAnyNoTtlBl // Whitespace after _EXPECT_ANY_NOTTL + zExpectRrtype // Expect rrtype + zExpectRrtypeBl // Whitespace BEFORE rrtype + zExpectRdata // The first element of the rdata + zExpectDirTtlBl // Space after directive $TTL + zExpectDirTtl // Directive $TTL + zExpectDirOriginBl // Space after directive $ORIGIN + zExpectDirOrigin // Directive $ORIGIN + zExpectDirIncludeBl // Space after directive $INCLUDE + zExpectDirInclude // Directive $INCLUDE + zExpectDirGenerate // Directive $GENERATE + zExpectDirGenerateBl // Space after directive $GENERATE +) + +// ParseError is a parsing error. It contains the parse error and the location in the io.Reader +// where the error occurred. 
+type ParseError struct { + file string + err string + lex lex +} + +func (e *ParseError) Error() (s string) { + if e.file != "" { + s = e.file + ": " + } + s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " + + strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column) + return +} + +type lex struct { + token string // text of the token + tokenUpper string // uppercase text of the token + length int // length of the token + err bool // when true, token text has lexer error + value uint8 // value: zString, _BLANK, etc. + line int // line in the file + column int // column in the file + torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar + comment string // any comment text seen +} + +// Token holds the token that are returned when a zone file is parsed. +type Token struct { + // The scanned resource record when error is not nil. + RR + // When an error occurred, this has the error specifics. + Error *ParseError + // A potential comment positioned after the RR and on the same line. + Comment string +} + +// NewRR reads the RR contained in the string s. Only the first RR is +// returned. If s contains no RR, return nil with no error. The class +// defaults to IN and TTL defaults to 3600. The full zone file syntax +// like $TTL, $ORIGIN, etc. is supported. All fields of the returned +// RR are set, except RR.Header().Rdlength which is set to 0. +func NewRR(s string) (RR, error) { + if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline + return ReadRR(strings.NewReader(s+"\n"), "") + } + return ReadRR(strings.NewReader(s), "") +} + +// ReadRR reads the RR contained in q. +// See NewRR for more documentation. +func ReadRR(q io.Reader, filename string) (RR, error) { + r := <-parseZoneHelper(q, ".", filename, 1) + if r == nil { + return nil, nil + } + + if r.Error != nil { + return nil, r.Error + } + return r.RR, nil +} + +// ParseZone reads a RFC 1035 style zonefile from r. It returns *Tokens on the +// returned channel, which consist out the parsed RR, a potential comment or an error. +// If there is an error the RR is nil. The string file is only used +// in error reporting. The string origin is used as the initial origin, as +// if the file would start with: $ORIGIN origin . +// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported. +// The channel t is closed by ParseZone when the end of r is reached. +// +// Basic usage pattern when reading from a string (z) containing the +// zone data: +// +// for x := range dns.ParseZone(strings.NewReader(z), "", "") { +// if x.Error != nil { +// // log.Println(x.Error) +// } else { +// // Do something with x.RR +// } +// } +// +// Comments specified after an RR (and on the same line!) are returned too: +// +// foo. IN A 10.0.0.1 ; this is a comment +// +// The text "; this is comment" is returned in Token.Comment. Comments inside the +// RR are discarded. Comments on a line by themselves are discarded too. 
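+//
+// A minimal sketch of origin handling (the zone text and origin below are only
+// examples): relative owner names are qualified against the supplied origin.
+//
+//	z := "$TTL 300\nwww IN A 192.0.2.1\n"
+//	for x := range dns.ParseZone(strings.NewReader(z), "example.org.", "") {
+//		if x.Error == nil {
+//			fmt.Println(x.RR.Header().Name) // www.example.org.
+//		}
+//	}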
+func ParseZone(r io.Reader, origin, file string) chan *Token { + return parseZoneHelper(r, origin, file, 10000) +} + +func parseZoneHelper(r io.Reader, origin, file string, chansize int) chan *Token { + t := make(chan *Token, chansize) + go parseZone(r, origin, file, t, 0) + return t +} + +func parseZone(r io.Reader, origin, f string, t chan *Token, include int) { + defer func() { + if include == 0 { + close(t) + } + }() + s := scanInit(r) + c := make(chan lex) + // Start the lexer + go zlexer(s, c) + // 6 possible beginnings of a line, _ is a space + // 0. zRRTYPE -> all omitted until the rrtype + // 1. zOwner _ zRrtype -> class/ttl omitted + // 2. zOwner _ zString _ zRrtype -> class omitted + // 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class + // 4. zOwner _ zClass _ zRrtype -> ttl omitted + // 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed) + // After detecting these, we know the zRrtype so we can jump to functions + // handling the rdata for each of these types. + + if origin == "" { + origin = "." + } + origin = Fqdn(origin) + if _, ok := IsDomainName(origin); !ok { + t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}} + return + } + + st := zExpectOwnerDir // initial state + var h RR_Header + var defttl uint32 = defaultTtl + var prevName string + for l := range c { + // Lexer spotted an error already + if l.err == true { + t <- &Token{Error: &ParseError{f, l.token, l}} + return + + } + switch st { + case zExpectOwnerDir: + // We can also expect a directive, like $TTL or $ORIGIN + h.Ttl = defttl + h.Class = ClassINET + switch l.value { + case zNewline: + st = zExpectOwnerDir + case zOwner: + h.Name = l.token + if l.token[0] == '@' { + h.Name = origin + prevName = h.Name + st = zExpectOwnerBl + break + } + if h.Name[l.length-1] != '.' { + h.Name = appendOrigin(h.Name, origin) + } + _, ok := IsDomainName(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "bad owner name", l}} + return + } + prevName = h.Name + st = zExpectOwnerBl + case zDirTtl: + st = zExpectDirTtlBl + case zDirOrigin: + st = zExpectDirOriginBl + case zDirInclude: + st = zExpectDirIncludeBl + case zDirGenerate: + st = zExpectDirGenerateBl + case zRrtpe: + h.Name = prevName + h.Rrtype = l.torc + st = zExpectRdata + case zClass: + h.Name = prevName + h.Class = l.torc + st = zExpectAnyNoClassBl + case zBlank: + // Discard, can happen when there is nothing on the + // line except the RR type + case zString: + ttl, ok := stringToTtl(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + // Don't about the defttl, we should take the $TTL value + // defttl = ttl + st = zExpectAnyNoTtlBl + + default: + t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}} + return + } + case zExpectDirIncludeBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}} + return + } + st = zExpectDirInclude + case zExpectDirInclude: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}} + return + } + neworigin := origin // There may be optionally a new origin set after the filename, if not use current one + l := <-c + switch l.value { + case zBlank: + l := <-c + if l.value == zString { + if _, ok := IsDomainName(l.token); !ok || l.length == 0 || l.err { + t <- &Token{Error: &ParseError{f, "bad origin name", l}} + return + } + // a new origin is specified. + if l.token[l.length-1] != '.' { + if origin != "." { // Prevent .. 
endings + neworigin = l.token + "." + origin + } else { + neworigin = l.token + origin + } + } else { + neworigin = l.token + } + } + case zNewline, zEOF: + // Ok + default: + t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}} + return + } + // Start with the new file + r1, e1 := os.Open(l.token) + if e1 != nil { + t <- &Token{Error: &ParseError{f, "failed to open `" + l.token + "'", l}} + return + } + if include+1 > 7 { + t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}} + return + } + parseZone(r1, l.token, neworigin, t, include+1) + st = zExpectOwnerDir + case zExpectDirTtlBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}} + return + } + st = zExpectDirTtl + case zExpectDirTtl: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} + return + } + if e, _ := slurpRemainder(c, f); e != nil { + t <- &Token{Error: e} + return + } + ttl, ok := stringToTtl(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} + return + } + defttl = ttl + st = zExpectOwnerDir + case zExpectDirOriginBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}} + return + } + st = zExpectDirOrigin + case zExpectDirOrigin: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}} + return + } + if e, _ := slurpRemainder(c, f); e != nil { + t <- &Token{Error: e} + } + if _, ok := IsDomainName(l.token); !ok { + t <- &Token{Error: &ParseError{f, "bad origin name", l}} + return + } + if l.token[l.length-1] != '.' { + if origin != "." { // Prevent .. endings + origin = l.token + "." + origin + } else { + origin = l.token + origin + } + } else { + origin = l.token + } + st = zExpectOwnerDir + case zExpectDirGenerateBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}} + return + } + st = zExpectDirGenerate + case zExpectDirGenerate: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}} + return + } + if errMsg := generate(l, c, t, origin); errMsg != "" { + t <- &Token{Error: &ParseError{f, errMsg, l}} + return + } + st = zExpectOwnerDir + case zExpectOwnerBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after owner", l}} + return + } + st = zExpectAny + case zExpectAny: + switch l.value { + case zRrtpe: + h.Rrtype = l.torc + st = zExpectRdata + case zClass: + h.Class = l.torc + st = zExpectAnyNoClassBl + case zString: + ttl, ok := stringToTtl(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + // defttl = ttl // don't set the defttl here + st = zExpectAnyNoTtlBl + default: + t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}} + return + } + case zExpectAnyNoClassBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before class", l}} + return + } + st = zExpectAnyNoClass + case zExpectAnyNoTtlBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before TTL", l}} + return + } + st = zExpectAnyNoTtl + case zExpectAnyNoTtl: + switch l.value { + case zClass: + h.Class = l.torc + st = zExpectRrtypeBl + case zRrtpe: + h.Rrtype = l.torc + st = zExpectRdata + default: + t <- &Token{Error: &ParseError{f, "expecting RR type or class, not this...", l}} + return + } + case zExpectAnyNoClass: + switch l.value { + case 
zString: + ttl, ok := stringToTtl(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + // defttl = ttl // don't set the def ttl anymore + st = zExpectRrtypeBl + case zRrtpe: + h.Rrtype = l.torc + st = zExpectRdata + default: + t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}} + return + } + case zExpectRrtypeBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before RR type", l}} + return + } + st = zExpectRrtype + case zExpectRrtype: + if l.value != zRrtpe { + t <- &Token{Error: &ParseError{f, "unknown RR type", l}} + return + } + h.Rrtype = l.torc + st = zExpectRdata + case zExpectRdata: + r, e, c1 := setRR(h, c, origin, f) + if e != nil { + // If e.lex is nil than we have encounter a unknown RR type + // in that case we substitute our current lex token + if e.lex.token == "" && e.lex.value == 0 { + e.lex = l // Uh, dirty + } + t <- &Token{Error: e} + return + } + t <- &Token{RR: r, Comment: c1} + st = zExpectOwnerDir + } + } + // If we get here, we and the h.Rrtype is still zero, we haven't parsed anything, this + // is not an error, because an empty zone file is still a zone file. +} + +// zlexer scans the sourcefile and returns tokens on the channel c. +func zlexer(s *scan, c chan lex) { + var l lex + str := make([]byte, maxTok) // Should be enough for any token + stri := 0 // Offset in str (0 means empty) + com := make([]byte, maxTok) // Hold comment text + comi := 0 + quote := false + escape := false + space := false + commt := false + rrtype := false + owner := true + brace := 0 + x, err := s.tokenText() + defer close(c) + for err == nil { + l.column = s.position.Column + l.line = s.position.Line + if stri >= maxTok { + l.token = "token length insufficient for parsing" + l.err = true + debug.Printf("[%+v]", l.token) + c <- l + return + } + if comi >= maxTok { + l.token = "comment length insufficient for parsing" + l.err = true + debug.Printf("[%+v]", l.token) + c <- l + return + } + + switch x { + case ' ', '\t': + if escape { + escape = false + str[stri] = x + stri++ + break + } + if quote { + // Inside quotes this is legal + str[stri] = x + stri++ + break + } + if commt { + com[comi] = x + comi++ + break + } + if stri == 0 { + // Space directly in the beginning, handled in the grammar + } else if owner { + // If we have a string and its the first, make it an owner + l.value = zOwner + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + // escape $... 
start with a \ not a $, so this will work + switch l.tokenUpper { + case "$TTL": + l.value = zDirTtl + case "$ORIGIN": + l.value = zDirOrigin + case "$INCLUDE": + l.value = zDirInclude + case "$GENERATE": + l.value = zDirGenerate + } + debug.Printf("[7 %+v]", l.token) + c <- l + } else { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + if !rrtype { + if t, ok := StringToType[l.tokenUpper]; ok { + l.value = zRrtpe + l.torc = t + rrtype = true + } else { + if strings.HasPrefix(l.tokenUpper, "TYPE") { + t, ok := typeToInt(l.token) + if !ok { + l.token = "unknown RR type" + l.err = true + c <- l + return + } + l.value = zRrtpe + l.torc = t + } + } + if t, ok := StringToClass[l.tokenUpper]; ok { + l.value = zClass + l.torc = t + } else { + if strings.HasPrefix(l.tokenUpper, "CLASS") { + t, ok := classToInt(l.token) + if !ok { + l.token = "unknown class" + l.err = true + c <- l + return + } + l.value = zClass + l.torc = t + } + } + } + debug.Printf("[6 %+v]", l.token) + c <- l + } + stri = 0 + // I reverse space stuff here + if !space && !commt { + l.value = zBlank + l.token = " " + l.length = 1 + debug.Printf("[5 %+v]", l.token) + c <- l + } + owner = false + space = true + case ';': + if escape { + escape = false + str[stri] = x + stri++ + break + } + if quote { + // Inside quotes this is legal + str[stri] = x + stri++ + break + } + if stri > 0 { + l.value = zString + l.token = string(str[:stri]) + l.length = stri + debug.Printf("[4 %+v]", l.token) + c <- l + stri = 0 + } + commt = true + com[comi] = ';' + comi++ + case '\r': + escape = false + if quote { + str[stri] = x + stri++ + break + } + // discard if outside of quotes + case '\n': + escape = false + // Escaped newline + if quote { + str[stri] = x + stri++ + break + } + // inside quotes this is legal + if commt { + // Reset a comment + commt = false + rrtype = false + stri = 0 + // If not in a brace this ends the comment AND the RR + if brace == 0 { + owner = true + owner = true + l.value = zNewline + l.token = "\n" + l.length = 1 + l.comment = string(com[:comi]) + debug.Printf("[3 %+v %+v]", l.token, l.comment) + c <- l + l.comment = "" + comi = 0 + break + } + com[comi] = ' ' // convert newline to space + comi++ + break + } + + if brace == 0 { + // If there is previous text, we should output it here + if stri != 0 { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + + l.length = stri + if !rrtype { + if t, ok := StringToType[l.tokenUpper]; ok { + l.value = zRrtpe + l.torc = t + rrtype = true + } + } + debug.Printf("[2 %+v]", l.token) + c <- l + } + l.value = zNewline + l.token = "\n" + l.length = 1 + debug.Printf("[1 %+v]", l.token) + c <- l + stri = 0 + commt = false + rrtype = false + owner = true + comi = 0 + } + case '\\': + // comments do not get escaped chars, everything is copied + if commt { + com[comi] = x + comi++ + break + } + // something already escaped must be in string + if escape { + str[stri] = x + stri++ + escape = false + break + } + // something escaped outside of string gets added to string + str[stri] = x + stri++ + escape = true + case '"': + if commt { + com[comi] = x + comi++ + break + } + if escape { + str[stri] = x + stri++ + escape = false + break + } + space = false + // send previous gathered text and the quote + if stri != 0 { + l.value = zString + l.token = string(str[:stri]) + l.length = stri + + debug.Printf("[%+v]", l.token) + c <- l + stri = 0 + } + + // send quote itself as separate token + 
l.value = zQuote + l.token = "\"" + l.length = 1 + c <- l + quote = !quote + case '(', ')': + if commt { + com[comi] = x + comi++ + break + } + if escape { + str[stri] = x + stri++ + escape = false + break + } + if quote { + str[stri] = x + stri++ + break + } + switch x { + case ')': + brace-- + if brace < 0 { + l.token = "extra closing brace" + l.err = true + debug.Printf("[%+v]", l.token) + c <- l + return + } + case '(': + brace++ + } + default: + escape = false + if commt { + com[comi] = x + comi++ + break + } + str[stri] = x + stri++ + space = false + } + x, err = s.tokenText() + } + if stri > 0 { + // Send remainder + l.token = string(str[:stri]) + l.length = stri + l.value = zString + debug.Printf("[%+v]", l.token) + c <- l + } +} + +// Extract the class number from CLASSxx +func classToInt(token string) (uint16, bool) { + offset := 5 + if len(token) < offset+1 { + return 0, false + } + class, ok := strconv.Atoi(token[offset:]) + if ok != nil || class > maxUint16 { + return 0, false + } + return uint16(class), true +} + +// Extract the rr number from TYPExxx +func typeToInt(token string) (uint16, bool) { + offset := 4 + if len(token) < offset+1 { + return 0, false + } + typ, ok := strconv.Atoi(token[offset:]) + if ok != nil || typ > maxUint16 { + return 0, false + } + return uint16(typ), true +} + +// Parse things like 2w, 2m, etc, Return the time in seconds. +func stringToTtl(token string) (uint32, bool) { + s := uint32(0) + i := uint32(0) + for _, c := range token { + switch c { + case 's', 'S': + s += i + i = 0 + case 'm', 'M': + s += i * 60 + i = 0 + case 'h', 'H': + s += i * 60 * 60 + i = 0 + case 'd', 'D': + s += i * 60 * 60 * 24 + i = 0 + case 'w', 'W': + s += i * 60 * 60 * 24 * 7 + i = 0 + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i *= 10 + i += uint32(c) - '0' + default: + return 0, false + } + } + return s + i, true +} + +// Parse LOC records' [.][mM] into a +// mantissa exponent format. Token should contain the entire +// string (i.e. no spaces allowed) +func stringToCm(token string) (e, m uint8, ok bool) { + if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' { + token = token[0 : len(token)-1] + } + s := strings.SplitN(token, ".", 2) + var meters, cmeters, val int + var err error + switch len(s) { + case 2: + if cmeters, err = strconv.Atoi(s[1]); err != nil { + return + } + fallthrough + case 1: + if meters, err = strconv.Atoi(s[0]); err != nil { + return + } + case 0: + // huh? + return 0, 0, false + } + ok = true + if meters > 0 { + e = 2 + val = meters + } else { + e = 0 + val = cmeters + } + for val > 10 { + e++ + val /= 10 + } + if e > 9 { + ok = false + } + m = uint8(val) + return +} + +func appendOrigin(name, origin string) string { + if origin == "." { + return name + origin + } + return name + "." + origin +} + +// LOC record helper function +func locCheckNorth(token string, latitude uint32) (uint32, bool) { + switch token { + case "n", "N": + return LOC_EQUATOR + latitude, true + case "s", "S": + return LOC_EQUATOR - latitude, true + } + return latitude, false +} + +// LOC record helper function +func locCheckEast(token string, longitude uint32) (uint32, bool) { + switch token { + case "e", "E": + return LOC_EQUATOR + longitude, true + case "w", "W": + return LOC_EQUATOR - longitude, true + } + return longitude, false +} + +// "Eat" the rest of the "line". 
Return potential comments +func slurpRemainder(c chan lex, f string) (*ParseError, string) { + l := <-c + com := "" + switch l.value { + case zBlank: + l = <-c + com = l.comment + if l.value != zNewline && l.value != zEOF { + return &ParseError{f, "garbage after rdata", l}, "" + } + case zNewline: + com = l.comment + case zEOF: + default: + return &ParseError{f, "garbage after rdata", l}, "" + } + return nil, com +} + +// Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64" +// Used for NID and L64 record. +func stringToNodeID(l lex) (uint64, *ParseError) { + if len(l.token) < 19 { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + // There must be three colons at fixes postitions, if not its a parse error + if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19] + u, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + return u, nil +} diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go new file mode 100644 index 000000000..e521dc063 --- /dev/null +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -0,0 +1,2143 @@ +package dns + +import ( + "encoding/base64" + "net" + "strconv" + "strings" +) + +type parserFunc struct { + // Func defines the function that parses the tokens and returns the RR + // or an error. The last string contains any comments in the line as + // they returned by the lexer as well. + Func func(h RR_Header, c chan lex, origin string, file string) (RR, *ParseError, string) + // Signals if the RR ending is of variable length, like TXT or records + // that have Hexadecimal or Base64 as their last element in the Rdata. Records + // that have a fixed ending or for instance A, AAAA, SOA and etc. + Variable bool +} + +// Parse the rdata of each rrtype. +// All data from the channel c is either zString or zBlank. +// After the rdata there may come a zBlank and then a zNewline +// or immediately a zNewline. If this is not the case we flag +// an *ParseError: garbage after rdata. 
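+//
+// When a type has no registered parser the record falls through to the
+// RFC 3597 generic syntax, e.g. (example data only):
+//
+//	example.org. 3600 IN TYPE730 \# 4 0a000001
+//
+// i.e. a literal \#, the rdata length in octets and the rdata in hex.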
+func setRR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + parserfunc, ok := typeToparserFunc[h.Rrtype] + if ok { + r, e, cm := parserfunc.Func(h, c, o, f) + if parserfunc.Variable { + return r, e, cm + } + if e != nil { + return nil, e, "" + } + e, cm = slurpRemainder(c, f) + if e != nil { + return nil, e, "" + } + return r, nil, cm + } + // RFC3957 RR (Unknown RR handling) + return setRFC3597(h, c, o, f) +} + +// A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) +// or an error +func endingToString(c chan lex, errstr, f string) (string, *ParseError, string) { + s := "" + l := <-c // zString + for l.value != zNewline && l.value != zEOF { + if l.err { + return s, &ParseError{f, errstr, l}, "" + } + switch l.value { + case zString: + s += l.token + case zBlank: // Ok + default: + return "", &ParseError{f, errstr, l}, "" + } + l = <-c + } + return s, nil, l.comment +} + +// A remainder of the rdata with embedded spaces, return the parsed string slice (sans the spaces) +// or an error +func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, string) { + // Get the remaining data until we see a zNewline + quote := false + l := <-c + var s []string + if l.err { + return s, &ParseError{f, errstr, l}, "" + } + switch l.value == zQuote { + case true: // A number of quoted string + s = make([]string, 0) + empty := true + for l.value != zNewline && l.value != zEOF { + if l.err { + return nil, &ParseError{f, errstr, l}, "" + } + switch l.value { + case zString: + empty = false + if len(l.token) > 255 { + // split up tokens that are larger than 255 into 255-chunks + sx := []string{} + p, i := 0, 255 + for { + if i <= len(l.token) { + sx = append(sx, l.token[p:i]) + } else { + sx = append(sx, l.token[p:]) + break + + } + p, i = p+255, i+255 + } + s = append(s, sx...) + break + } + + s = append(s, l.token) + case zBlank: + if quote { + // zBlank can only be seen in between txt parts. + return nil, &ParseError{f, errstr, l}, "" + } + case zQuote: + if empty && quote { + s = append(s, "") + } + quote = !quote + empty = true + default: + return nil, &ParseError{f, errstr, l}, "" + } + l = <-c + } + if quote { + return nil, &ParseError{f, errstr, l}, "" + } + case false: // Unquoted text record + s = make([]string, 1) + for l.value != zNewline && l.value != zEOF { + if l.err { + return s, &ParseError{f, errstr, l}, "" + } + s[0] += l.token + l = <-c + } + } + return s, nil, l.comment +} + +func setA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(A) + rr.Hdr = h + + l := <-c + if l.length == 0 { // Dynamic updates. + return rr, nil, "" + } + rr.A = net.ParseIP(l.token) + if rr.A == nil || l.err { + return nil, &ParseError{f, "bad A A", l}, "" + } + return rr, nil, "" +} + +func setAAAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(AAAA) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + rr.AAAA = net.ParseIP(l.token) + if rr.AAAA == nil || l.err { + return nil, &ParseError{f, "bad AAAA AAAA", l}, "" + } + return rr, nil, "" +} + +func setNS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NS) + rr.Hdr = h + + l := <-c + rr.Ns = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Ns = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad NS Ns", l}, "" + } + if rr.Ns[l.length-1] != '.' 
{ + rr.Ns = appendOrigin(rr.Ns, o) + } + return rr, nil, "" +} + +func setPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(PTR) + rr.Hdr = h + + l := <-c + rr.Ptr = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + if l.token == "@" { + rr.Ptr = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad PTR Ptr", l}, "" + } + if rr.Ptr[l.length-1] != '.' { + rr.Ptr = appendOrigin(rr.Ptr, o) + } + return rr, nil, "" +} + +func setNSAPPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSAPPTR) + rr.Hdr = h + + l := <-c + rr.Ptr = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Ptr = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad NSAP-PTR Ptr", l}, "" + } + if rr.Ptr[l.length-1] != '.' { + rr.Ptr = appendOrigin(rr.Ptr, o) + } + return rr, nil, "" +} + +func setRP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RP) + rr.Hdr = h + + l := <-c + rr.Mbox = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mbox = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RP Mbox", l}, "" + } + if rr.Mbox[l.length-1] != '.' { + rr.Mbox = appendOrigin(rr.Mbox, o) + } + } + <-c // zBlank + l = <-c + rr.Txt = l.token + if l.token == "@" { + rr.Txt = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RP Txt", l}, "" + } + if rr.Txt[l.length-1] != '.' { + rr.Txt = appendOrigin(rr.Txt, o) + } + return rr, nil, "" +} + +func setMR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MR) + rr.Hdr = h + + l := <-c + rr.Mr = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mr = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MR Mr", l}, "" + } + if rr.Mr[l.length-1] != '.' { + rr.Mr = appendOrigin(rr.Mr, o) + } + return rr, nil, "" +} + +func setMB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MB) + rr.Hdr = h + + l := <-c + rr.Mb = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mb = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MB Mb", l}, "" + } + if rr.Mb[l.length-1] != '.' { + rr.Mb = appendOrigin(rr.Mb, o) + } + return rr, nil, "" +} + +func setMG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MG) + rr.Hdr = h + + l := <-c + rr.Mg = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mg = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MG Mg", l}, "" + } + if rr.Mg[l.length-1] != '.' { + rr.Mg = appendOrigin(rr.Mg, o) + } + return rr, nil, "" +} + +func setHINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(HINFO) + rr.Hdr = h + + chunks, e, c1 := endingToTxtSlice(c, "bad HINFO Fields", f) + if e != nil { + return nil, e, c1 + } + + if ln := len(chunks); ln == 0 { + return rr, nil, "" + } else if ln == 1 { + // Can we split it? 
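+		// e.g. an unquoted "SUN4/110 UNIX" arrives here as a single chunk and
+		// is split on whitespace into Cpu and Os below.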
+ if out := strings.Fields(chunks[0]); len(out) > 1 { + chunks = out + } else { + chunks = append(chunks, "") + } + } + + rr.Cpu = chunks[0] + rr.Os = strings.Join(chunks[1:], " ") + + return rr, nil, "" +} + +func setMINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MINFO) + rr.Hdr = h + + l := <-c + rr.Rmail = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Rmail = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MINFO Rmail", l}, "" + } + if rr.Rmail[l.length-1] != '.' { + rr.Rmail = appendOrigin(rr.Rmail, o) + } + } + <-c // zBlank + l = <-c + rr.Email = l.token + if l.token == "@" { + rr.Email = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MINFO Email", l}, "" + } + if rr.Email[l.length-1] != '.' { + rr.Email = appendOrigin(rr.Email, o) + } + return rr, nil, "" +} + +func setMF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MF) + rr.Hdr = h + + l := <-c + rr.Mf = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mf = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MF Mf", l}, "" + } + if rr.Mf[l.length-1] != '.' { + rr.Mf = appendOrigin(rr.Mf, o) + } + return rr, nil, "" +} + +func setMD(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MD) + rr.Hdr = h + + l := <-c + rr.Md = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Md = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MD Md", l}, "" + } + if rr.Md[l.length-1] != '.' { + rr.Md = appendOrigin(rr.Md, o) + } + return rr, nil, "" +} + +func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MX) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad MX Pref", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Mx = l.token + if l.token == "@" { + rr.Mx = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MX Mx", l}, "" + } + if rr.Mx[l.length-1] != '.' { + rr.Mx = appendOrigin(rr.Mx, o) + } + return rr, nil, "" +} + +func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RT) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil { + return nil, &ParseError{f, "bad RT Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Host = l.token + if l.token == "@" { + rr.Host = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RT Host", l}, "" + } + if rr.Host[l.length-1] != '.' 
{ + rr.Host = appendOrigin(rr.Host, o) + } + return rr, nil, "" +} + +func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(AFSDB) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad AFSDB Subtype", l}, "" + } + rr.Subtype = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Hostname = l.token + if l.token == "@" { + rr.Hostname = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad AFSDB Hostname", l}, "" + } + if rr.Hostname[l.length-1] != '.' { + rr.Hostname = appendOrigin(rr.Hostname, o) + } + return rr, nil, "" +} + +func setX25(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(X25) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + if l.err { + return nil, &ParseError{f, "bad X25 PSDNAddress", l}, "" + } + rr.PSDNAddress = l.token + return rr, nil, "" +} + +func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(KX) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad KX Pref", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Exchanger = l.token + if l.token == "@" { + rr.Exchanger = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad KX Exchanger", l}, "" + } + if rr.Exchanger[l.length-1] != '.' { + rr.Exchanger = appendOrigin(rr.Exchanger, o) + } + return rr, nil, "" +} + +func setCNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CNAME) + rr.Hdr = h + + l := <-c + rr.Target = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Target = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad CNAME Target", l}, "" + } + if rr.Target[l.length-1] != '.' { + rr.Target = appendOrigin(rr.Target, o) + } + return rr, nil, "" +} + +func setDNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(DNAME) + rr.Hdr = h + + l := <-c + rr.Target = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Target = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad CNAME Target", l}, "" + } + if rr.Target[l.length-1] != '.' { + rr.Target = appendOrigin(rr.Target, o) + } + return rr, nil, "" +} + +func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SOA) + rr.Hdr = h + + l := <-c + rr.Ns = l.token + if l.length == 0 { + return rr, nil, "" + } + <-c // zBlank + if l.token == "@" { + rr.Ns = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad SOA Ns", l}, "" + } + if rr.Ns[l.length-1] != '.' { + rr.Ns = appendOrigin(rr.Ns, o) + } + } + + l = <-c + rr.Mbox = l.token + if l.token == "@" { + rr.Mbox = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad SOA Mbox", l}, "" + } + if rr.Mbox[l.length-1] != '.' 
{ + rr.Mbox = appendOrigin(rr.Mbox, o) + } + } + <-c // zBlank + + var ( + v uint32 + ok bool + ) + for i := 0; i < 5; i++ { + l = <-c + if l.err { + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + } + if j, e := strconv.Atoi(l.token); e != nil { + if i == 0 { + // Serial should be a number + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + } + if v, ok = stringToTtl(l.token); !ok { + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + + } + } else { + v = uint32(j) + } + switch i { + case 0: + rr.Serial = v + <-c // zBlank + case 1: + rr.Refresh = v + <-c // zBlank + case 2: + rr.Retry = v + <-c // zBlank + case 3: + rr.Expire = v + <-c // zBlank + case 4: + rr.Minttl = v + } + } + return rr, nil, "" +} + +func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SRV) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Priority", l}, "" + } + rr.Priority = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Weight", l}, "" + } + rr.Weight = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Port", l}, "" + } + rr.Port = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Target = l.token + if l.token == "@" { + rr.Target = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad SRV Target", l}, "" + } + if rr.Target[l.length-1] != '.' { + rr.Target = appendOrigin(rr.Target, o) + } + return rr, nil, "" +} + +func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NAPTR) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NAPTR Order", l}, "" + } + rr.Order = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NAPTR Preference", l}, "" + } + rr.Preference = uint16(i) + // Flags + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Flags = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + } else if l.value == zQuote { + rr.Flags = "" + } else { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + + // Service + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Service = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + } else if l.value == zQuote { + rr.Service = "" + } else { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + + // Regexp + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Regexp = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + } + } else if l.value == zQuote { + rr.Regexp = "" + } else { + return nil, 
&ParseError{f, "bad NAPTR Regexp", l}, "" + } + // After quote no space?? + <-c // zBlank + l = <-c // zString + rr.Replacement = l.token + if l.token == "@" { + rr.Replacement = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad NAPTR Replacement", l}, "" + } + if rr.Replacement[l.length-1] != '.' { + rr.Replacement = appendOrigin(rr.Replacement, o) + } + return rr, nil, "" +} + +func setTALINK(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TALINK) + rr.Hdr = h + + l := <-c + rr.PreviousName = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.PreviousName = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad TALINK PreviousName", l}, "" + } + if rr.PreviousName[l.length-1] != '.' { + rr.PreviousName = appendOrigin(rr.PreviousName, o) + } + } + <-c // zBlank + l = <-c + rr.NextName = l.token + if l.token == "@" { + rr.NextName = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad TALINK NextName", l}, "" + } + if rr.NextName[l.length-1] != '.' { + rr.NextName = appendOrigin(rr.NextName, o) + } + return rr, nil, "" +} + +func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(LOC) + rr.Hdr = h + // Non zero defaults for LOC record, see RFC 1876, Section 3. + rr.HorizPre = 165 // 10000 + rr.VertPre = 162 // 10 + rr.Size = 18 // 1 + ok := false + // North + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude", l}, "" + } + rr.Latitude = 1000 * 60 * 60 * uint32(i) + + <-c // zBlank + // Either number, 'N' or 'S' + l = <-c + if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { + goto East + } + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude minutes", l}, "" + } + rr.Latitude += 1000 * 60 * uint32(i) + + <-c // zBlank + l = <-c + if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude seconds", l}, "" + } else { + rr.Latitude += uint32(1000 * i) + } + <-c // zBlank + // Either number, 'N' or 'S' + l = <-c + if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { + goto East + } + // If still alive, flag an error + return nil, &ParseError{f, "bad LOC Latitude North/South", l}, "" + +East: + // East + <-c // zBlank + l = <-c + if i, e := strconv.Atoi(l.token); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude", l}, "" + } else { + rr.Longitude = 1000 * 60 * 60 * uint32(i) + } + <-c // zBlank + // Either number, 'E' or 'W' + l = <-c + if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { + goto Altitude + } + if i, e := strconv.Atoi(l.token); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude minutes", l}, "" + } else { + rr.Longitude += 1000 * 60 * uint32(i) + } + <-c // zBlank + l = <-c + if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude seconds", l}, "" + } else { + rr.Longitude += uint32(1000 * i) + } + <-c // zBlank + // Either number, 'E' or 'W' + l = <-c + if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { + goto Altitude + } + // If still alive, flag an error + return nil, &ParseError{f, "bad LOC Longitude East/West", l}, "" + 
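+	// The altitude parsed below is stored in centimeters, offset so that 0
+	// represents 100000m below the WGS 84 spheroid (RFC 1876); hence the
+	// *100 and +10000000 applied to the value in meters.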
+Altitude: + <-c // zBlank + l = <-c + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad LOC Altitude", l}, "" + } + if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { + l.token = l.token[0 : len(l.token)-1] + } + if i, e := strconv.ParseFloat(l.token, 32); e != nil { + return nil, &ParseError{f, "bad LOC Altitude", l}, "" + } else { + rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) + } + + // And now optionally the other values + l = <-c + count := 0 + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zString: + switch count { + case 0: // Size + e, m, ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC Size", l}, "" + } + rr.Size = (e & 0x0f) | (m << 4 & 0xf0) + case 1: // HorizPre + e, m, ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC HorizPre", l}, "" + } + rr.HorizPre = (e & 0x0f) | (m << 4 & 0xf0) + case 2: // VertPre + e, m, ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC VertPre", l}, "" + } + rr.VertPre = (e & 0x0f) | (m << 4 & 0xf0) + } + count++ + case zBlank: + // Ok + default: + return nil, &ParseError{f, "bad LOC Size, HorizPre or VertPre", l}, "" + } + l = <-c + } + return rr, nil, "" +} + +func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(HIP) + rr.Hdr = h + + // HitLength is not represented + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, "" + } + rr.PublicKeyAlgorithm = uint8(i) + <-c // zBlank + l = <-c // zString + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad HIP Hit", l}, "" + } + rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6. + rr.HitLength = uint8(len(rr.Hit)) / 2 + + <-c // zBlank + l = <-c // zString + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad HIP PublicKey", l}, "" + } + rr.PublicKey = l.token // This cannot contain spaces + rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey))) + + // RendezvousServers (if any) + l = <-c + var xs []string + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zString: + if l.token == "@" { + xs = append(xs, o) + l = <-c + continue + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" + } + if l.token[l.length-1] != '.' 
{ + l.token = appendOrigin(l.token, o) + } + xs = append(xs, l.token) + case zBlank: + // Ok + default: + return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" + } + l = <-c + } + rr.RendezvousServers = xs + return rr, nil, l.comment +} + +func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CERT) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + if v, ok := StringToCertType[l.token]; ok { + rr.Type = v + } else if i, e := strconv.Atoi(l.token); e != nil { + return nil, &ParseError{f, "bad CERT Type", l}, "" + } else { + rr.Type = uint16(i) + } + <-c // zBlank + l = <-c // zString + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad CERT KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c // zString + if v, ok := StringToAlgorithm[l.token]; ok { + rr.Algorithm = v + } else if i, e := strconv.Atoi(l.token); e != nil { + return nil, &ParseError{f, "bad CERT Algorithm", l}, "" + } else { + rr.Algorithm = uint8(i) + } + s, e1, c1 := endingToString(c, "bad CERT Certificate", f) + if e1 != nil { + return nil, e1, c1 + } + rr.Certificate = s + return rr, nil, c1 +} + +func setOPENPGPKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(OPENPGPKEY) + rr.Hdr = h + + s, e, c1 := endingToString(c, "bad OPENPGPKEY PublicKey", f) + if e != nil { + return nil, e, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setRRSIG(h, c, o, f) + if r != nil { + return &SIG{*r.(*RRSIG)}, e, s + } + return nil, e, s +} + +func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RRSIG) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + if t, ok := StringToType[l.tokenUpper]; !ok { + if strings.HasPrefix(l.tokenUpper, "TYPE") { + t, ok = typeToInt(l.tokenUpper) + if !ok { + return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" + } + rr.TypeCovered = t + } else { + return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" + } + } else { + rr.TypeCovered = t + } + <-c // zBlank + l = <-c + i, err := strconv.Atoi(l.token) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + <-c // zBlank + l = <-c + i, err = strconv.Atoi(l.token) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG Labels", l}, "" + } + rr.Labels = uint8(i) + <-c // zBlank + l = <-c + i, err = strconv.Atoi(l.token) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, "" + } + rr.OrigTtl = uint32(i) + <-c // zBlank + l = <-c + if i, err := StringToTime(l.token); err != nil { + // Try to see if all numeric and use it as epoch + if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { + // TODO(miek): error out on > MAX_UINT32, same below + rr.Expiration = uint32(i) + } else { + return nil, &ParseError{f, "bad RRSIG Expiration", l}, "" + } + } else { + rr.Expiration = i + } + <-c // zBlank + l = <-c + if i, err := StringToTime(l.token); err != nil { + if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { + rr.Inception = uint32(i) + } else { + return nil, &ParseError{f, "bad RRSIG Inception", l}, "" + } + } else { + rr.Inception = i + } + <-c // zBlank + l = <-c + i, err = strconv.Atoi(l.token) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l 
= <-c + rr.SignerName = l.token + if l.token == "@" { + rr.SignerName = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RRSIG SignerName", l}, "" + } + if rr.SignerName[l.length-1] != '.' { + rr.SignerName = appendOrigin(rr.SignerName, o) + } + } + s, e, c1 := endingToString(c, "bad RRSIG Signature", f) + if e != nil { + return nil, e, c1 + } + rr.Signature = s + return rr, nil, c1 +} + +func setNSEC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC) + rr.Hdr = h + + l := <-c + rr.NextDomain = l.token + if l.length == 0 { + return rr, nil, l.comment + } + if l.token == "@" { + rr.NextDomain = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad NSEC NextDomain", l}, "" + } + if rr.NextDomain[l.length-1] != '.' { + rr.NextDomain = appendOrigin(rr.NextDomain, o) + } + } + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l = <-c + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + if k, ok = StringToType[l.tokenUpper]; !ok { + if k, ok = typeToInt(l.tokenUpper); !ok { + return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" + } + l = <-c + } + return rr, nil, l.comment +} + +func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC3) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Hash", l}, "" + } + rr.Hash = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Flags", l}, "" + } + rr.Flags = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Iterations", l}, "" + } + rr.Iterations = uint16(i) + <-c + l = <-c + if len(l.token) == 0 || l.err { + return nil, &ParseError{f, "bad NSEC3 Salt", l}, "" + } + rr.SaltLength = uint8(len(l.token)) / 2 + rr.Salt = l.token + + <-c + l = <-c + if len(l.token) == 0 || l.err { + return nil, &ParseError{f, "bad NSEC3 NextDomain", l}, "" + } + rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits) + rr.NextDomain = l.token + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l = <-c + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + if k, ok = StringToType[l.tokenUpper]; !ok { + if k, ok = typeToInt(l.tokenUpper); !ok { + return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" + } + l = <-c + } + return rr, nil, l.comment +} + +func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC3PARAM) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, "" + } + rr.Hash = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3PARAM Flags", l}, "" + } + rr.Flags = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { 
+ return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, "" + } + rr.Iterations = uint16(i) + <-c + l = <-c + rr.SaltLength = uint8(len(l.token)) + rr.Salt = l.token + return rr, nil, "" +} + +func setEUI48(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EUI48) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + if l.length != 17 || l.err { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + addr := make([]byte, 12) + dash := 0 + for i := 0; i < 10; i += 2 { + addr[i] = l.token[i+dash] + addr[i+1] = l.token[i+1+dash] + dash++ + if l.token[i+1+dash] != '-' { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + } + addr[10] = l.token[15] + addr[11] = l.token[16] + + i, e := strconv.ParseUint(string(addr), 16, 48) + if e != nil { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + rr.Address = i + return rr, nil, "" +} + +func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EUI64) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + if l.length != 23 || l.err { + return nil, &ParseError{f, "bad EUI64 Address", l}, "" + } + addr := make([]byte, 16) + dash := 0 + for i := 0; i < 14; i += 2 { + addr[i] = l.token[i+dash] + addr[i+1] = l.token[i+1+dash] + dash++ + if l.token[i+1+dash] != '-' { + return nil, &ParseError{f, "bad EUI64 Address", l}, "" + } + } + addr[14] = l.token[21] + addr[15] = l.token[22] + + i, e := strconv.ParseUint(string(addr), 16, 64) + if e != nil { + return nil, &ParseError{f, "bad EUI68 Address", l}, "" + } + rr.Address = uint64(i) + return rr, nil, "" +} + +func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SSHFP) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SSHFP Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SSHFP Type", l}, "" + } + rr.Type = uint8(i) + <-c // zBlank + s, e1, c1 := endingToString(c, "bad SSHFP Fingerprint", f) + if e1 != nil { + return nil, e1, c1 + } + rr.FingerPrint = s + return rr, nil, "" +} + +func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { + rr := new(DNSKEY) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Flags", l}, "" + } + rr.Flags = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Protocol", l}, "" + } + rr.Protocol = uint8(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + s, e1, c1 := endingToString(c, "bad "+typ+" PublicKey", f) + if e1 != nil { + return nil, e1, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "KEY") + if r != nil { + return &KEY{*r.(*DNSKEY)}, e, s + } + return nil, e, s +} + +func setDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "DNSKEY") + return r, e, s +} + +func setCDNSKEY(h RR_Header, c chan lex, o, f string) 
(RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "CDNSKEY") + if r != nil { + return &CDNSKEY{*r.(*DNSKEY)}, e, s + } + return nil, e, s +} + +func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RKEY) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Flags", l}, "" + } + rr.Flags = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Protocol", l}, "" + } + rr.Protocol = uint8(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + s, e1, c1 := endingToString(c, "bad RKEY PublicKey", f) + if e1 != nil { + return nil, e1, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setEID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EID) + rr.Hdr = h + s, e, c1 := endingToString(c, "bad EID Endpoint", f) + if e != nil { + return nil, e, c1 + } + rr.Endpoint = s + return rr, nil, c1 +} + +func setNIMLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NIMLOC) + rr.Hdr = h + s, e, c1 := endingToString(c, "bad NIMLOC Locator", f) + if e != nil { + return nil, e, c1 + } + rr.Locator = s + return rr, nil, c1 +} + +func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(GPOS) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, "" + } + _, e := strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Longitude", l}, "" + } + rr.Longitude = l.token + <-c // zBlank + l = <-c + _, e = strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Latitude", l}, "" + } + rr.Latitude = l.token + <-c // zBlank + l = <-c + _, e = strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Altitude", l}, "" + } + rr.Altitude = l.token + return rr, nil, "" +} + +func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { + rr := new(DS) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c + if i, e := strconv.Atoi(l.token); e != nil { + i, ok := StringToAlgorithm[l.tokenUpper] + if !ok || l.err { + return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" + } + rr.Algorithm = i + } else { + rr.Algorithm = uint8(i) + } + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " DigestType", l}, "" + } + rr.DigestType = uint8(i) + s, e1, c1 := endingToString(c, "bad "+typ+" Digest", f) + if e1 != nil { + return nil, e1, c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "DS") + return r, e, s +} + +func setDLV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "DLV") + if r != nil { + return &DLV{*r.(*DS)}, e, s + } + return nil, e, s +} + +func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "CDS") + if r != nil 
{ + return &CDS{*r.(*DS)}, e, s + } + return nil, e, s +} + +func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TA) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad TA KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c + if i, e := strconv.Atoi(l.token); e != nil { + i, ok := StringToAlgorithm[l.tokenUpper] + if !ok || l.err { + return nil, &ParseError{f, "bad TA Algorithm", l}, "" + } + rr.Algorithm = i + } else { + rr.Algorithm = uint8(i) + } + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad TA DigestType", l}, "" + } + rr.DigestType = uint8(i) + s, e, c1 := endingToString(c, "bad TA Digest", f) + if e != nil { + return nil, e.(*ParseError), c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TLSA) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad TLSA Usage", l}, "" + } + rr.Usage = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad TLSA Selector", l}, "" + } + rr.Selector = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad TLSA MatchingType", l}, "" + } + rr.MatchingType = uint8(i) + // So this needs be e2 (i.e. different than e), because...??t + s, e2, c1 := endingToString(c, "bad TLSA Certificate", f) + if e2 != nil { + return nil, e2, c1 + } + rr.Certificate = s + return rr, nil, c1 +} + +func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RFC3597) + rr.Hdr = h + l := <-c + if l.token != "\\#" { + return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" + } + <-c // zBlank + l = <-c + rdlength, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad RFC3597 Rdata ", l}, "" + } + + s, e1, c1 := endingToString(c, "bad RFC3597 Rdata", f) + if e1 != nil { + return nil, e1, c1 + } + if rdlength*2 != len(s) { + return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" + } + rr.Rdata = s + return rr, nil, c1 +} + +func setSPF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SPF) + rr.Hdr = h + + s, e, c1 := endingToTxtSlice(c, "bad SPF Txt", f) + if e != nil { + return nil, e, "" + } + rr.Txt = s + return rr, nil, c1 +} + +func setTXT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TXT) + rr.Hdr = h + + // no zBlank reading here, because all this rdata is TXT + s, e, c1 := endingToTxtSlice(c, "bad TXT Txt", f) + if e != nil { + return nil, e, "" + } + rr.Txt = s + return rr, nil, c1 +} + +// identical to setTXT +func setNINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NINFO) + rr.Hdr = h + + s, e, c1 := endingToTxtSlice(c, "bad NINFO ZSData", f) + if e != nil { + return nil, e, "" + } + rr.ZSData = s + return rr, nil, c1 +} + +func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(URI) + rr.Hdr = h + + l := <-c + if l.length == 0 { // Dynamic updates. 
+ return rr, nil, "" + } + + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad URI Priority", l}, "" + } + rr.Priority = uint16(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad URI Weight", l}, "" + } + rr.Weight = uint16(i) + + <-c // zBlank + s, err, c1 := endingToTxtSlice(c, "bad URI Target", f) + if err != nil { + return nil, err, "" + } + if len(s) > 1 { + return nil, &ParseError{f, "bad URI Target", l}, "" + } + rr.Target = s[0] + return rr, nil, c1 +} + +func setDHCID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + // awesome record to parse! + rr := new(DHCID) + rr.Hdr = h + + s, e, c1 := endingToString(c, "bad DHCID Digest", f) + if e != nil { + return nil, e, c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NID) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NID Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + u, err := stringToNodeID(l) + if err != nil || l.err { + return nil, err, "" + } + rr.NodeID = u + return rr, nil, "" +} + +func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(L32) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad L32 Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Locator32 = net.ParseIP(l.token) + if rr.Locator32 == nil || l.err { + return nil, &ParseError{f, "bad L32 Locator", l}, "" + } + return rr, nil, "" +} + +func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(LP) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad LP Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Fqdn = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Fqdn = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad LP Fqdn", l}, "" + } + if rr.Fqdn[l.length-1] != '.' 
{ + rr.Fqdn = appendOrigin(rr.Fqdn, o) + } + return rr, nil, "" +} + +func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(L64) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad L64 Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + u, err := stringToNodeID(l) + if err != nil || l.err { + return nil, err, "" + } + rr.Locator64 = u + return rr, nil, "" +} + +func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(UID) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad UID Uid", l}, "" + } + rr.Uid = uint32(i) + return rr, nil, "" +} + +func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(GID) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad GID Gid", l}, "" + } + rr.Gid = uint32(i) + return rr, nil, "" +} + +func setUINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(UINFO) + rr.Hdr = h + s, e, c1 := endingToTxtSlice(c, "bad UINFO Uinfo", f) + if e != nil { + return nil, e, "" + } + rr.Uinfo = s[0] // silently discard anything above + return rr, nil, c1 +} + +func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(PX) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad PX Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Map822 = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Map822 = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad PX Map822", l}, "" + } + if rr.Map822[l.length-1] != '.' { + rr.Map822 = appendOrigin(rr.Map822, o) + } + <-c // zBlank + l = <-c // zString + rr.Mapx400 = l.token + if l.token == "@" { + rr.Mapx400 = o + return rr, nil, "" + } + _, ok = IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad PX Mapx400", l}, "" + } + if rr.Mapx400[l.length-1] != '.' 
{ + rr.Mapx400 = appendOrigin(rr.Mapx400, o) + } + return rr, nil, "" +} + +func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CAA) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, err := strconv.Atoi(l.token) + if err != nil || l.err { + return nil, &ParseError{f, "bad CAA Flag", l}, "" + } + rr.Flag = uint8(i) + + <-c // zBlank + l = <-c // zString + if l.value != zString { + return nil, &ParseError{f, "bad CAA Tag", l}, "" + } + rr.Tag = l.token + + <-c // zBlank + s, e, c1 := endingToTxtSlice(c, "bad CAA Value", f) + if e != nil { + return nil, e, "" + } + if len(s) > 1 { + return nil, &ParseError{f, "bad CAA Value", l}, "" + } + rr.Value = s[0] + return rr, nil, c1 +} + +var typeToparserFunc = map[uint16]parserFunc{ + TypeAAAA: {setAAAA, false}, + TypeAFSDB: {setAFSDB, false}, + TypeA: {setA, false}, + TypeCAA: {setCAA, true}, + TypeCDS: {setCDS, true}, + TypeCDNSKEY: {setCDNSKEY, true}, + TypeCERT: {setCERT, true}, + TypeCNAME: {setCNAME, false}, + TypeDHCID: {setDHCID, true}, + TypeDLV: {setDLV, true}, + TypeDNAME: {setDNAME, false}, + TypeKEY: {setKEY, true}, + TypeDNSKEY: {setDNSKEY, true}, + TypeDS: {setDS, true}, + TypeEID: {setEID, true}, + TypeEUI48: {setEUI48, false}, + TypeEUI64: {setEUI64, false}, + TypeGID: {setGID, false}, + TypeGPOS: {setGPOS, false}, + TypeHINFO: {setHINFO, true}, + TypeHIP: {setHIP, true}, + TypeKX: {setKX, false}, + TypeL32: {setL32, false}, + TypeL64: {setL64, false}, + TypeLOC: {setLOC, true}, + TypeLP: {setLP, false}, + TypeMB: {setMB, false}, + TypeMD: {setMD, false}, + TypeMF: {setMF, false}, + TypeMG: {setMG, false}, + TypeMINFO: {setMINFO, false}, + TypeMR: {setMR, false}, + TypeMX: {setMX, false}, + TypeNAPTR: {setNAPTR, false}, + TypeNID: {setNID, false}, + TypeNIMLOC: {setNIMLOC, true}, + TypeNINFO: {setNINFO, true}, + TypeNSAPPTR: {setNSAPPTR, false}, + TypeNSEC3PARAM: {setNSEC3PARAM, false}, + TypeNSEC3: {setNSEC3, true}, + TypeNSEC: {setNSEC, true}, + TypeNS: {setNS, false}, + TypeOPENPGPKEY: {setOPENPGPKEY, true}, + TypePTR: {setPTR, false}, + TypePX: {setPX, false}, + TypeSIG: {setSIG, true}, + TypeRKEY: {setRKEY, true}, + TypeRP: {setRP, false}, + TypeRRSIG: {setRRSIG, true}, + TypeRT: {setRT, false}, + TypeSOA: {setSOA, false}, + TypeSPF: {setSPF, true}, + TypeSRV: {setSRV, false}, + TypeSSHFP: {setSSHFP, true}, + TypeTALINK: {setTALINK, false}, + TypeTA: {setTA, true}, + TypeTLSA: {setTLSA, true}, + TypeTXT: {setTXT, true}, + TypeUID: {setUID, false}, + TypeUINFO: {setUINFO, true}, + TypeURI: {setURI, true}, + TypeX25: {setX25, false}, +} diff --git a/vendor/github.com/miekg/dns/scanner.go b/vendor/github.com/miekg/dns/scanner.go new file mode 100644 index 000000000..c29bc2f38 --- /dev/null +++ b/vendor/github.com/miekg/dns/scanner.go @@ -0,0 +1,43 @@ +package dns + +// Implement a simple scanner, return a byte stream from an io reader. + +import ( + "bufio" + "io" + "text/scanner" +) + +type scan struct { + src *bufio.Reader + position scanner.Position + eof bool // Have we just seen a eof +} + +func scanInit(r io.Reader) *scan { + s := new(scan) + s.src = bufio.NewReader(r) + s.position.Line = 1 + return s +} + +// tokenText returns the next byte from the input +func (s *scan) tokenText() (byte, error) { + c, err := s.src.ReadByte() + if err != nil { + return c, err + } + // delay the newline handling until the next token is delivered, + // fixes off-by-one errors when reporting a parse error. 
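+ // (despite its name, s.eof is set when the previously returned byte was a newline)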
+ if s.eof == true { + s.position.Line++ + s.position.Column = 0 + s.eof = false + } + if c == '\n' { + s.eof = true + return c, nil + } + s.position.Column++ + return c, nil +} diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go new file mode 100644 index 000000000..2b4bff49f --- /dev/null +++ b/vendor/github.com/miekg/dns/server.go @@ -0,0 +1,732 @@ +// DNS server implementation. + +package dns + +import ( + "bytes" + "crypto/tls" + "encoding/binary" + "io" + "net" + "sync" + "time" +) + +// Maximum number of TCP queries before we close the socket. +const maxTCPQueries = 128 + +// Handler is implemented by any value that implements ServeDNS. +type Handler interface { + ServeDNS(w ResponseWriter, r *Msg) +} + +// A ResponseWriter interface is used by a DNS handler to +// construct a DNS response. +type ResponseWriter interface { + // LocalAddr returns the net.Addr of the server + LocalAddr() net.Addr + // RemoteAddr returns the net.Addr of the client that sent the current request. + RemoteAddr() net.Addr + // WriteMsg writes a reply back to the client. + WriteMsg(*Msg) error + // Write writes a raw buffer back to the client. + Write([]byte) (int, error) + // Close closes the connection. + Close() error + // TsigStatus returns the status of the Tsig. + TsigStatus() error + // TsigTimersOnly sets the tsig timers only boolean. + TsigTimersOnly(bool) + // Hijack lets the caller take over the connection. + // After a call to Hijack(), the DNS package will not do anything with the connection. + Hijack() +} + +type response struct { + hijacked bool // connection has been hijacked by handler + tsigStatus error + tsigTimersOnly bool + tsigRequestMAC string + tsigSecret map[string]string // the tsig secrets + udp *net.UDPConn // i/o connection if UDP was used + tcp net.Conn // i/o connection if TCP was used + udpSession *SessionUDP // oob data to get egress interface right + remoteAddr net.Addr // address of the client + writer Writer // writer to output the raw DNS bits +} + +// ServeMux is a DNS request multiplexer. It matches the +// zone name of each incoming request against a list of +// registered patterns and calls the handler for the pattern +// that most closely matches the zone name. ServeMux is DNSSEC aware, meaning +// that queries for the DS record are redirected to the parent zone (if that +// is also registered), otherwise the child gets the query. +// ServeMux is also safe for concurrent access from multiple goroutines. +type ServeMux struct { + z map[string]Handler + m *sync.RWMutex +} + +// NewServeMux allocates and returns a new ServeMux. +func NewServeMux() *ServeMux { return &ServeMux{z: make(map[string]Handler), m: new(sync.RWMutex)} } + +// DefaultServeMux is the default ServeMux used by Serve. +var DefaultServeMux = NewServeMux() + +// The HandlerFunc type is an adapter to allow the use of +// ordinary functions as DNS handlers. If f is a function +// with the appropriate signature, HandlerFunc(f) is a +// Handler object that calls f. +type HandlerFunc func(ResponseWriter, *Msg) + +// ServeDNS calls f(w, r). +func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) { + f(w, r) +} + +// HandleFailed replies to every request it gets with a SERVFAIL message.
+func HandleFailed(w ResponseWriter, r *Msg) { + m := new(Msg) + m.SetRcode(r, RcodeServerFailure) + // does not matter if this write fails + w.WriteMsg(m) +} + +func failedHandler() Handler { return HandlerFunc(HandleFailed) } + +// ListenAndServe starts a server on the specified address and network and invokes handler +// for incoming queries. +func ListenAndServe(addr string, network string, handler Handler) error { + server := &Server{Addr: addr, Net: network, Handler: handler} + return server.ListenAndServe() +} + +// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in +// http://golang.org/pkg/net/http/#ListenAndServeTLS +func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return err + } + + config := tls.Config{ + Certificates: []tls.Certificate{cert}, + } + + server := &Server{ + Addr: addr, + Net: "tcp-tls", + TLSConfig: &config, + Handler: handler, + } + + return server.ListenAndServe() +} + +// ActivateAndServe activates a server with a listener from systemd; +// l and p should not both be non-nil. +// If both l and p are non-nil only p will be used. +// handler is invoked for incoming queries. +func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error { + server := &Server{Listener: l, PacketConn: p, Handler: handler} + return server.ActivateAndServe() +} + +func (mux *ServeMux) match(q string, t uint16) Handler { + mux.m.RLock() + defer mux.m.RUnlock() + var handler Handler + b := make([]byte, len(q)) // worst case, one label of length q + off := 0 + end := false + for { + l := len(q[off:]) + for i := 0; i < l; i++ { + b[i] = q[off+i] + if b[i] >= 'A' && b[i] <= 'Z' { + b[i] |= ('a' - 'A') + } + } + if h, ok := mux.z[string(b[:l])]; ok { // this causes garbage, might want to change the map key + if t != TypeDS { + return h + } + // Continue for DS to see if we have a parent too, if so delegate to the parent + handler = h + } + off, end = NextLabel(q, off) + if end { + break + } + } + // Wildcard match, if we have found nothing try the root zone as a last resort. + if h, ok := mux.z["."]; ok { + return h + } + return handler +} + +// Handle adds a handler to the ServeMux for pattern. +func (mux *ServeMux) Handle(pattern string, handler Handler) { + if pattern == "" { + panic("dns: invalid pattern " + pattern) + } + mux.m.Lock() + mux.z[Fqdn(pattern)] = handler + mux.m.Unlock() +} + +// HandleFunc adds a handler function to the ServeMux for pattern. +func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { + mux.Handle(pattern, HandlerFunc(handler)) +} + +// HandleRemove deregisters the handler for pattern from the ServeMux. +func (mux *ServeMux) HandleRemove(pattern string) { + if pattern == "" { + panic("dns: invalid pattern " + pattern) + } + mux.m.Lock() + delete(mux.z, Fqdn(pattern)) + mux.m.Unlock() +} + +// ServeDNS dispatches the request to the handler whose +// pattern most closely matches the request message. If DefaultServeMux +// is used the correct thing for DS queries is done: a possible parent +// is sought. +// If no handler is found a standard SERVFAIL message is returned. +// If the request message does not have exactly one question in the +// question section a SERVFAIL is returned, unless Unsafe is true.
+func (mux *ServeMux) ServeDNS(w ResponseWriter, request *Msg) { + var h Handler + if len(request.Question) < 1 { // allow more than one question + h = failedHandler() + } else { + if h = mux.match(request.Question[0].Name, request.Question[0].Qtype); h == nil { + h = failedHandler() + } + } + h.ServeDNS(w, request) +} + +// Handle registers the handler with the given pattern +// in the DefaultServeMux. The documentation for +// ServeMux explains how patterns are matched. +func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } + +// HandleRemove deregisters the handle with the given pattern +// in the DefaultServeMux. +func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) } + +// HandleFunc registers the handler function with the given pattern +// in the DefaultServeMux. +func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { + DefaultServeMux.HandleFunc(pattern, handler) +} + +// Writer writes raw DNS messages; each call to Write should send an entire message. +type Writer interface { + io.Writer +} + +// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message. +type Reader interface { + // ReadTCP reads a raw message from a TCP connection. Implementations may alter + // connection properties, for example the read-deadline. + ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) + // ReadUDP reads a raw message from a UDP connection. Implementations may alter + // connection properties, for example the read-deadline. + ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) +} + +// defaultReader is an adapter for the Server struct that implements the Reader interface +// using the readTCP and readUDP func of the embedded Server. +type defaultReader struct { + *Server +} + +func (dr *defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { + return dr.readTCP(conn, timeout) +} + +func (dr *defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { + return dr.readUDP(conn, timeout) +} + +// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader. +// Implementations should never return a nil Reader. +type DecorateReader func(Reader) Reader + +// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer. +// Implementations should never return a nil Writer. +type DecorateWriter func(Writer) Writer + +// A Server defines parameters for running an DNS server. +type Server struct { + // Address to listen on, ":dns" if empty. + Addr string + // if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one + Net string + // TCP Listener to use, this is to aid in systemd's socket activation. + Listener net.Listener + // TLS connection configuration + TLSConfig *tls.Config + // UDP "Listener" to use, this is to aid in systemd's socket activation. + PacketConn net.PacketConn + // Handler to invoke, dns.DefaultServeMux if nil. + Handler Handler + // Default buffer size to use to read incoming UDP messages. If not set + // it defaults to MinMsgSize (512 B). + UDPSize int + // The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second. + ReadTimeout time.Duration + // The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second. + WriteTimeout time.Duration + // TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966). 
+ IdleTimeout func() time.Duration + // Secret(s) for Tsig, keyed by zone/key name; the value is the base64 encoded secret. + TsigSecret map[string]string + // Unsafe instructs the server to disregard any sanity checks and directly hand the message to + // the handler. It will specifically not check if the query has the QR bit set. + Unsafe bool + // If NotifyStartedFunc is set it is called once the server has started listening. + NotifyStartedFunc func() + // DecorateReader is optional, allows customization of the process that reads raw DNS messages. + DecorateReader DecorateReader + // DecorateWriter is optional, allows customization of the process that writes raw DNS messages. + DecorateWriter DecorateWriter + + // Graceful shutdown handling + + inFlight sync.WaitGroup + + lock sync.RWMutex + started bool +} + +// ListenAndServe starts a nameserver on the configured address in *Server. +func (srv *Server) ListenAndServe() error { + srv.lock.Lock() + defer srv.lock.Unlock() + if srv.started { + return &Error{err: "server already started"} + } + addr := srv.Addr + if addr == "" { + addr = ":domain" + } + if srv.UDPSize == 0 { + srv.UDPSize = MinMsgSize + } + switch srv.Net { + case "tcp", "tcp4", "tcp6": + a, err := net.ResolveTCPAddr(srv.Net, addr) + if err != nil { + return err + } + l, err := net.ListenTCP(srv.Net, a) + if err != nil { + return err + } + srv.Listener = l + srv.started = true + srv.lock.Unlock() + err = srv.serveTCP(l) + srv.lock.Lock() // to satisfy the defer at the top + return err + case "tcp-tls", "tcp4-tls", "tcp6-tls": + network := "tcp" + if srv.Net == "tcp4-tls" { + network = "tcp4" + } else if srv.Net == "tcp6-tls" { + network = "tcp6" + } + + l, err := tls.Listen(network, addr, srv.TLSConfig) + if err != nil { + return err + } + srv.Listener = l + srv.started = true + srv.lock.Unlock() + err = srv.serveTCP(l) + srv.lock.Lock() // to satisfy the defer at the top + return err + case "udp", "udp4", "udp6": + a, err := net.ResolveUDPAddr(srv.Net, addr) + if err != nil { + return err + } + l, err := net.ListenUDP(srv.Net, a) + if err != nil { + return err + } + if e := setUDPSocketOptions(l); e != nil { + return e + } + srv.PacketConn = l + srv.started = true + srv.lock.Unlock() + err = srv.serveUDP(l) + srv.lock.Lock() // to satisfy the defer at the top + return err + } + return &Error{err: "bad network"} +} + +// ActivateAndServe starts a nameserver with the PacketConn or Listener +// configured in *Server. Its main use is to start a server from systemd. +func (srv *Server) ActivateAndServe() error { + srv.lock.Lock() + defer srv.lock.Unlock() + if srv.started { + return &Error{err: "server already started"} + } + pConn := srv.PacketConn + l := srv.Listener + if pConn != nil { + if srv.UDPSize == 0 { + srv.UDPSize = MinMsgSize + } + if t, ok := pConn.(*net.UDPConn); ok { + if e := setUDPSocketOptions(t); e != nil { + return e + } + srv.started = true + srv.lock.Unlock() + e := srv.serveUDP(t) + srv.lock.Lock() // to satisfy the defer at the top + return e + } + } + if l != nil { + srv.started = true + srv.lock.Unlock() + e := srv.serveTCP(l) + srv.lock.Lock() // to satisfy the defer at the top + return e + } + return &Error{err: "bad listeners"} +} + +// Shutdown gracefully shuts down a server. After a call to Shutdown, ListenAndServe and +// ActivateAndServe will return. All in progress queries are completed before the server +// is taken down. If the Shutdown is taking longer than the reading timeout an error +// is returned.
+func (srv *Server) Shutdown() error { + srv.lock.Lock() + if !srv.started { + srv.lock.Unlock() + return &Error{err: "server not started"} + } + srv.started = false + srv.lock.Unlock() + + if srv.PacketConn != nil { + srv.PacketConn.Close() + } + if srv.Listener != nil { + srv.Listener.Close() + } + + fin := make(chan bool) + go func() { + srv.inFlight.Wait() + fin <- true + }() + + select { + case <-time.After(srv.getReadTimeout()): + return &Error{err: "server shutdown is pending"} + case <-fin: + return nil + } +} + +// getReadTimeout is a helper func to use system timeout if server did not intend to change it. +func (srv *Server) getReadTimeout() time.Duration { + rtimeout := dnsTimeout + if srv.ReadTimeout != 0 { + rtimeout = srv.ReadTimeout + } + return rtimeout +} + +// serveTCP starts a TCP listener for the server. +// Each request is handled in a separate goroutine. +func (srv *Server) serveTCP(l net.Listener) error { + defer l.Close() + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } + + handler := srv.Handler + if handler == nil { + handler = DefaultServeMux + } + rtimeout := srv.getReadTimeout() + // deadline is not used here + for { + rw, err := l.Accept() + if err != nil { + if neterr, ok := err.(net.Error); ok && neterr.Temporary() { + continue + } + return err + } + m, err := reader.ReadTCP(rw, rtimeout) + srv.lock.RLock() + if !srv.started { + srv.lock.RUnlock() + return nil + } + srv.lock.RUnlock() + if err != nil { + continue + } + srv.inFlight.Add(1) + go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw) + } +} + +// serveUDP starts a UDP listener for the server. +// Each request is handled in a separate goroutine. +func (srv *Server) serveUDP(l *net.UDPConn) error { + defer l.Close() + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } + + handler := srv.Handler + if handler == nil { + handler = DefaultServeMux + } + rtimeout := srv.getReadTimeout() + // deadline is not used here + for { + m, s, err := reader.ReadUDP(l, rtimeout) + srv.lock.RLock() + if !srv.started { + srv.lock.RUnlock() + return nil + } + srv.lock.RUnlock() + if err != nil { + continue + } + srv.inFlight.Add(1) + go srv.serve(s.RemoteAddr(), handler, m, l, s, nil) + } +} + +// Serve a new connection. 
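+// For TCP connections the Redo label below implements pipelining: after answering a query the
+// server keeps reading from the same connection (subject to the idle timeout) and closes it once
+// more than maxTCPQueries messages have been handled.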
+func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t net.Conn) { + defer srv.inFlight.Done() + + w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s} + if srv.DecorateWriter != nil { + w.writer = srv.DecorateWriter(w) + } else { + w.writer = w + } + + q := 0 // counter for the amount of TCP queries we get + + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } +Redo: + req := new(Msg) + err := req.Unpack(m) + if err != nil { // Send a FormatError back + x := new(Msg) + x.SetRcodeFormatError(req) + w.WriteMsg(x) + goto Exit + } + if !srv.Unsafe && req.Response { + goto Exit + } + + w.tsigStatus = nil + if w.tsigSecret != nil { + if t := req.IsTsig(); t != nil { + secret := t.Hdr.Name + if _, ok := w.tsigSecret[secret]; !ok { + w.tsigStatus = ErrKeyAlg + } + w.tsigStatus = TsigVerify(m, w.tsigSecret[secret], "", false) + w.tsigTimersOnly = false + w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC + } + } + h.ServeDNS(w, req) // Writes back to the client + +Exit: + if w.tcp == nil { + return + } + // TODO(miek): make this number configurable? + if q > maxTCPQueries { // close socket after this many queries + w.Close() + return + } + + if w.hijacked { + return // client calls Close() + } + if u != nil { // UDP, "close" and return + w.Close() + return + } + idleTimeout := tcpIdleTimeout + if srv.IdleTimeout != nil { + idleTimeout = srv.IdleTimeout() + } + m, err = reader.ReadTCP(w.tcp, idleTimeout) + if err == nil { + q++ + goto Redo + } + w.Close() + return +} + +func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { + conn.SetReadDeadline(time.Now().Add(timeout)) + l := make([]byte, 2) + n, err := conn.Read(l) + if err != nil || n != 2 { + if err != nil { + return nil, err + } + return nil, ErrShortRead + } + length := binary.BigEndian.Uint16(l) + if length == 0 { + return nil, ErrShortRead + } + m := make([]byte, int(length)) + n, err = conn.Read(m[:int(length)]) + if err != nil || n == 0 { + if err != nil { + return nil, err + } + return nil, ErrShortRead + } + i := n + for i < int(length) { + j, err := conn.Read(m[i:int(length)]) + if err != nil { + return nil, err + } + i += j + } + n = i + m = m[:n] + return m, nil +} + +func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { + conn.SetReadDeadline(time.Now().Add(timeout)) + m := make([]byte, srv.UDPSize) + n, s, err := ReadFromSessionUDP(conn, m) + if err != nil || n == 0 { + if err != nil { + return nil, nil, err + } + return nil, nil, ErrShortRead + } + m = m[:n] + return m, s, nil +} + +// WriteMsg implements the ResponseWriter.WriteMsg method. +func (w *response) WriteMsg(m *Msg) (err error) { + var data []byte + if w.tsigSecret != nil { // if no secrets, dont check for the tsig (which is a longer check) + if t := m.IsTsig(); t != nil { + data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly) + if err != nil { + return err + } + _, err = w.writer.Write(data) + return err + } + } + data, err = m.Pack() + if err != nil { + return err + } + _, err = w.writer.Write(data) + return err +} + +// Write implements the ResponseWriter.Write method. 
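+// For TCP the message is prepended with a two-byte length field (RFC 1035, section 4.2.2).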
+func (w *response) Write(m []byte) (int, error) { + switch { + case w.udp != nil: + n, err := WriteToSessionUDP(w.udp, m, w.udpSession) + return n, err + case w.tcp != nil: + lm := len(m) + if lm < 2 { + return 0, io.ErrShortBuffer + } + if lm > MaxMsgSize { + return 0, &Error{err: "message too large"} + } + l := make([]byte, 2, 2+lm) + binary.BigEndian.PutUint16(l, uint16(lm)) + m = append(l, m...) + + n, err := io.Copy(w.tcp, bytes.NewReader(m)) + return int(n), err + } + panic("not reached") +} + +// LocalAddr implements the ResponseWriter.LocalAddr method. +func (w *response) LocalAddr() net.Addr { + if w.tcp != nil { + return w.tcp.LocalAddr() + } + return w.udp.LocalAddr() +} + +// RemoteAddr implements the ResponseWriter.RemoteAddr method. +func (w *response) RemoteAddr() net.Addr { return w.remoteAddr } + +// TsigStatus implements the ResponseWriter.TsigStatus method. +func (w *response) TsigStatus() error { return w.tsigStatus } + +// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method. +func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b } + +// Hijack implements the ResponseWriter.Hijack method. +func (w *response) Hijack() { w.hijacked = true } + +// Close implements the ResponseWriter.Close method +func (w *response) Close() error { + // Can't close the udp conn, as that is actually the listener. + if w.tcp != nil { + e := w.tcp.Close() + w.tcp = nil + return e + } + return nil +} diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go new file mode 100644 index 000000000..2dce06af8 --- /dev/null +++ b/vendor/github.com/miekg/dns/sig0.go @@ -0,0 +1,219 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "encoding/binary" + "math/big" + "strings" + "time" +) + +// Sign signs a dns.Msg. It fills the signature with the appropriate data. +// The SIG record should have the SignerName, KeyTag, Algorithm, Inception +// and Expiration set. +func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) { + if k == nil { + return nil, ErrPrivKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return nil, ErrKey + } + rr.Header().Rrtype = TypeSIG + rr.Header().Class = ClassANY + rr.Header().Ttl = 0 + rr.Header().Name = "." + rr.OrigTtl = 0 + rr.TypeCovered = 0 + rr.Labels = 0 + + buf := make([]byte, m.Len()+rr.len()) + mbuf, err := m.PackBuffer(buf) + if err != nil { + return nil, err + } + if &buf[0] != &mbuf[0] { + return nil, ErrBuf + } + off, err := PackRR(rr, buf, len(mbuf), nil, false) + if err != nil { + return nil, err + } + buf = buf[:off:cap(buf)] + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return nil, ErrAlg + } + + hasher := hash.New() + // Write SIG rdata + hasher.Write(buf[len(mbuf)+1+2+2+4+2:]) + // Write message + hasher.Write(buf[:len(mbuf)]) + + signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm) + if err != nil { + return nil, err + } + + rr.Signature = toBase64(signature) + sig := string(signature) + + buf = append(buf, sig...) + if len(buf) > int(^uint16(0)) { + return nil, ErrBuf + } + // Adjust sig data length + rdoff := len(mbuf) + 1 + 2 + 2 + 4 + rdlen := binary.BigEndian.Uint16(buf[rdoff:]) + rdlen += uint16(len(sig)) + binary.BigEndian.PutUint16(buf[rdoff:], rdlen) + // Adjust additional count + adc := binary.BigEndian.Uint16(buf[10:]) + adc++ + binary.BigEndian.PutUint16(buf[10:], adc) + return buf, nil +} + +// Verify validates the message buf using the key k. 
+// It's assumed that buf is a valid message from which rr was unpacked. +func (rr *SIG) Verify(k *KEY, buf []byte) error { + if k == nil { + return ErrKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + + var hash crypto.Hash + switch rr.Algorithm { + case DSA, RSASHA1: + hash = crypto.SHA1 + case RSASHA256, ECDSAP256SHA256: + hash = crypto.SHA256 + case ECDSAP384SHA384: + hash = crypto.SHA384 + case RSASHA512: + hash = crypto.SHA512 + default: + return ErrAlg + } + hasher := hash.New() + + buflen := len(buf) + qdc := binary.BigEndian.Uint16(buf[4:]) + anc := binary.BigEndian.Uint16(buf[6:]) + auc := binary.BigEndian.Uint16(buf[8:]) + adc := binary.BigEndian.Uint16(buf[10:]) + offset := 12 + var err error + for i := uint16(0); i < qdc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type and Class + offset += 2 + 2 + } + for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type, Class and TTL + offset += 2 + 2 + 4 + if offset+1 >= buflen { + continue + } + var rdlen uint16 + rdlen = binary.BigEndian.Uint16(buf[offset:]) + offset += 2 + offset += int(rdlen) + } + if offset >= buflen { + return &Error{err: "overflowing unpacking signed message"} + } + + // offset should be just prior to SIG + bodyend := offset + // owner name SHOULD be root + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip Type, Class, TTL, RDLen + offset += 2 + 2 + 4 + 2 + sigstart := offset + // Skip Type Covered, Algorithm, Labels, Original TTL + offset += 2 + 1 + 1 + 4 + if offset+4+4 >= buflen { + return &Error{err: "overflow unpacking signed message"} + } + expire := binary.BigEndian.Uint32(buf[offset:]) + offset += 4 + incept := binary.BigEndian.Uint32(buf[offset:]) + offset += 4 + now := uint32(time.Now().Unix()) + if now < incept || now > expire { + return ErrTime + } + // Skip key tag + offset += 2 + var signername string + signername, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // If key has come from the DNS name compression might + // have mangled the case of the name + if strings.ToLower(signername) != strings.ToLower(k.Header().Name) { + return &Error{err: "signer name doesn't match key name"} + } + sigend := offset + hasher.Write(buf[sigstart:sigend]) + hasher.Write(buf[:10]) + hasher.Write([]byte{ + byte((adc - 1) << 8), + byte(adc - 1), + }) + hasher.Write(buf[12:bodyend]) + + hashed := hasher.Sum(nil) + sig := buf[sigend:] + switch k.Algorithm { + case DSA: + pk := k.publicKeyDSA() + sig = sig[1:] + r := big.NewInt(0) + r.SetBytes(sig[:len(sig)/2]) + s := big.NewInt(0) + s.SetBytes(sig[len(sig)/2:]) + if pk != nil { + if dsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + case RSASHA1, RSASHA256, RSASHA512: + pk := k.publicKeyRSA() + if pk != nil { + return rsa.VerifyPKCS1v15(pk, hash, hashed, sig) + } + case ECDSAP256SHA256, ECDSAP384SHA384: + pk := k.publicKeyECDSA() + r := big.NewInt(0) + r.SetBytes(sig[:len(sig)/2]) + s := big.NewInt(0) + s.SetBytes(sig[len(sig)/2:]) + if pk != nil { + if ecdsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + } + return ErrKeyAlg +} diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go new file mode 100644 index 000000000..9573c7d0b --- /dev/null +++ 
b/vendor/github.com/miekg/dns/singleinflight.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Adapted for dns package usage by Miek Gieben. + +package dns + +import "sync" +import "time" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + val *Msg + rtt time.Duration + err error + dups int +} + +// singleflight represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type singleflight struct { + sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) { + g.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.Unlock() + c.wg.Wait() + return c.val, c.rtt, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.Unlock() + + c.val, c.rtt, c.err = fn() + c.wg.Done() + + g.Lock() + delete(g.m, key) + g.Unlock() + + return c.val, c.rtt, c.err, c.dups > 0 +} diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/miekg/dns/tlsa.go new file mode 100644 index 000000000..34fe6615a --- /dev/null +++ b/vendor/github.com/miekg/dns/tlsa.go @@ -0,0 +1,86 @@ +package dns + +import ( + "crypto/sha256" + "crypto/sha512" + "crypto/x509" + "encoding/hex" + "errors" + "io" + "net" + "strconv" +) + +// CertificateToDANE converts a certificate to a hex string as used in the TLSA record. +func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) { + switch matchingType { + case 0: + switch selector { + case 0: + return hex.EncodeToString(cert.Raw), nil + case 1: + return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil + } + case 1: + h := sha256.New() + switch selector { + case 0: + io.WriteString(h, string(cert.Raw)) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + io.WriteString(h, string(cert.RawSubjectPublicKeyInfo)) + return hex.EncodeToString(h.Sum(nil)), nil + } + case 2: + h := sha512.New() + switch selector { + case 0: + io.WriteString(h, string(cert.Raw)) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + io.WriteString(h, string(cert.RawSubjectPublicKeyInfo)) + return hex.EncodeToString(h.Sum(nil)), nil + } + } + return "", errors.New("dns: bad TLSA MatchingType or TLSA Selector") +} + +// Sign creates a TLSA record from an SSL certificate. +func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { + r.Hdr.Rrtype = TypeTLSA + r.Usage = uint8(usage) + r.Selector = uint8(selector) + r.MatchingType = uint8(matchingType) + + r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err + } + return nil +} + +// Verify verifies a TLSA record against an SSL certificate. If it is OK +// a nil error is returned. +func (r *TLSA) Verify(cert *x509.Certificate) error { + c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err // Not also ErrSig? 
+ } + if r.Certificate == c { + return nil + } + return ErrSig // ErrSig, really? +} + +// TLSAName returns the ownername of a TLSA resource record as per the +// rules specified in RFC 6698, Section 3. +func TLSAName(name, service, network string) (string, error) { + if !IsFqdn(name) { + return "", ErrFqdn + } + p, err := net.LookupPort(network, service) + if err != nil { + return "", err + } + return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil +} diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go new file mode 100644 index 000000000..78365e1c5 --- /dev/null +++ b/vendor/github.com/miekg/dns/tsig.go @@ -0,0 +1,384 @@ +package dns + +import ( + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/binary" + "encoding/hex" + "hash" + "io" + "strconv" + "strings" + "time" +) + +// HMAC hashing codes. These are transmitted as domain names. +const ( + HmacMD5 = "hmac-md5.sig-alg.reg.int." + HmacSHA1 = "hmac-sha1." + HmacSHA256 = "hmac-sha256." + HmacSHA512 = "hmac-sha512." +) + +// TSIG is the RR the holds the transaction signature of a message. +// See RFC 2845 and RFC 4635. +type TSIG struct { + Hdr RR_Header + Algorithm string `dns:"domain-name"` + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 + MACSize uint16 + MAC string `dns:"size-hex:MACSize"` + OrigId uint16 + Error uint16 + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// TSIG has no official presentation format, but this will suffice. + +func (rr *TSIG) String() string { + s := "\n;; TSIG PSEUDOSECTION:\n" + s += rr.Hdr.String() + + " " + rr.Algorithm + + " " + tsigTimeToString(rr.TimeSigned) + + " " + strconv.Itoa(int(rr.Fudge)) + + " " + strconv.Itoa(int(rr.MACSize)) + + " " + strings.ToUpper(rr.MAC) + + " " + strconv.Itoa(int(rr.OrigId)) + + " " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR + " " + strconv.Itoa(int(rr.OtherLen)) + + " " + rr.OtherData + return s +} + +// The following values must be put in wireformat, so that the MAC can be calculated. +// RFC 2845, section 3.4.2. TSIG Variables. +type tsigWireFmt struct { + // From RR_Header + Name string `dns:"domain-name"` + Class uint16 + Ttl uint32 + // Rdata of the TSIG + Algorithm string `dns:"domain-name"` + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 + // MACSize, MAC and OrigId excluded + Error uint16 + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. Request MAC +type macWireFmt struct { + MACSize uint16 + MAC string `dns:"size-hex:MACSize"` +} + +// 3.3. Time values used in TSIG calculations +type timerWireFmt struct { + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 +} + +// TsigGenerate fills out the TSIG record attached to the message. +// The message should contain +// a "stub" TSIG RR with the algorithm, key name (owner name of the RR), +// time fudge (defaults to 300 seconds) and the current time +// The TSIG MAC is saved in that Tsig RR. +// When TsigGenerate is called for the first time requestMAC is set to the empty string and +// timersOnly is false. +// If something goes wrong an error is returned, otherwise it is nil. 
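+// A rough usage sketch (key name, algorithm and fudge below are only illustrative):
+//
+//	m.SetTsig("example-key.", HmacSHA256, 300, time.Now().Unix())
+//	buf, mac, err := TsigGenerate(m, secret, "", false)
+//
+// where secret is the base64 encoded shared secret.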
+func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) { + if m.IsTsig() == nil { + panic("dns: TSIG not last RR in additional") + } + // If we barf here, the caller is to blame + rawsecret, err := fromBase64([]byte(secret)) + if err != nil { + return nil, "", err + } + + rr := m.Extra[len(m.Extra)-1].(*TSIG) + m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg + mbuf, err := m.Pack() + if err != nil { + return nil, "", err + } + buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly) + + t := new(TSIG) + var h hash.Hash + switch strings.ToLower(rr.Algorithm) { + case HmacMD5: + h = hmac.New(md5.New, []byte(rawsecret)) + case HmacSHA1: + h = hmac.New(sha1.New, []byte(rawsecret)) + case HmacSHA256: + h = hmac.New(sha256.New, []byte(rawsecret)) + case HmacSHA512: + h = hmac.New(sha512.New, []byte(rawsecret)) + default: + return nil, "", ErrKeyAlg + } + io.WriteString(h, string(buf)) + t.MAC = hex.EncodeToString(h.Sum(nil)) + t.MACSize = uint16(len(t.MAC) / 2) // Size is half! + + t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0} + t.Fudge = rr.Fudge + t.TimeSigned = rr.TimeSigned + t.Algorithm = rr.Algorithm + t.OrigId = m.Id + + tbuf := make([]byte, t.len()) + if off, err := PackRR(t, tbuf, 0, nil, false); err == nil { + tbuf = tbuf[:off] // reset to actual size used + } else { + return nil, "", err + } + mbuf = append(mbuf, tbuf...) + // Update the ArCount directly in the buffer. + binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1)) + + return mbuf, t.MAC, nil +} + +// TsigVerify verifies the TSIG on a message. +// If the signature does not validate err contains the +// error, otherwise it is nil. +func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { + rawsecret, err := fromBase64([]byte(secret)) + if err != nil { + return err + } + // Strip the TSIG from the incoming msg + stripped, tsig, err := stripTsig(msg) + if err != nil { + return err + } + + msgMAC, err := hex.DecodeString(tsig.MAC) + if err != nil { + return err + } + + buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly) + + // Fudge factor works both ways. A message can arrive before it was signed because + // of clock skew. + now := uint64(time.Now().Unix()) + ti := now - tsig.TimeSigned + if now < tsig.TimeSigned { + ti = tsig.TimeSigned - now + } + if uint64(tsig.Fudge) < ti { + return ErrTime + } + + var h hash.Hash + switch strings.ToLower(tsig.Algorithm) { + case HmacMD5: + h = hmac.New(md5.New, rawsecret) + case HmacSHA1: + h = hmac.New(sha1.New, rawsecret) + case HmacSHA256: + h = hmac.New(sha256.New, rawsecret) + case HmacSHA512: + h = hmac.New(sha512.New, rawsecret) + default: + return ErrKeyAlg + } + h.Write(buf) + if !hmac.Equal(h.Sum(nil), msgMAC) { + return ErrSig + } + return nil +} + +// Create a wiredata buffer for the MAC calculation. +func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte { + var buf []byte + if rr.TimeSigned == 0 { + rr.TimeSigned = uint64(time.Now().Unix()) + } + if rr.Fudge == 0 { + rr.Fudge = 300 // Standard (RFC) default. 
+ } + + if requestMAC != "" { + m := new(macWireFmt) + m.MACSize = uint16(len(requestMAC) / 2) + m.MAC = requestMAC + buf = make([]byte, len(requestMAC)) // long enough + n, _ := packMacWire(m, buf) + buf = buf[:n] + } + + tsigvar := make([]byte, DefaultMsgSize) + if timersOnly { + tsig := new(timerWireFmt) + tsig.TimeSigned = rr.TimeSigned + tsig.Fudge = rr.Fudge + n, _ := packTimerWire(tsig, tsigvar) + tsigvar = tsigvar[:n] + } else { + tsig := new(tsigWireFmt) + tsig.Name = strings.ToLower(rr.Hdr.Name) + tsig.Class = ClassANY + tsig.Ttl = rr.Hdr.Ttl + tsig.Algorithm = strings.ToLower(rr.Algorithm) + tsig.TimeSigned = rr.TimeSigned + tsig.Fudge = rr.Fudge + tsig.Error = rr.Error + tsig.OtherLen = rr.OtherLen + tsig.OtherData = rr.OtherData + n, _ := packTsigWire(tsig, tsigvar) + tsigvar = tsigvar[:n] + } + + if requestMAC != "" { + x := append(buf, msgbuf...) + buf = append(x, tsigvar...) + } else { + buf = append(msgbuf, tsigvar...) + } + return buf +} + +// Strip the TSIG from the raw message. +func stripTsig(msg []byte) ([]byte, *TSIG, error) { + // Copied from msg.go's Unpack() Header, but modified. + var ( + dh Header + err error + ) + off, tsigoff := 0, 0 + + if dh, off, err = unpackMsgHdr(msg, off); err != nil { + return nil, nil, err + } + if dh.Arcount == 0 { + return nil, nil, ErrNoSig + } + + // Rcode, see msg.go Unpack() + if int(dh.Bits&0xF) == RcodeNotAuth { + return nil, nil, ErrAuth + } + + for i := 0; i < int(dh.Qdcount); i++ { + _, off, err = unpackQuestion(msg, off) + if err != nil { + return nil, nil, err + } + } + + _, off, err = unpackRRslice(int(dh.Ancount), msg, off) + if err != nil { + return nil, nil, err + } + _, off, err = unpackRRslice(int(dh.Nscount), msg, off) + if err != nil { + return nil, nil, err + } + + rr := new(TSIG) + var extra RR + for i := 0; i < int(dh.Arcount); i++ { + tsigoff = off + extra, off, err = UnpackRR(msg, off) + if err != nil { + return nil, nil, err + } + if extra.Header().Rrtype == TypeTSIG { + rr = extra.(*TSIG) + // Adjust Arcount. + arcount := binary.BigEndian.Uint16(msg[10:]) + binary.BigEndian.PutUint16(msg[10:], arcount-1) + break + } + } + if rr == nil { + return nil, nil, ErrNoSig + } + return msg[:tsigoff], rr, nil +} + +// Translate the TSIG time signed into a date. There is no +// need for RFC1982 calculations as this date is 48 bits. 
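+// The result is formatted as YYYYMMDDHHmmss (UTC).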
+func tsigTimeToString(t uint64) string { + ti := time.Unix(int64(t), 0).UTC() + return ti.Format("20060102150405") +} + +func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) { + // copied from zmsg.go TSIG packing + // RR_Header + off, err := PackDomainName(tw.Name, msg, 0, nil, false) + if err != nil { + return off, err + } + off, err = packUint16(tw.Class, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(tw.Ttl, msg, off) + if err != nil { + return off, err + } + + off, err = PackDomainName(tw.Algorithm, msg, off, nil, false) + if err != nil { + return off, err + } + off, err = packUint48(tw.TimeSigned, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(tw.Fudge, msg, off) + if err != nil { + return off, err + } + + off, err = packUint16(tw.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(tw.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(tw.OtherData, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func packMacWire(mw *macWireFmt, msg []byte) (int, error) { + off, err := packUint16(mw.MACSize, msg, 0) + if err != nil { + return off, err + } + off, err = packStringHex(mw.MAC, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) { + off, err := packUint48(tw.TimeSigned, msg, 0) + if err != nil { + return off, err + } + off, err = packUint16(tw.Fudge, msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go new file mode 100644 index 000000000..5059d1a79 --- /dev/null +++ b/vendor/github.com/miekg/dns/types.go @@ -0,0 +1,1249 @@ +package dns + +import ( + "fmt" + "net" + "strconv" + "strings" + "time" +) + +type ( + // Type is a DNS type. + Type uint16 + // Class is a DNS class. + Class uint16 + // Name is a DNS domain name. + Name string +) + +// Packet formats + +// Wire constants and supported types. 
+const ( + // valid RR_Header.Rrtype and Question.qtype + + TypeNone uint16 = 0 + TypeA uint16 = 1 + TypeNS uint16 = 2 + TypeMD uint16 = 3 + TypeMF uint16 = 4 + TypeCNAME uint16 = 5 + TypeSOA uint16 = 6 + TypeMB uint16 = 7 + TypeMG uint16 = 8 + TypeMR uint16 = 9 + TypeNULL uint16 = 10 + TypePTR uint16 = 12 + TypeHINFO uint16 = 13 + TypeMINFO uint16 = 14 + TypeMX uint16 = 15 + TypeTXT uint16 = 16 + TypeRP uint16 = 17 + TypeAFSDB uint16 = 18 + TypeX25 uint16 = 19 + TypeISDN uint16 = 20 + TypeRT uint16 = 21 + TypeNSAPPTR uint16 = 23 + TypeSIG uint16 = 24 + TypeKEY uint16 = 25 + TypePX uint16 = 26 + TypeGPOS uint16 = 27 + TypeAAAA uint16 = 28 + TypeLOC uint16 = 29 + TypeNXT uint16 = 30 + TypeEID uint16 = 31 + TypeNIMLOC uint16 = 32 + TypeSRV uint16 = 33 + TypeATMA uint16 = 34 + TypeNAPTR uint16 = 35 + TypeKX uint16 = 36 + TypeCERT uint16 = 37 + TypeDNAME uint16 = 39 + TypeOPT uint16 = 41 // EDNS + TypeDS uint16 = 43 + TypeSSHFP uint16 = 44 + TypeRRSIG uint16 = 46 + TypeNSEC uint16 = 47 + TypeDNSKEY uint16 = 48 + TypeDHCID uint16 = 49 + TypeNSEC3 uint16 = 50 + TypeNSEC3PARAM uint16 = 51 + TypeTLSA uint16 = 52 + TypeHIP uint16 = 55 + TypeNINFO uint16 = 56 + TypeRKEY uint16 = 57 + TypeTALINK uint16 = 58 + TypeCDS uint16 = 59 + TypeCDNSKEY uint16 = 60 + TypeOPENPGPKEY uint16 = 61 + TypeSPF uint16 = 99 + TypeUINFO uint16 = 100 + TypeUID uint16 = 101 + TypeGID uint16 = 102 + TypeUNSPEC uint16 = 103 + TypeNID uint16 = 104 + TypeL32 uint16 = 105 + TypeL64 uint16 = 106 + TypeLP uint16 = 107 + TypeEUI48 uint16 = 108 + TypeEUI64 uint16 = 109 + TypeURI uint16 = 256 + TypeCAA uint16 = 257 + + TypeTKEY uint16 = 249 + TypeTSIG uint16 = 250 + + // valid Question.Qtype only + TypeIXFR uint16 = 251 + TypeAXFR uint16 = 252 + TypeMAILB uint16 = 253 + TypeMAILA uint16 = 254 + TypeANY uint16 = 255 + + TypeTA uint16 = 32768 + TypeDLV uint16 = 32769 + TypeReserved uint16 = 65535 + + // valid Question.Qclass + ClassINET = 1 + ClassCSNET = 2 + ClassCHAOS = 3 + ClassHESIOD = 4 + ClassNONE = 254 + ClassANY = 255 + + // Message Response Codes. + RcodeSuccess = 0 + RcodeFormatError = 1 + RcodeServerFailure = 2 + RcodeNameError = 3 + RcodeNotImplemented = 4 + RcodeRefused = 5 + RcodeYXDomain = 6 + RcodeYXRrset = 7 + RcodeNXRrset = 8 + RcodeNotAuth = 9 + RcodeNotZone = 10 + RcodeBadSig = 16 // TSIG + RcodeBadVers = 16 // EDNS0 + RcodeBadKey = 17 + RcodeBadTime = 18 + RcodeBadMode = 19 // TKEY + RcodeBadName = 20 + RcodeBadAlg = 21 + RcodeBadTrunc = 22 // TSIG + RcodeBadCookie = 23 // DNS Cookies + + // Message Opcodes. There is no 3. + OpcodeQuery = 0 + OpcodeIQuery = 1 + OpcodeStatus = 2 + OpcodeNotify = 4 + OpcodeUpdate = 5 +) + +// Headers is the wire format for the DNS packet header. +type Header struct { + Id uint16 + Bits uint16 + Qdcount, Ancount, Nscount, Arcount uint16 +} + +const ( + headerSize = 12 + + // Header.Bits + _QR = 1 << 15 // query/response (response=1) + _AA = 1 << 10 // authoritative + _TC = 1 << 9 // truncated + _RD = 1 << 8 // recursion desired + _RA = 1 << 7 // recursion available + _Z = 1 << 6 // Z + _AD = 1 << 5 // authticated data + _CD = 1 << 4 // checking disabled + + LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. + LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2. 
+ + LOC_HOURS = 60 * 1000 + LOC_DEGREES = 60 * LOC_HOURS + + LOC_ALTITUDEBASE = 100000 +) + +// Different Certificate Types, see RFC 4398, Section 2.1 +const ( + CertPKIX = 1 + iota + CertSPKI + CertPGP + CertIPIX + CertISPKI + CertIPGP + CertACPKIX + CertIACPKIX + CertURI = 253 + CertOID = 254 +) + +// CertTypeToString converts the Cert Type to its string representation. +// See RFC 4398 and RFC 6944. +var CertTypeToString = map[uint16]string{ + CertPKIX: "PKIX", + CertSPKI: "SPKI", + CertPGP: "PGP", + CertIPIX: "IPIX", + CertISPKI: "ISPKI", + CertIPGP: "IPGP", + CertACPKIX: "ACPKIX", + CertIACPKIX: "IACPKIX", + CertURI: "URI", + CertOID: "OID", +} + +// StringToCertType is the reverseof CertTypeToString. +var StringToCertType = reverseInt16(CertTypeToString) + +//go:generate go run types_generate.go + +// Question holds a DNS question. There can be multiple questions in the +// question section of a message. Usually there is just one. +type Question struct { + Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed) + Qtype uint16 + Qclass uint16 +} + +func (q *Question) len() int { + return len(q.Name) + 1 + 2 + 2 +} + +func (q *Question) String() (s string) { + // prefix with ; (as in dig) + s = ";" + sprintName(q.Name) + "\t" + s += Class(q.Qclass).String() + "\t" + s += " " + Type(q.Qtype).String() + return s +} + +// ANY is a wildcard record. See RFC 1035, Section 3.2.3. ANY +// is named "*" there. +type ANY struct { + Hdr RR_Header + // Does not have any rdata +} + +func (rr *ANY) String() string { return rr.Hdr.String() } + +type CNAME struct { + Hdr RR_Header + Target string `dns:"cdomain-name"` +} + +func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) } + +type HINFO struct { + Hdr RR_Header + Cpu string + Os string +} + +func (rr *HINFO) String() string { + return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os}) +} + +type MB struct { + Hdr RR_Header + Mb string `dns:"cdomain-name"` +} + +func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) } + +type MG struct { + Hdr RR_Header + Mg string `dns:"cdomain-name"` +} + +func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) } + +type MINFO struct { + Hdr RR_Header + Rmail string `dns:"cdomain-name"` + Email string `dns:"cdomain-name"` +} + +func (rr *MINFO) String() string { + return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email) +} + +type MR struct { + Hdr RR_Header + Mr string `dns:"cdomain-name"` +} + +func (rr *MR) String() string { + return rr.Hdr.String() + sprintName(rr.Mr) +} + +type MF struct { + Hdr RR_Header + Mf string `dns:"cdomain-name"` +} + +func (rr *MF) String() string { + return rr.Hdr.String() + sprintName(rr.Mf) +} + +type MD struct { + Hdr RR_Header + Md string `dns:"cdomain-name"` +} + +func (rr *MD) String() string { + return rr.Hdr.String() + sprintName(rr.Md) +} + +type MX struct { + Hdr RR_Header + Preference uint16 + Mx string `dns:"cdomain-name"` +} + +func (rr *MX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx) +} + +type AFSDB struct { + Hdr RR_Header + Subtype uint16 + Hostname string `dns:"cdomain-name"` +} + +func (rr *AFSDB) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname) +} + +type X25 struct { + Hdr RR_Header + PSDNAddress string +} + +func (rr *X25) String() string { + return rr.Hdr.String() + rr.PSDNAddress +} + +type RT struct { + Hdr RR_Header + 
Preference uint16 + Host string `dns:"cdomain-name"` +} + +func (rr *RT) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host) +} + +type NS struct { + Hdr RR_Header + Ns string `dns:"cdomain-name"` +} + +func (rr *NS) String() string { + return rr.Hdr.String() + sprintName(rr.Ns) +} + +type PTR struct { + Hdr RR_Header + Ptr string `dns:"cdomain-name"` +} + +func (rr *PTR) String() string { + return rr.Hdr.String() + sprintName(rr.Ptr) +} + +type RP struct { + Hdr RR_Header + Mbox string `dns:"domain-name"` + Txt string `dns:"domain-name"` +} + +func (rr *RP) String() string { + return rr.Hdr.String() + rr.Mbox + " " + sprintTxt([]string{rr.Txt}) +} + +type SOA struct { + Hdr RR_Header + Ns string `dns:"cdomain-name"` + Mbox string `dns:"cdomain-name"` + Serial uint32 + Refresh uint32 + Retry uint32 + Expire uint32 + Minttl uint32 +} + +func (rr *SOA) String() string { + return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) + + " " + strconv.FormatInt(int64(rr.Serial), 10) + + " " + strconv.FormatInt(int64(rr.Refresh), 10) + + " " + strconv.FormatInt(int64(rr.Retry), 10) + + " " + strconv.FormatInt(int64(rr.Expire), 10) + + " " + strconv.FormatInt(int64(rr.Minttl), 10) +} + +type TXT struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +func sprintName(s string) string { + src := []byte(s) + dst := make([]byte, 0, len(src)) + for i := 0; i < len(src); { + if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { + dst = append(dst, src[i:i+2]...) + i += 2 + } else { + b, n := nextByte(src, i) + if n == 0 { + i++ // dangling back slash + } else if b == '.' { + dst = append(dst, b) + } else { + dst = appendDomainNameByte(dst, b) + } + i += n + } + } + return string(dst) +} + +func sprintTxtOctet(s string) string { + src := []byte(s) + dst := make([]byte, 0, len(src)) + dst = append(dst, '"') + for i := 0; i < len(src); { + if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { + dst = append(dst, src[i:i+2]...) + i += 2 + } else { + b, n := nextByte(src, i) + if n == 0 { + i++ // dangling back slash + } else if b == '.' { + dst = append(dst, b) + } else { + if b < ' ' || b > '~' { + dst = appendByte(dst, b) + } else { + dst = append(dst, b) + } + } + i += n + } + } + dst = append(dst, '"') + return string(dst) +} + +func sprintTxt(txt []string) string { + var out []byte + for i, s := range txt { + if i > 0 { + out = append(out, ` "`...) 
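+			// the previous string was closed with '"' at the end of the prior loop iteration; emit a separating space and reopen the quotes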
+ } else { + out = append(out, '"') + } + bs := []byte(s) + for j := 0; j < len(bs); { + b, n := nextByte(bs, j) + if n == 0 { + break + } + out = appendTXTStringByte(out, b) + j += n + } + out = append(out, '"') + } + return string(out) +} + +func appendDomainNameByte(s []byte, b byte) []byte { + switch b { + case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape + return append(s, '\\', b) + } + return appendTXTStringByte(s, b) +} + +func appendTXTStringByte(s []byte, b byte) []byte { + switch b { + case '\t': + return append(s, '\\', 't') + case '\r': + return append(s, '\\', 'r') + case '\n': + return append(s, '\\', 'n') + case '"', '\\': + return append(s, '\\', b) + } + if b < ' ' || b > '~' { + return appendByte(s, b) + } + return append(s, b) +} + +func appendByte(s []byte, b byte) []byte { + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + return s +} + +func nextByte(b []byte, offset int) (byte, int) { + if offset >= len(b) { + return 0, 0 + } + if b[offset] != '\\' { + // not an escape sequence + return b[offset], 1 + } + switch len(b) - offset { + case 1: // dangling escape + return 0, 0 + case 2, 3: // too short to be \ddd + default: // maybe \ddd + if isDigit(b[offset+1]) && isDigit(b[offset+2]) && isDigit(b[offset+3]) { + return dddToByte(b[offset+1:]), 4 + } + } + // not \ddd, maybe a control char + switch b[offset+1] { + case 't': + return '\t', 2 + case 'r': + return '\r', 2 + case 'n': + return '\n', 2 + default: + return b[offset+1], 2 + } +} + +type SPF struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +type SRV struct { + Hdr RR_Header + Priority uint16 + Weight uint16 + Port uint16 + Target string `dns:"domain-name"` +} + +func (rr *SRV) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Priority)) + " " + + strconv.Itoa(int(rr.Weight)) + " " + + strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target) +} + +type NAPTR struct { + Hdr RR_Header + Order uint16 + Preference uint16 + Flags string + Service string + Regexp string + Replacement string `dns:"domain-name"` +} + +func (rr *NAPTR) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Order)) + " " + + strconv.Itoa(int(rr.Preference)) + " " + + "\"" + rr.Flags + "\" " + + "\"" + rr.Service + "\" " + + "\"" + rr.Regexp + "\" " + + rr.Replacement +} + +// The CERT resource record, see RFC 4398. +type CERT struct { + Hdr RR_Header + Type uint16 + KeyTag uint16 + Algorithm uint8 + Certificate string `dns:"base64"` +} + +func (rr *CERT) String() string { + var ( + ok bool + certtype, algorithm string + ) + if certtype, ok = CertTypeToString[rr.Type]; !ok { + certtype = strconv.Itoa(int(rr.Type)) + } + if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok { + algorithm = strconv.Itoa(int(rr.Algorithm)) + } + return rr.Hdr.String() + certtype + + " " + strconv.Itoa(int(rr.KeyTag)) + + " " + algorithm + + " " + rr.Certificate +} + +// The DNAME resource record, see RFC 2672. 
+type DNAME struct { + Hdr RR_Header + Target string `dns:"domain-name"` +} + +func (rr *DNAME) String() string { + return rr.Hdr.String() + sprintName(rr.Target) +} + +type A struct { + Hdr RR_Header + A net.IP `dns:"a"` +} + +func (rr *A) String() string { + if rr.A == nil { + return rr.Hdr.String() + } + return rr.Hdr.String() + rr.A.String() +} + +type AAAA struct { + Hdr RR_Header + AAAA net.IP `dns:"aaaa"` +} + +func (rr *AAAA) String() string { + if rr.AAAA == nil { + return rr.Hdr.String() + } + return rr.Hdr.String() + rr.AAAA.String() +} + +type PX struct { + Hdr RR_Header + Preference uint16 + Map822 string `dns:"domain-name"` + Mapx400 string `dns:"domain-name"` +} + +func (rr *PX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400) +} + +type GPOS struct { + Hdr RR_Header + Longitude string + Latitude string + Altitude string +} + +func (rr *GPOS) String() string { + return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude +} + +type LOC struct { + Hdr RR_Header + Version uint8 + Size uint8 + HorizPre uint8 + VertPre uint8 + Latitude uint32 + Longitude uint32 + Altitude uint32 +} + +// cmToM takes a cm value expressed in RFC1876 SIZE mantissa/exponent +// format and returns a string in m (two decimals for the cm) +func cmToM(m, e uint8) string { + if e < 2 { + if e == 1 { + m *= 10 + } + + return fmt.Sprintf("0.%02d", m) + } + + s := fmt.Sprintf("%d", m) + for e > 2 { + s += "0" + e-- + } + return s +} + +func (rr *LOC) String() string { + s := rr.Hdr.String() + + lat := rr.Latitude + ns := "N" + if lat > LOC_EQUATOR { + lat = lat - LOC_EQUATOR + } else { + ns = "S" + lat = LOC_EQUATOR - lat + } + h := lat / LOC_DEGREES + lat = lat % LOC_DEGREES + m := lat / LOC_HOURS + lat = lat % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lat) / 1000), ns) + + lon := rr.Longitude + ew := "E" + if lon > LOC_PRIMEMERIDIAN { + lon = lon - LOC_PRIMEMERIDIAN + } else { + ew = "W" + lon = LOC_PRIMEMERIDIAN - lon + } + h = lon / LOC_DEGREES + lon = lon % LOC_DEGREES + m = lon / LOC_HOURS + lon = lon % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lon) / 1000), ew) + + var alt = float64(rr.Altitude) / 100 + alt -= LOC_ALTITUDEBASE + if rr.Altitude%100 != 0 { + s += fmt.Sprintf("%.2fm ", alt) + } else { + s += fmt.Sprintf("%.0fm ", alt) + } + + s += cmToM((rr.Size&0xf0)>>4, rr.Size&0x0f) + "m " + s += cmToM((rr.HorizPre&0xf0)>>4, rr.HorizPre&0x0f) + "m " + s += cmToM((rr.VertPre&0xf0)>>4, rr.VertPre&0x0f) + "m" + + return s +} + +// SIG is identical to RRSIG and nowadays only used for SIG(0), RFC2931. 
+type SIG struct { + RRSIG +} + +type RRSIG struct { + Hdr RR_Header + TypeCovered uint16 + Algorithm uint8 + Labels uint8 + OrigTtl uint32 + Expiration uint32 + Inception uint32 + KeyTag uint16 + SignerName string `dns:"domain-name"` + Signature string `dns:"base64"` +} + +func (rr *RRSIG) String() string { + s := rr.Hdr.String() + s += Type(rr.TypeCovered).String() + s += " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.Labels)) + + " " + strconv.FormatInt(int64(rr.OrigTtl), 10) + + " " + TimeToString(rr.Expiration) + + " " + TimeToString(rr.Inception) + + " " + strconv.Itoa(int(rr.KeyTag)) + + " " + sprintName(rr.SignerName) + + " " + rr.Signature + return s +} + +type NSEC struct { + Hdr RR_Header + NextDomain string `dns:"domain-name"` + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *NSEC) String() string { + s := rr.Hdr.String() + sprintName(rr.NextDomain) + for i := 0; i < len(rr.TypeBitMap); i++ { + s += " " + Type(rr.TypeBitMap[i]).String() + } + return s +} + +func (rr *NSEC) len() int { + l := rr.Hdr.len() + len(rr.NextDomain) + 1 + lastwindow := uint32(2 ^ 32 + 1) + for _, t := range rr.TypeBitMap { + window := t / 256 + if uint32(window) != lastwindow { + l += 1 + 32 + } + lastwindow = uint32(window) + } + return l +} + +type DLV struct { + DS +} + +type CDS struct { + DS +} + +type DS struct { + Hdr RR_Header + KeyTag uint16 + Algorithm uint8 + DigestType uint8 + Digest string `dns:"hex"` +} + +func (rr *DS) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.DigestType)) + + " " + strings.ToUpper(rr.Digest) +} + +type KX struct { + Hdr RR_Header + Preference uint16 + Exchanger string `dns:"domain-name"` +} + +func (rr *KX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + + " " + sprintName(rr.Exchanger) +} + +type TA struct { + Hdr RR_Header + KeyTag uint16 + Algorithm uint8 + DigestType uint8 + Digest string `dns:"hex"` +} + +func (rr *TA) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.DigestType)) + + " " + strings.ToUpper(rr.Digest) +} + +type TALINK struct { + Hdr RR_Header + PreviousName string `dns:"domain-name"` + NextName string `dns:"domain-name"` +} + +func (rr *TALINK) String() string { + return rr.Hdr.String() + + sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) +} + +type SSHFP struct { + Hdr RR_Header + Algorithm uint8 + Type uint8 + FingerPrint string `dns:"hex"` +} + +func (rr *SSHFP) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.Type)) + + " " + strings.ToUpper(rr.FingerPrint) +} + +type KEY struct { + DNSKEY +} + +type CDNSKEY struct { + DNSKEY +} + +type DNSKEY struct { + Hdr RR_Header + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` +} + +func (rr *DNSKEY) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Protocol)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + rr.PublicKey +} + +type RKEY struct { + Hdr RR_Header + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` +} + +func (rr *RKEY) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Protocol)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + rr.PublicKey +} + +type NSAPPTR struct { + Hdr RR_Header + Ptr string 
`dns:"domain-name"` +} + +func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) } + +type NSEC3 struct { + Hdr RR_Header + Hash uint8 + Flags uint8 + Iterations uint16 + SaltLength uint8 + Salt string `dns:"size-hex:SaltLength"` + HashLength uint8 + NextDomain string `dns:"size-base32:HashLength"` + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *NSEC3) String() string { + s := rr.Hdr.String() + s += strconv.Itoa(int(rr.Hash)) + + " " + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Iterations)) + + " " + saltToString(rr.Salt) + + " " + rr.NextDomain + for i := 0; i < len(rr.TypeBitMap); i++ { + s += " " + Type(rr.TypeBitMap[i]).String() + } + return s +} + +func (rr *NSEC3) len() int { + l := rr.Hdr.len() + 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 + lastwindow := uint32(2 ^ 32 + 1) + for _, t := range rr.TypeBitMap { + window := t / 256 + if uint32(window) != lastwindow { + l += 1 + 32 + } + lastwindow = uint32(window) + } + return l +} + +type NSEC3PARAM struct { + Hdr RR_Header + Hash uint8 + Flags uint8 + Iterations uint16 + SaltLength uint8 + Salt string `dns:"size-hex:SaltLength"` +} + +func (rr *NSEC3PARAM) String() string { + s := rr.Hdr.String() + s += strconv.Itoa(int(rr.Hash)) + + " " + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Iterations)) + + " " + saltToString(rr.Salt) + return s +} + +type TKEY struct { + Hdr RR_Header + Algorithm string `dns:"domain-name"` + Inception uint32 + Expiration uint32 + Mode uint16 + Error uint16 + KeySize uint16 + Key string + OtherLen uint16 + OtherData string +} + +func (rr *TKEY) String() string { + // It has no presentation format + return "" +} + +// RFC3597 represents an unknown/generic RR. +type RFC3597 struct { + Hdr RR_Header + Rdata string `dns:"hex"` +} + +func (rr *RFC3597) String() string { + // Let's call it a hack + s := rfc3597Header(rr.Hdr) + + s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata + return s +} + +func rfc3597Header(h RR_Header) string { + var s string + + s += sprintName(h.Name) + "\t" + s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" + s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t" + s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t" + return s +} + +type URI struct { + Hdr RR_Header + Priority uint16 + Weight uint16 + Target string `dns:"octet"` +} + +func (rr *URI) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + + " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target) +} + +type DHCID struct { + Hdr RR_Header + Digest string `dns:"base64"` +} + +func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest } + +type TLSA struct { + Hdr RR_Header + Usage uint8 + Selector uint8 + MatchingType uint8 + Certificate string `dns:"hex"` +} + +func (rr *TLSA) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Usage)) + + " " + strconv.Itoa(int(rr.Selector)) + + " " + strconv.Itoa(int(rr.MatchingType)) + + " " + rr.Certificate +} + +type HIP struct { + Hdr RR_Header + HitLength uint8 + PublicKeyAlgorithm uint8 + PublicKeyLength uint16 + Hit string `dns:"size-hex:HitLength"` + PublicKey string `dns:"size-base64:PublicKeyLength"` + RendezvousServers []string `dns:"domain-name"` +} + +func (rr *HIP) String() string { + s := rr.Hdr.String() + + strconv.Itoa(int(rr.PublicKeyAlgorithm)) + + " " + rr.Hit + + " " + rr.PublicKey + for _, d := range rr.RendezvousServers { + s += " " + sprintName(d) + } + return s +} + +type NINFO struct { + Hdr RR_Header + ZSData []string `dns:"txt"` +} + +func 
(rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) } + +type NID struct { + Hdr RR_Header + Preference uint16 + NodeID uint64 +} + +func (rr *NID) String() string { + s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + node := fmt.Sprintf("%0.16x", rr.NodeID) + s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] + return s +} + +type L32 struct { + Hdr RR_Header + Preference uint16 + Locator32 net.IP `dns:"a"` +} + +func (rr *L32) String() string { + if rr.Locator32 == nil { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + } + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + + " " + rr.Locator32.String() +} + +type L64 struct { + Hdr RR_Header + Preference uint16 + Locator64 uint64 +} + +func (rr *L64) String() string { + s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + node := fmt.Sprintf("%0.16X", rr.Locator64) + s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] + return s +} + +type LP struct { + Hdr RR_Header + Preference uint16 + Fqdn string `dns:"domain-name"` +} + +func (rr *LP) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn) +} + +type EUI48 struct { + Hdr RR_Header + Address uint64 `dns:"uint48"` +} + +func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) } + +type EUI64 struct { + Hdr RR_Header + Address uint64 +} + +func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) } + +type CAA struct { + Hdr RR_Header + Flag uint8 + Tag string + Value string `dns:"octet"` +} + +func (rr *CAA) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value) +} + +type UID struct { + Hdr RR_Header + Uid uint32 +} + +func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) } + +type GID struct { + Hdr RR_Header + Gid uint32 +} + +func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) } + +type UINFO struct { + Hdr RR_Header + Uinfo string +} + +func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) } + +type EID struct { + Hdr RR_Header + Endpoint string `dns:"hex"` +} + +func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) } + +type NIMLOC struct { + Hdr RR_Header + Locator string `dns:"hex"` +} + +func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) } + +type OPENPGPKEY struct { + Hdr RR_Header + PublicKey string `dns:"base64"` +} + +func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey } + +// TimeToString translates the RRSIG's incep. and expir. times to the +// string representation used when printing the record. +// It takes serial arithmetic (RFC 1982) into account. +func TimeToString(t uint32) string { + mod := ((int64(t) - time.Now().Unix()) / year68) - 1 + if mod < 0 { + mod = 0 + } + ti := time.Unix(int64(t)-(mod*year68), 0).UTC() + return ti.Format("20060102150405") +} + +// StringToTime translates the RRSIG's incep. and expir. times from +// string values like "20110403154150" to an 32 bit integer. +// It takes serial arithmetic (RFC 1982) into account. 
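+// For example, "20110403154150" denotes 2011-04-03 15:41:50 UTC.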
+func StringToTime(s string) (uint32, error) { + t, err := time.Parse("20060102150405", s) + if err != nil { + return 0, err + } + mod := (t.Unix() / year68) - 1 + if mod < 0 { + mod = 0 + } + return uint32(t.Unix() - (mod * year68)), nil +} + +// saltToString converts a NSECX salt to uppercase and returns "-" when it is empty. +func saltToString(s string) string { + if len(s) == 0 { + return "-" + } + return strings.ToUpper(s) +} + +func euiToString(eui uint64, bits int) (hex string) { + switch bits { + case 64: + hex = fmt.Sprintf("%16.16x", eui) + hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + + "-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16] + case 48: + hex = fmt.Sprintf("%12.12x", eui) + hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + + "-" + hex[8:10] + "-" + hex[10:12] + } + return +} + +// copyIP returns a copy of ip. +func copyIP(ip net.IP) net.IP { + p := make(net.IP, len(ip)) + copy(p, ip) + return p +} diff --git a/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/miekg/dns/types_generate.go new file mode 100644 index 000000000..bf80da329 --- /dev/null +++ b/vendor/github.com/miekg/dns/types_generate.go @@ -0,0 +1,271 @@ +//+build ignore + +// types_generate.go is meant to run with go generate. It will use +// go/{importer,types} to track down all the RR struct types. Then for each type +// it will generate conversion tables (TypeToRR and TypeToString) and banal +// methods (len, Header, copy) based on the struct tags. The generated source is +// written to ztypes.go, and is meant to be checked into git. +package main + +import ( + "bytes" + "fmt" + "go/format" + "go/importer" + "go/types" + "log" + "os" + "strings" + "text/template" +) + +var skipLen = map[string]struct{}{ + "NSEC": {}, + "NSEC3": {}, + "OPT": {}, +} + +var packageHdr = ` +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from type_generate.go + +package dns + +import ( + "encoding/base64" + "net" +) + +` + +var TypeToRR = template.Must(template.New("TypeToRR").Parse(` +// TypeToRR is a map of constructors for each RR type. +var TypeToRR = map[uint16]func() RR{ +{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) }, +{{end}}{{end}} } + +`)) + +var typeToString = template.Must(template.New("typeToString").Parse(` +// TypeToString is a map of strings for each RR type. +var TypeToString = map[uint16]string{ +{{range .}}{{if ne . "NSAPPTR"}} Type{{.}}: "{{.}}", +{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR", +} + +`)) + +var headerFunc = template.Must(template.New("headerFunc").Parse(` +// Header() functions +{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr } +{{end}} + +`)) + +// getTypeStruct will take a type and the package scope, and return the +// (innermost) struct if the type is considered a RR type (currently defined as +// those structs beginning with a RR_Header, could be redefined as implementing +// the RR interface). The bool return value indicates if embedded structs were +// resolved. 
+func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { + st, ok := t.Underlying().(*types.Struct) + if !ok { + return nil, false + } + if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { + return st, false + } + if st.Field(0).Anonymous() { + st, _ := getTypeStruct(st.Field(0).Type(), scope) + return st, true + } + return nil, false +} + +func main() { + // Import and type-check the package + pkg, err := importer.Default().Import("github.com/miekg/dns") + fatalIfErr(err) + scope := pkg.Scope() + + // Collect constants like TypeX + var numberedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + b, ok := o.Type().(*types.Basic) + if !ok || b.Kind() != types.Uint16 { + continue + } + if !strings.HasPrefix(o.Name(), "Type") { + continue + } + name := strings.TrimPrefix(o.Name(), "Type") + if name == "PrivateRR" { + continue + } + numberedTypes = append(numberedTypes, name) + } + + // Collect actual types (*X) + var namedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + if st, _ := getTypeStruct(o.Type(), scope); st == nil { + continue + } + if name == "PrivateRR" { + continue + } + + // Check if corresponding TypeX exists + if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { + log.Fatalf("Constant Type%s does not exist.", o.Name()) + } + + namedTypes = append(namedTypes, o.Name()) + } + + b := &bytes.Buffer{} + b.WriteString(packageHdr) + + // Generate TypeToRR + fatalIfErr(TypeToRR.Execute(b, namedTypes)) + + // Generate typeToString + fatalIfErr(typeToString.Execute(b, numberedTypes)) + + // Generate headerFunc + fatalIfErr(headerFunc.Execute(b, namedTypes)) + + // Generate len() + fmt.Fprint(b, "// len() functions\n") + for _, name := range namedTypes { + if _, ok := skipLen[name]; ok { + continue + } + o := scope.Lookup(name) + st, isEmbedded := getTypeStruct(o.Type(), scope) + if isEmbedded { + continue + } + fmt.Fprintf(b, "func (rr *%s) len() int {\n", name) + fmt.Fprintf(b, "l := rr.Hdr.len()\n") + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: + // ignored + case `dns:"cdomain-name"`, `dns:"domain-name"`, `dns:"txt"`: + o("for _, x := range rr.%s { l += len(x) + 1 }\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch { + case st.Tag(i) == `dns:"-"`: + // ignored + case st.Tag(i) == `dns:"cdomain-name"`, st.Tag(i) == `dns:"domain-name"`: + o("l += len(rr.%s) + 1\n") + case st.Tag(i) == `dns:"octet"`: + o("l += len(rr.%s)\n") + case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): + fallthrough + case st.Tag(i) == `dns:"base64"`: + o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n") + case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): + fallthrough + case st.Tag(i) == `dns:"hex"`: + o("l += len(rr.%s)/2 + 1\n") + case st.Tag(i) == `dns:"a"`: + o("l += net.IPv4len // %s\n") + case st.Tag(i) == `dns:"aaaa"`: + o("l += net.IPv6len // %s\n") + case st.Tag(i) == `dns:"txt"`: + o("for _, t := range rr.%s { l += len(t) + 1 }\n") + case st.Tag(i) == `dns:"uint48"`: + o("l += 6 // %s\n") + case st.Tag(i) == "": + switch st.Field(i).Type().(*types.Basic).Kind() { + case types.Uint8: + o("l += 1 // %s\n") + case types.Uint16: + o("l += 2 // %s\n") + case types.Uint32: + o("l += 4 // %s\n") + case 
types.Uint64: + o("l += 8 // %s\n") + case types.String: + o("l += len(rr.%s) + 1\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + } + fmt.Fprintf(b, "return l }\n") + } + + // Generate copy() + fmt.Fprint(b, "// copy() functions\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, isEmbedded := getTypeStruct(o.Type(), scope) + if isEmbedded { + continue + } + fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name) + fields := []string{"*rr.Hdr.copyHeader()"} + for i := 1; i < st.NumFields(); i++ { + f := st.Field(i).Name() + if sl, ok := st.Field(i).Type().(*types.Slice); ok { + t := sl.Underlying().String() + t = strings.TrimPrefix(t, "[]") + if strings.Contains(t, ".") { + splits := strings.Split(t, ".") + t = splits[len(splits)-1] + } + fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n", + f, t, f, f, f) + fields = append(fields, f) + continue + } + if st.Field(i).Type().String() == "net.IP" { + fields = append(fields, "copyIP(rr."+f+")") + continue + } + fields = append(fields, "rr."+f) + } + fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ",")) + fmt.Fprintf(b, "}\n") + } + + // gofmt + res, err := format.Source(b.Bytes()) + if err != nil { + b.WriteTo(os.Stderr) + log.Fatal(err) + } + + // write result + f, err := os.Create("ztypes.go") + fatalIfErr(err) + defer f.Close() + f.Write(res) +} + +func fatalIfErr(err error) { + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go new file mode 100644 index 000000000..c79c6c883 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp.go @@ -0,0 +1,58 @@ +// +build !windows,!plan9 + +package dns + +import ( + "net" + "syscall" +) + +// SessionUDP holds the remote address and the associated +// out-of-band data. +type SessionUDP struct { + raddr *net.UDPAddr + context []byte +} + +// RemoteAddr returns the remote network address. +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// setUDPSocketOptions sets the UDP socket options. +// This function is implemented on a per platform basis. See udp_*.go for more details +func setUDPSocketOptions(conn *net.UDPConn) error { + sa, err := getUDPSocketName(conn) + if err != nil { + return err + } + switch sa.(type) { + case *syscall.SockaddrInet6: + v6only, err := getUDPSocketOptions6Only(conn) + if err != nil { + return err + } + setUDPSocketOptions6(conn) + if !v6only { + setUDPSocketOptions4(conn) + } + case *syscall.SockaddrInet4: + setUDPSocketOptions4(conn) + } + return nil +} + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + oob := make([]byte, 40) + n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob) + if err != nil { + return n, nil, err + } + return n, &SessionUDP{raddr, oob[:oobn]}, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr. 
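+// The out-of-band data captured by ReadFromSessionUDP is handed back to the kernel, so the reply is sent from the same address the query arrived on (see the PKTINFO setup in udp_linux.go).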
+func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr) + return n, err +} diff --git a/vendor/github.com/miekg/dns/udp_linux.go b/vendor/github.com/miekg/dns/udp_linux.go new file mode 100644 index 000000000..c62d21881 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_linux.go @@ -0,0 +1,73 @@ +// +build linux + +package dns + +// See: +// * http://stackoverflow.com/questions/3062205/setting-the-source-ip-for-a-udp-socket and +// * http://blog.powerdns.com/2012/10/08/on-binding-datagram-udp-sockets-to-the-any-addresses/ +// +// Why do we need this: When listening on 0.0.0.0 with UDP so kernel decides what is the outgoing +// interface, this might not always be the correct one. This code will make sure the egress +// packet's interface matched the ingress' one. + +import ( + "net" + "syscall" +) + +// setUDPSocketOptions4 prepares the v4 socket for sessions. +func setUDPSocketOptions4(conn *net.UDPConn) error { + file, err := conn.File() + if err != nil { + return err + } + if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IP, syscall.IP_PKTINFO, 1); err != nil { + return err + } + // Calling File() above results in the connection becoming blocking, we must fix that. + // See https://github.com/miekg/dns/issues/279 + err = syscall.SetNonblock(int(file.Fd()), true) + if err != nil { + return err + } + return nil +} + +// setUDPSocketOptions6 prepares the v6 socket for sessions. +func setUDPSocketOptions6(conn *net.UDPConn) error { + file, err := conn.File() + if err != nil { + return err + } + if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_RECVPKTINFO, 1); err != nil { + return err + } + err = syscall.SetNonblock(int(file.Fd()), true) + if err != nil { + return err + } + return nil +} + +// getUDPSocketOption6Only return true if the socket is v6 only and false when it is v4/v6 combined +// (dualstack). +func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { + file, err := conn.File() + if err != nil { + return false, err + } + // dual stack. See http://stackoverflow.com/questions/1618240/how-to-support-both-ipv4-and-ipv6-connections + v6only, err := syscall.GetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY) + if err != nil { + return false, err + } + return v6only == 1, nil +} + +func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { + file, err := conn.File() + if err != nil { + return nil, err + } + return syscall.Getsockname(int(file.Fd())) +} diff --git a/vendor/github.com/miekg/dns/udp_other.go b/vendor/github.com/miekg/dns/udp_other.go new file mode 100644 index 000000000..d40732441 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_other.go @@ -0,0 +1,17 @@ +// +build !linux,!plan9 + +package dns + +import ( + "net" + "syscall" +) + +// These do nothing. See udp_linux.go for an example of how to implement this. + +// We tried to adhire to some kind of naming scheme. 
+
+func setUDPSocketOptions4(conn *net.UDPConn) error { return nil }
+func setUDPSocketOptions6(conn *net.UDPConn) error { return nil }
+func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { return false, nil }
+func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { return nil, nil }
diff --git a/vendor/github.com/miekg/dns/udp_plan9.go b/vendor/github.com/miekg/dns/udp_plan9.go
new file mode 100644
index 000000000..b794deeba
--- /dev/null
+++ b/vendor/github.com/miekg/dns/udp_plan9.go
@@ -0,0 +1,34 @@
+package dns
+
+import (
+	"net"
+)
+
+func setUDPSocketOptions(conn *net.UDPConn) error { return nil }
+
+// SessionUDP holds the remote address and the associated
+// out-of-band data.
+type SessionUDP struct {
+	raddr   *net.UDPAddr
+	context []byte
+}
+
+// RemoteAddr returns the remote network address.
+func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
+
+// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
+// net.UDPAddr.
+func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
+	oob := make([]byte, 40)
+	n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
+	if err != nil {
+		return n, nil, err
+	}
+	return n, &SessionUDP{raddr, oob[:oobn]}, err
+}
+
+// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
+func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
+	n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
+	return n, err
+}
diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go
new file mode 100644
index 000000000..2ce4b3300
--- /dev/null
+++ b/vendor/github.com/miekg/dns/udp_windows.go
@@ -0,0 +1,34 @@
+// +build windows
+
+package dns
+
+import "net"
+
+type SessionUDP struct {
+	raddr *net.UDPAddr
+}
+
+// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
+// net.UDPAddr.
+func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
+	n, raddr, err := conn.ReadFrom(b)
+	if err != nil {
+		return n, nil, err
+	}
+	session := &SessionUDP{raddr.(*net.UDPAddr)}
+	return n, session, err
+}
+
+// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
+func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
+	n, err := conn.WriteTo(b, session.raddr)
+	return n, err
+}
+
+func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
+
+// setUDPSocketOptions sets the UDP socket options.
+// This function is implemented on a per platform basis. See udp_*.go for more details
+func setUDPSocketOptions(conn *net.UDPConn) error {
+	return nil
+}
diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go
new file mode 100644
index 000000000..e90c5c968
--- /dev/null
+++ b/vendor/github.com/miekg/dns/update.go
@@ -0,0 +1,106 @@
+package dns
+
+// NameUsed sets the RRs in the prereq section to
+// "Name is in use" RRs. RFC 2136 section 2.4.4.
+func (u *Msg) NameUsed(rr []RR) {
+	if u.Answer == nil {
+		u.Answer = make([]RR, 0, len(rr))
+	}
+	for _, r := range rr {
+		u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
+	}
+}
+
+// NameNotUsed sets the RRs in the prereq section to
+// "Name is not in use" RRs. RFC 2136 section 2.4.5.
+func (u *Msg) NameNotUsed(rr []RR) {
+	if u.Answer == nil {
+		u.Answer = make([]RR, 0, len(rr))
+	}
+	for _, r := range rr {
+		u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}})
+	}
+}
+
+// Used sets the RRs in the prereq section to
+// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2.
+func (u *Msg) Used(rr []RR) {
+	if len(u.Question) == 0 {
+		panic("dns: empty question section")
+	}
+	if u.Answer == nil {
+		u.Answer = make([]RR, 0, len(rr))
+	}
+	for _, r := range rr {
+		r.Header().Class = u.Question[0].Qclass
+		u.Answer = append(u.Answer, r)
+	}
+}
+
+// RRsetUsed sets the RRs in the prereq section to
+// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1.
+func (u *Msg) RRsetUsed(rr []RR) {
+	if u.Answer == nil {
+		u.Answer = make([]RR, 0, len(rr))
+	}
+	for _, r := range rr {
+		u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}})
+	}
+}
+
+// RRsetNotUsed sets the RRs in the prereq section to
+// "RRset does not exist" RRs. RFC 2136 section 2.4.3.
+func (u *Msg) RRsetNotUsed(rr []RR) {
+	if u.Answer == nil {
+		u.Answer = make([]RR, 0, len(rr))
+	}
+	for _, r := range rr {
+		u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassNONE}})
+	}
+}
+
+// Insert creates a dynamic update packet that adds a complete RRset, see RFC 2136 section 2.5.1.
+func (u *Msg) Insert(rr []RR) {
+	if len(u.Question) == 0 {
+		panic("dns: empty question section")
+	}
+	if u.Ns == nil {
+		u.Ns = make([]RR, 0, len(rr))
+	}
+	for _, r := range rr {
+		r.Header().Class = u.Question[0].Qclass
+		u.Ns = append(u.Ns, r)
+	}
+}
+
+// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2.
+func (u *Msg) RemoveRRset(rr []RR) {
+	if u.Ns == nil {
+		u.Ns = make([]RR, 0, len(rr))
+	}
+	for _, r := range rr {
+		u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}})
+	}
+}
+
+// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3.
+func (u *Msg) RemoveName(rr []RR) {
+	if u.Ns == nil {
+		u.Ns = make([]RR, 0, len(rr))
+	}
+	for _, r := range rr {
+		u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
+	}
+}
+
+// Remove creates a dynamic update packet that deletes an RR from an RRset, see RFC 2136 section 2.5.4.
+func (u *Msg) Remove(rr []RR) {
+	if u.Ns == nil {
+		u.Ns = make([]RR, 0, len(rr))
+	}
+	for _, r := range rr {
+		r.Header().Class = ClassNONE
+		r.Header().Ttl = 0
+		u.Ns = append(u.Ns, r)
+	}
+}
diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go
new file mode 100644
index 000000000..7346deffb
--- /dev/null
+++ b/vendor/github.com/miekg/dns/xfr.go
@@ -0,0 +1,244 @@
+package dns
+
+import (
+	"time"
+)
+
+// Envelope is used when doing a zone transfer with a remote server.
+type Envelope struct {
+	RR    []RR  // The set of RRs in the answer section of the xfr reply message.
+	Error error // If something went wrong, this contains the error.
+}
+
+// A Transfer defines parameters that are used during a zone transfer.
+type Transfer struct {
+	*Conn
+	DialTimeout    time.Duration     // net.DialTimeout, defaults to 2 seconds
+	ReadTimeout    time.Duration     // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds
+	WriteTimeout   time.Duration     // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds
+	TsigSecret     map[string]string // Secret(s) for Tsig map[], zonename must be fully qualified
+	tsigTimersOnly bool
+}
+
+// Think we need a way to stop the transfer
+
+// In performs an incoming transfer with the server in a.
+// If you would like to set the source IP, or some other attribute
+// of a Dialer for a Transfer, you can do so by specifying the attributes
+// in the Transfer.Conn:
+//
+//	d := net.Dialer{LocalAddr: transfer_source}
+//	con, err := d.Dial("tcp", master)
+//	dnscon := &dns.Conn{Conn:con}
+//	transfer = &dns.Transfer{Conn: dnscon}
+//	channel, err := transfer.In(message, master)
+//
+func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
+	timeout := dnsTimeout
+	if t.DialTimeout != 0 {
+		timeout = t.DialTimeout
+	}
+	if t.Conn == nil {
+		t.Conn, err = DialTimeout("tcp", a, timeout)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if err := t.WriteMsg(q); err != nil {
+		return nil, err
+	}
+	env = make(chan *Envelope)
+	go func() {
+		if q.Question[0].Qtype == TypeAXFR {
+			go t.inAxfr(q.Id, env)
+			return
+		}
+		if q.Question[0].Qtype == TypeIXFR {
+			go t.inIxfr(q.Id, env)
+			return
+		}
+	}()
+	return env, nil
+}
+
+func (t *Transfer) inAxfr(id uint16, c chan *Envelope) {
+	first := true
+	defer t.Close()
+	defer close(c)
+	timeout := dnsTimeout
+	if t.ReadTimeout != 0 {
+		timeout = t.ReadTimeout
+	}
+	for {
+		t.Conn.SetReadDeadline(time.Now().Add(timeout))
+		in, err := t.ReadMsg()
+		if err != nil {
+			c <- &Envelope{nil, err}
+			return
+		}
+		if id != in.Id {
+			c <- &Envelope{in.Answer, ErrId}
+			return
+		}
+		if first {
+			if !isSOAFirst(in) {
+				c <- &Envelope{in.Answer, ErrSoa}
+				return
+			}
+			first = !first
+			// only one answer that is SOA, receive more
+			if len(in.Answer) == 1 {
+				t.tsigTimersOnly = true
+				c <- &Envelope{in.Answer, nil}
+				continue
+			}
+		}
+
+		if !first {
+			t.tsigTimersOnly = true // Subsequent envelopes use this.
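+			// An AXFR is complete when the zone's SOA record appears again as the last RR of an envelope.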
+ if isSOALast(in) { + c <- &Envelope{in.Answer, nil} + return + } + c <- &Envelope{in.Answer, nil} + } + } +} + +func (t *Transfer) inIxfr(id uint16, c chan *Envelope) { + serial := uint32(0) // The first serial seen is the current server serial + first := true + defer t.Close() + defer close(c) + timeout := dnsTimeout + if t.ReadTimeout != 0 { + timeout = t.ReadTimeout + } + for { + t.SetReadDeadline(time.Now().Add(timeout)) + in, err := t.ReadMsg() + if err != nil { + c <- &Envelope{nil, err} + return + } + if id != in.Id { + c <- &Envelope{in.Answer, ErrId} + return + } + if first { + // A single SOA RR signals "no changes" + if len(in.Answer) == 1 && isSOAFirst(in) { + c <- &Envelope{in.Answer, nil} + return + } + + // Check if the returned answer is ok + if !isSOAFirst(in) { + c <- &Envelope{in.Answer, ErrSoa} + return + } + // This serial is important + serial = in.Answer[0].(*SOA).Serial + first = !first + } + + // Now we need to check each message for SOA records, to see what we need to do + if !first { + t.tsigTimersOnly = true + // If the last record in the IXFR contains the servers' SOA, we should quit + if v, ok := in.Answer[len(in.Answer)-1].(*SOA); ok { + if v.Serial == serial { + c <- &Envelope{in.Answer, nil} + return + } + } + c <- &Envelope{in.Answer, nil} + } + } +} + +// Out performs an outgoing transfer with the client connecting in w. +// Basic use pattern: +// +// ch := make(chan *dns.Envelope) +// tr := new(dns.Transfer) +// go tr.Out(w, r, ch) +// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}} +// close(ch) +// w.Hijack() +// // w.Close() // Client closes connection +// +// The server is responsible for sending the correct sequence of RRs through the +// channel ch. +func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error { + for x := range ch { + r := new(Msg) + // Compress? + r.SetReply(q) + r.Authoritative = true + // assume it fits TODO(miek): fix + r.Answer = append(r.Answer, x.RR...) + if err := w.WriteMsg(r); err != nil { + return err + } + } + w.TsigTimersOnly(true) + return nil +} + +// ReadMsg reads a message from the transfer connection t. +func (t *Transfer) ReadMsg() (*Msg, error) { + m := new(Msg) + p := make([]byte, MaxMsgSize) + n, err := t.Read(p) + if err != nil && n == 0 { + return nil, err + } + p = p[:n] + if err := m.Unpack(p); err != nil { + return nil, err + } + if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { + if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { + return m, ErrSecret + } + // Need to work on the original message p, as that was used to calculate the tsig. + err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) + t.tsigRequestMAC = ts.MAC + } + return m, err +} + +// WriteMsg writes a message through the transfer connection t. 
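+// If the message carries a TSIG record and a matching secret is present in t.TsigSecret, the message is signed before it is written.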
+func (t *Transfer) WriteMsg(m *Msg) (err error) { + var out []byte + if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { + if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { + return ErrSecret + } + out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) + } else { + out, err = m.Pack() + } + if err != nil { + return err + } + if _, err = t.Write(out); err != nil { + return err + } + return nil +} + +func isSOAFirst(in *Msg) bool { + if len(in.Answer) > 0 { + return in.Answer[0].Header().Rrtype == TypeSOA + } + return false +} + +func isSOALast(in *Msg) bool { + if len(in.Answer) > 0 { + return in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA + } + return false +} diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go new file mode 100644 index 000000000..346d3102d --- /dev/null +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -0,0 +1,3464 @@ +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from msg_generate.go + +package dns + +// pack*() functions + +func (rr *A) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packDataA(rr.A, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *AAAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packDataAAAA(rr.AAAA, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *AFSDB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Subtype, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Hostname, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *ANY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Flag, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Tag, msg, off) + if err != nil { + return off, err + } + off, err = packStringOctet(rr.Value, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CDNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = 
packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CDS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CERT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Type, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Certificate, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Target, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DHCID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringBase64(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DLV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Target, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + 
headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Endpoint, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EUI48) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint48(rr.Address, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EUI64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint64(rr.Address, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *GID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint32(rr.Gid, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *GPOS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.Longitude, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Latitude, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Altitude, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *HINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.Cpu, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Os, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = 
uint16(off - headerEnd) + return off, nil +} + +func (rr *HIP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.HitLength, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.PublicKeyAlgorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.PublicKeyLength, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Hit, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + off, err = packDataDomainNames(rr.RendezvousServers, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *KEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *KX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Exchanger, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *L32) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDataA(rr.Locator32, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *L64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packUint64(rr.Locator64, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *LOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Version, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Size, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.HorizPre, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.VertPre, msg, off) + if err != nil { + return off, err + } + off, err 
= packUint32(rr.Latitude, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Longitude, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Altitude, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *LP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Fqdn, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mb, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MD) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Md, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mf, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mg, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Rmail, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Email, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mr, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = 
packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Mx, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NAPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Order, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Service, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Regexp, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Replacement, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packUint64(rr.NodeID, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NIMLOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Locator, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.ZSData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ns, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSAPPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ptr, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSEC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.NextDomain, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + 
rr.Header().Rdlength = uint16(off - headerEnd)
+	return off, nil
+}
+
+func (rr *NSEC3) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+	off, err := rr.Hdr.pack(msg, off, compression, compress)
+	if err != nil {
+		return off, err
+	}
+	headerEnd := off
+	off, err = packUint8(rr.Hash, msg, off)
+	if err != nil {
+		return off, err
+	}
+	off, err = packUint8(rr.Flags, msg, off)
+	if err != nil {
+		return off, err
+	}
+	off, err = packUint16(rr.Iterations, msg, off)
+	if err != nil {
+		return off, err
+	}
+	off, err = packUint8(rr.SaltLength, msg, off)
+	if err != nil {
+		return off, err
+	}
+	if rr.Salt == "-" { /* do nothing, empty salt */
+	} else {
+		off, err = packStringHex(rr.Salt, msg, off)
+	}
+	if err != nil {
+		return off, err
+	}
+	off, err = packUint8(rr.HashLength, msg, off)
+	if err != nil {
+		return off, err
+	}
+	off, err = packStringBase32(rr.NextDomain, msg, off)
+	if err != nil {
+		return off, err
+	}
+	off, err = packDataNsec(rr.TypeBitMap, msg, off)
+	if err != nil {
+		return off, err
+	}
+	rr.Header().Rdlength = uint16(off - headerEnd)
+	return off, nil
+}
+
+func (rr *NSEC3PARAM) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+	off, err := rr.Hdr.pack(msg, off, compression, compress)
+	if err != nil {
+		return off, err
+	}
+	headerEnd := off
+	off, err = packUint8(rr.Hash, msg, off)
+	if err != nil {
+		return off, err
+	}
+	off, err = packUint8(rr.Flags, msg, off)
+	if err != nil {
+		return off, err
+	}
+	off, err = packUint16(rr.Iterations, msg, off)
+	if err != nil {
+		return off, err
+	}
+	off, err = packUint8(rr.SaltLength, msg, off)
+	if err != nil {
+		return off, err
+	}
+	if rr.Salt == "-" { /* do nothing, empty salt */
+	} else {
+		off, err = packStringHex(rr.Salt, msg, off)
+	}
+	if err != nil {
+		return off, err
+	}
+	rr.Header().Rdlength = uint16(off - headerEnd)
+	return off, nil
+}
+
+func (rr *OPENPGPKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+	off, err := rr.Hdr.pack(msg, off, compression, compress)
+	if err != nil {
+		return off, err
+	}
+	headerEnd := off
+	off, err = packStringBase64(rr.PublicKey, msg, off)
+	if err != nil {
+		return off, err
+	}
+	rr.Header().Rdlength = uint16(off - headerEnd)
+	return off, nil
+}
+
+func (rr *OPT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+	off, err := rr.Hdr.pack(msg, off, compression, compress)
+	if err != nil {
+		return off, err
+	}
+	headerEnd := off
+	off, err = packDataOpt(rr.Option, msg, off)
+	if err != nil {
+		return off, err
+	}
+	rr.Header().Rdlength = uint16(off - headerEnd)
+	return off, nil
+}
+
+func (rr *PTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+	off, err := rr.Hdr.pack(msg, off, compression, compress)
+	if err != nil {
+		return off, err
+	}
+	headerEnd := off
+	off, err = PackDomainName(rr.Ptr, msg, off, compression, compress)
+	if err != nil {
+		return off, err
+	}
+	rr.Header().Rdlength = uint16(off - headerEnd)
+	return off, nil
+}
+
+func (rr *PX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
+	off, err := rr.Hdr.pack(msg, off, compression, compress)
+	if err != nil {
+		return off, err
+	}
+	headerEnd := off
+	off, err = packUint16(rr.Preference, msg, off)
+	if err != nil {
+		return off, err
+	}
+	off, err = PackDomainName(rr.Map822, msg, off, compression, compress)
+	if err != nil {
+		return off, err
+	}
+	off, err = PackDomainName(rr.Mapx400, msg, off, compression, compress)
+	if err != nil {
+		return off, err
+	}
+	rr.Header().Rdlength = uint16(off - headerEnd)
+
return off, nil +} + +func (rr *RFC3597) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Rdata, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mbox, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Txt, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RRSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.TypeCovered, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.SignerName, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Signature, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Host, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.TypeCovered, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, 
err = packUint8(rr.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.SignerName, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Signature, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SOA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ns, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Mbox, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint32(rr.Serial, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Refresh, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Retry, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expire, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Minttl, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SPF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SRV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Weight, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Port, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Target, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SSHFP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Type, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.FingerPrint, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = 
packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TALINK) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.PreviousName, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.NextName, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Algorithm, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Mode, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeySize, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Key, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.OtherData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TLSA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Usage, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Selector, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.MatchingType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Certificate, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Algorithm, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint48(rr.TimeSigned, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Fudge, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.MACSize, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.MAC, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OrigId, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = 
packStringHex(rr.OtherData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TXT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *UID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint32(rr.Uid, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *UINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.Uinfo, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *URI) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Weight, msg, off) + if err != nil { + return off, err + } + off, err = packStringOctet(rr.Target, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *X25) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.PSDNAddress, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +// unpack*() functions + +func unpackA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(A) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.A, off, err = unpackDataA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackAAAA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(AAAA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.AAAA, off, err = unpackDataAAAA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackAFSDB(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(AFSDB) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Subtype, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Hostname, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackANY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(ANY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + return rr, off, err +} + +func unpackCAA(h RR_Header, 
msg []byte, off int) (RR, int, error) { + rr := new(CAA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flag, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Tag, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Value, off, err = unpackStringOctet(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CDNSKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCDS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CDS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCERT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CERT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Type, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCNAME(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CNAME) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDHCID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DHCID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Digest, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func 
unpackDLV(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DLV) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDNAME(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DNAME) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DNSKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackEID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackEUI48(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EUI48) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Address, off, err = unpackUint48(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackEUI64(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EUI64) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Address, off, err = unpackUint64(msg, off) + if err != nil { + return 
rr, off, err + } + return rr, off, err +} + +func unpackGID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(GID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Gid, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackGPOS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(GPOS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Longitude, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Latitude, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Altitude, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackHINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(HINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Cpu, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Os, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackHIP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(HIP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.HitLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKeyLength, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Hit, off, err = unpackStringHex(msg, off, off+int(rr.HitLength)) + if err != nil { + return rr, off, err + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, off+int(rr.PublicKeyLength)) + if err != nil { + return rr, off, err + } + rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(KEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackKX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(KX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + 
rr.Exchanger, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackL32(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(L32) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Locator32, off, err = unpackDataA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackL64(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(L64) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Locator64, off, err = unpackUint64(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackLOC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(LOC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Version, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Size, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.HorizPre, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.VertPre, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Latitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Longitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Altitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackLP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(LP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Fqdn, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMB(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MB) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mb, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMD(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MD) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Md, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMF(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MF) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mf, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, 
off, err +} + +func unpackMG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mg, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Rmail, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Email, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mr, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mx, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNAPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NAPTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Order, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Service, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Regexp, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Replacement, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NodeID, off, err = unpackUint64(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNIMLOC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NIMLOC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + 
_ = rdStart + + rr.ZSData, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ns, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSAPPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSAPPTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ptr, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSEC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.NextDomain, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSEC3(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC3) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Hash, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Iterations, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SaltLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) + if err != nil { + return rr, off, err + } + rr.HashLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NextDomain, off, err = unpackStringBase32(msg, off, off+int(rr.HashLength)) + if err != nil { + return rr, off, err + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSEC3PARAM(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC3PARAM) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Hash, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Iterations, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SaltLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackOPENPGPKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(OPENPGPKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, 
nil + } + var err error + rdStart := off + _ = rdStart + + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackOPT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(OPT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Option, off, err = unpackDataOpt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(PTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ptr, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackPX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(PX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Map822, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mapx400, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRFC3597(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RFC3597) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mbox, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Txt, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRRSIG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RRSIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.TypeCovered, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Labels, off, err = unpackUint8(msg, off) + if err != 
nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OrigTtl, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SignerName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Host, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSIG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.TypeCovered, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Labels, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OrigTtl, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SignerName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSOA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SOA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ns, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mbox, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Serial, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Refresh, off, err = 
unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Retry, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expire, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Minttl, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSPF(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SPF) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSRV(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SRV) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Weight, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Port, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSSHFP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SSHFP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Type, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTALINK(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TALINK) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.PreviousName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NextName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TKEY) + rr.Hdr = h + if noRdata(h) { + return 
rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mode, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Error, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeySize, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Key, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherLen, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherData, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTLSA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TLSA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Usage, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Selector, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MatchingType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTSIG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TSIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.TimeSigned, off, err = unpackUint48(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Fudge, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MACSize, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MAC, off, err = unpackStringHex(msg, off, off+int(rr.MACSize)) + if err != nil { + return rr, off, err + } + rr.OrigId, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Error, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherLen, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTXT(h 
RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TXT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackUID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(UID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Uid, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackUINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(UINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Uinfo, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackURI(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(URI) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Weight, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Target, off, err = unpackStringOctet(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackX25(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(X25) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.PSDNAddress, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){ + TypeA: unpackA, + TypeAAAA: unpackAAAA, + TypeAFSDB: unpackAFSDB, + TypeANY: unpackANY, + TypeCAA: unpackCAA, + TypeCDNSKEY: unpackCDNSKEY, + TypeCDS: unpackCDS, + TypeCERT: unpackCERT, + TypeCNAME: unpackCNAME, + TypeDHCID: unpackDHCID, + TypeDLV: unpackDLV, + TypeDNAME: unpackDNAME, + TypeDNSKEY: unpackDNSKEY, + TypeDS: unpackDS, + TypeEID: unpackEID, + TypeEUI48: unpackEUI48, + TypeEUI64: unpackEUI64, + TypeGID: unpackGID, + TypeGPOS: unpackGPOS, + TypeHINFO: unpackHINFO, + TypeHIP: unpackHIP, + TypeKEY: unpackKEY, + TypeKX: unpackKX, + TypeL32: unpackL32, + TypeL64: unpackL64, + TypeLOC: unpackLOC, + TypeLP: unpackLP, + TypeMB: unpackMB, + TypeMD: unpackMD, + TypeMF: unpackMF, + TypeMG: unpackMG, + TypeMINFO: unpackMINFO, + TypeMR: unpackMR, + TypeMX: unpackMX, + TypeNAPTR: unpackNAPTR, + TypeNID: unpackNID, + TypeNIMLOC: unpackNIMLOC, + TypeNINFO: unpackNINFO, + TypeNS: unpackNS, + TypeNSAPPTR: unpackNSAPPTR, + TypeNSEC: unpackNSEC, + TypeNSEC3: unpackNSEC3, + TypeNSEC3PARAM: unpackNSEC3PARAM, + TypeOPENPGPKEY: unpackOPENPGPKEY, + TypeOPT: unpackOPT, + TypePTR: unpackPTR, + TypePX: unpackPX, + TypeRKEY: unpackRKEY, + TypeRP: unpackRP, + TypeRRSIG: unpackRRSIG, + TypeRT: unpackRT, + TypeSIG: unpackSIG, + TypeSOA: unpackSOA, + TypeSPF: unpackSPF, + TypeSRV: unpackSRV, + TypeSSHFP: unpackSSHFP, + TypeTA: unpackTA, + TypeTALINK: unpackTALINK, + TypeTKEY: unpackTKEY, + TypeTLSA: unpackTLSA, + TypeTSIG: unpackTSIG, + TypeTXT: unpackTXT, + TypeUID: unpackUID, + TypeUINFO: unpackUINFO, + TypeURI: unpackURI, + TypeX25: unpackX25, +} diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go new 
file mode 100644 index 000000000..a4ecbb0cc --- /dev/null +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -0,0 +1,828 @@ +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from type_generate.go + +package dns + +import ( + "encoding/base64" + "net" +) + +// TypeToRR is a map of constructors for each RR type. +var TypeToRR = map[uint16]func() RR{ + TypeA: func() RR { return new(A) }, + TypeAAAA: func() RR { return new(AAAA) }, + TypeAFSDB: func() RR { return new(AFSDB) }, + TypeANY: func() RR { return new(ANY) }, + TypeCAA: func() RR { return new(CAA) }, + TypeCDNSKEY: func() RR { return new(CDNSKEY) }, + TypeCDS: func() RR { return new(CDS) }, + TypeCERT: func() RR { return new(CERT) }, + TypeCNAME: func() RR { return new(CNAME) }, + TypeDHCID: func() RR { return new(DHCID) }, + TypeDLV: func() RR { return new(DLV) }, + TypeDNAME: func() RR { return new(DNAME) }, + TypeDNSKEY: func() RR { return new(DNSKEY) }, + TypeDS: func() RR { return new(DS) }, + TypeEID: func() RR { return new(EID) }, + TypeEUI48: func() RR { return new(EUI48) }, + TypeEUI64: func() RR { return new(EUI64) }, + TypeGID: func() RR { return new(GID) }, + TypeGPOS: func() RR { return new(GPOS) }, + TypeHINFO: func() RR { return new(HINFO) }, + TypeHIP: func() RR { return new(HIP) }, + TypeKEY: func() RR { return new(KEY) }, + TypeKX: func() RR { return new(KX) }, + TypeL32: func() RR { return new(L32) }, + TypeL64: func() RR { return new(L64) }, + TypeLOC: func() RR { return new(LOC) }, + TypeLP: func() RR { return new(LP) }, + TypeMB: func() RR { return new(MB) }, + TypeMD: func() RR { return new(MD) }, + TypeMF: func() RR { return new(MF) }, + TypeMG: func() RR { return new(MG) }, + TypeMINFO: func() RR { return new(MINFO) }, + TypeMR: func() RR { return new(MR) }, + TypeMX: func() RR { return new(MX) }, + TypeNAPTR: func() RR { return new(NAPTR) }, + TypeNID: func() RR { return new(NID) }, + TypeNIMLOC: func() RR { return new(NIMLOC) }, + TypeNINFO: func() RR { return new(NINFO) }, + TypeNS: func() RR { return new(NS) }, + TypeNSAPPTR: func() RR { return new(NSAPPTR) }, + TypeNSEC: func() RR { return new(NSEC) }, + TypeNSEC3: func() RR { return new(NSEC3) }, + TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, + TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, + TypeOPT: func() RR { return new(OPT) }, + TypePTR: func() RR { return new(PTR) }, + TypePX: func() RR { return new(PX) }, + TypeRKEY: func() RR { return new(RKEY) }, + TypeRP: func() RR { return new(RP) }, + TypeRRSIG: func() RR { return new(RRSIG) }, + TypeRT: func() RR { return new(RT) }, + TypeSIG: func() RR { return new(SIG) }, + TypeSOA: func() RR { return new(SOA) }, + TypeSPF: func() RR { return new(SPF) }, + TypeSRV: func() RR { return new(SRV) }, + TypeSSHFP: func() RR { return new(SSHFP) }, + TypeTA: func() RR { return new(TA) }, + TypeTALINK: func() RR { return new(TALINK) }, + TypeTKEY: func() RR { return new(TKEY) }, + TypeTLSA: func() RR { return new(TLSA) }, + TypeTSIG: func() RR { return new(TSIG) }, + TypeTXT: func() RR { return new(TXT) }, + TypeUID: func() RR { return new(UID) }, + TypeUINFO: func() RR { return new(UINFO) }, + TypeURI: func() RR { return new(URI) }, + TypeX25: func() RR { return new(X25) }, +} + +// TypeToString is a map of strings for each RR type. 
+var TypeToString = map[uint16]string{ + TypeA: "A", + TypeAAAA: "AAAA", + TypeAFSDB: "AFSDB", + TypeANY: "ANY", + TypeATMA: "ATMA", + TypeAXFR: "AXFR", + TypeCAA: "CAA", + TypeCDNSKEY: "CDNSKEY", + TypeCDS: "CDS", + TypeCERT: "CERT", + TypeCNAME: "CNAME", + TypeDHCID: "DHCID", + TypeDLV: "DLV", + TypeDNAME: "DNAME", + TypeDNSKEY: "DNSKEY", + TypeDS: "DS", + TypeEID: "EID", + TypeEUI48: "EUI48", + TypeEUI64: "EUI64", + TypeGID: "GID", + TypeGPOS: "GPOS", + TypeHINFO: "HINFO", + TypeHIP: "HIP", + TypeISDN: "ISDN", + TypeIXFR: "IXFR", + TypeKEY: "KEY", + TypeKX: "KX", + TypeL32: "L32", + TypeL64: "L64", + TypeLOC: "LOC", + TypeLP: "LP", + TypeMAILA: "MAILA", + TypeMAILB: "MAILB", + TypeMB: "MB", + TypeMD: "MD", + TypeMF: "MF", + TypeMG: "MG", + TypeMINFO: "MINFO", + TypeMR: "MR", + TypeMX: "MX", + TypeNAPTR: "NAPTR", + TypeNID: "NID", + TypeNIMLOC: "NIMLOC", + TypeNINFO: "NINFO", + TypeNS: "NS", + TypeNSEC: "NSEC", + TypeNSEC3: "NSEC3", + TypeNSEC3PARAM: "NSEC3PARAM", + TypeNULL: "NULL", + TypeNXT: "NXT", + TypeNone: "None", + TypeOPENPGPKEY: "OPENPGPKEY", + TypeOPT: "OPT", + TypePTR: "PTR", + TypePX: "PX", + TypeRKEY: "RKEY", + TypeRP: "RP", + TypeRRSIG: "RRSIG", + TypeRT: "RT", + TypeReserved: "Reserved", + TypeSIG: "SIG", + TypeSOA: "SOA", + TypeSPF: "SPF", + TypeSRV: "SRV", + TypeSSHFP: "SSHFP", + TypeTA: "TA", + TypeTALINK: "TALINK", + TypeTKEY: "TKEY", + TypeTLSA: "TLSA", + TypeTSIG: "TSIG", + TypeTXT: "TXT", + TypeUID: "UID", + TypeUINFO: "UINFO", + TypeUNSPEC: "UNSPEC", + TypeURI: "URI", + TypeX25: "X25", + TypeNSAPPTR: "NSAP-PTR", +} + +// Header() functions +func (rr *A) Header() *RR_Header { return &rr.Hdr } +func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } +func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr } +func (rr *ANY) Header() *RR_Header { return &rr.Hdr } +func (rr *CAA) Header() *RR_Header { return &rr.Hdr } +func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *CDS) Header() *RR_Header { return &rr.Hdr } +func (rr *CERT) Header() *RR_Header { return &rr.Hdr } +func (rr *CNAME) Header() *RR_Header { return &rr.Hdr } +func (rr *DHCID) Header() *RR_Header { return &rr.Hdr } +func (rr *DLV) Header() *RR_Header { return &rr.Hdr } +func (rr *DNAME) Header() *RR_Header { return &rr.Hdr } +func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *DS) Header() *RR_Header { return &rr.Hdr } +func (rr *EID) Header() *RR_Header { return &rr.Hdr } +func (rr *EUI48) Header() *RR_Header { return &rr.Hdr } +func (rr *EUI64) Header() *RR_Header { return &rr.Hdr } +func (rr *GID) Header() *RR_Header { return &rr.Hdr } +func (rr *GPOS) Header() *RR_Header { return &rr.Hdr } +func (rr *HINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *HIP) Header() *RR_Header { return &rr.Hdr } +func (rr *KEY) Header() *RR_Header { return &rr.Hdr } +func (rr *KX) Header() *RR_Header { return &rr.Hdr } +func (rr *L32) Header() *RR_Header { return &rr.Hdr } +func (rr *L64) Header() *RR_Header { return &rr.Hdr } +func (rr *LOC) Header() *RR_Header { return &rr.Hdr } +func (rr *LP) Header() *RR_Header { return &rr.Hdr } +func (rr *MB) Header() *RR_Header { return &rr.Hdr } +func (rr *MD) Header() *RR_Header { return &rr.Hdr } +func (rr *MF) Header() *RR_Header { return &rr.Hdr } +func (rr *MG) Header() *RR_Header { return &rr.Hdr } +func (rr *MINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *MR) Header() *RR_Header { return &rr.Hdr } +func (rr *MX) Header() *RR_Header { return &rr.Hdr } +func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr } +func (rr *NID) 
Header() *RR_Header { return &rr.Hdr } +func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr } +func (rr *NINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *NS) Header() *RR_Header { return &rr.Hdr } +func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } +func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *OPT) Header() *RR_Header { return &rr.Hdr } +func (rr *PTR) Header() *RR_Header { return &rr.Hdr } +func (rr *PX) Header() *RR_Header { return &rr.Hdr } +func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr } +func (rr *RKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *RP) Header() *RR_Header { return &rr.Hdr } +func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr } +func (rr *RT) Header() *RR_Header { return &rr.Hdr } +func (rr *SIG) Header() *RR_Header { return &rr.Hdr } +func (rr *SOA) Header() *RR_Header { return &rr.Hdr } +func (rr *SPF) Header() *RR_Header { return &rr.Hdr } +func (rr *SRV) Header() *RR_Header { return &rr.Hdr } +func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr } +func (rr *TA) Header() *RR_Header { return &rr.Hdr } +func (rr *TALINK) Header() *RR_Header { return &rr.Hdr } +func (rr *TKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *TLSA) Header() *RR_Header { return &rr.Hdr } +func (rr *TSIG) Header() *RR_Header { return &rr.Hdr } +func (rr *TXT) Header() *RR_Header { return &rr.Hdr } +func (rr *UID) Header() *RR_Header { return &rr.Hdr } +func (rr *UINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *URI) Header() *RR_Header { return &rr.Hdr } +func (rr *X25) Header() *RR_Header { return &rr.Hdr } + +// len() functions +func (rr *A) len() int { + l := rr.Hdr.len() + l += net.IPv4len // A + return l +} +func (rr *AAAA) len() int { + l := rr.Hdr.len() + l += net.IPv6len // AAAA + return l +} +func (rr *AFSDB) len() int { + l := rr.Hdr.len() + l += 2 // Subtype + l += len(rr.Hostname) + 1 + return l +} +func (rr *ANY) len() int { + l := rr.Hdr.len() + return l +} +func (rr *CAA) len() int { + l := rr.Hdr.len() + l += 1 // Flag + l += len(rr.Tag) + 1 + l += len(rr.Value) + return l +} +func (rr *CERT) len() int { + l := rr.Hdr.len() + l += 2 // Type + l += 2 // KeyTag + l += 1 // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) + return l +} +func (rr *CNAME) len() int { + l := rr.Hdr.len() + l += len(rr.Target) + 1 + return l +} +func (rr *DHCID) len() int { + l := rr.Hdr.len() + l += base64.StdEncoding.DecodedLen(len(rr.Digest)) + return l +} +func (rr *DNAME) len() int { + l := rr.Hdr.len() + l += len(rr.Target) + 1 + return l +} +func (rr *DNSKEY) len() int { + l := rr.Hdr.len() + l += 2 // Flags + l += 1 // Protocol + l += 1 // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *DS) len() int { + l := rr.Hdr.len() + l += 2 // KeyTag + l += 1 // Algorithm + l += 1 // DigestType + l += len(rr.Digest)/2 + 1 + return l +} +func (rr *EID) len() int { + l := rr.Hdr.len() + l += len(rr.Endpoint)/2 + 1 + return l +} +func (rr *EUI48) len() int { + l := rr.Hdr.len() + l += 6 // Address + return l +} +func (rr *EUI64) len() int { + l := rr.Hdr.len() + l += 8 // Address + return l +} +func (rr *GID) len() int { + l := rr.Hdr.len() + l += 4 // Gid + return l +} +func (rr *GPOS) len() int { + l := rr.Hdr.len() + l += len(rr.Longitude) + 1 + l += len(rr.Latitude) + 1 + l += len(rr.Altitude) 
+ 1 + return l +} +func (rr *HINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Cpu) + 1 + l += len(rr.Os) + 1 + return l +} +func (rr *HIP) len() int { + l := rr.Hdr.len() + l += 1 // HitLength + l += 1 // PublicKeyAlgorithm + l += 2 // PublicKeyLength + l += len(rr.Hit)/2 + 1 + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + for _, x := range rr.RendezvousServers { + l += len(x) + 1 + } + return l +} +func (rr *KX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Exchanger) + 1 + return l +} +func (rr *L32) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += net.IPv4len // Locator32 + return l +} +func (rr *L64) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += 8 // Locator64 + return l +} +func (rr *LOC) len() int { + l := rr.Hdr.len() + l += 1 // Version + l += 1 // Size + l += 1 // HorizPre + l += 1 // VertPre + l += 4 // Latitude + l += 4 // Longitude + l += 4 // Altitude + return l +} +func (rr *LP) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Fqdn) + 1 + return l +} +func (rr *MB) len() int { + l := rr.Hdr.len() + l += len(rr.Mb) + 1 + return l +} +func (rr *MD) len() int { + l := rr.Hdr.len() + l += len(rr.Md) + 1 + return l +} +func (rr *MF) len() int { + l := rr.Hdr.len() + l += len(rr.Mf) + 1 + return l +} +func (rr *MG) len() int { + l := rr.Hdr.len() + l += len(rr.Mg) + 1 + return l +} +func (rr *MINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Rmail) + 1 + l += len(rr.Email) + 1 + return l +} +func (rr *MR) len() int { + l := rr.Hdr.len() + l += len(rr.Mr) + 1 + return l +} +func (rr *MX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Mx) + 1 + return l +} +func (rr *NAPTR) len() int { + l := rr.Hdr.len() + l += 2 // Order + l += 2 // Preference + l += len(rr.Flags) + 1 + l += len(rr.Service) + 1 + l += len(rr.Regexp) + 1 + l += len(rr.Replacement) + 1 + return l +} +func (rr *NID) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += 8 // NodeID + return l +} +func (rr *NIMLOC) len() int { + l := rr.Hdr.len() + l += len(rr.Locator)/2 + 1 + return l +} +func (rr *NINFO) len() int { + l := rr.Hdr.len() + for _, x := range rr.ZSData { + l += len(x) + 1 + } + return l +} +func (rr *NS) len() int { + l := rr.Hdr.len() + l += len(rr.Ns) + 1 + return l +} +func (rr *NSAPPTR) len() int { + l := rr.Hdr.len() + l += len(rr.Ptr) + 1 + return l +} +func (rr *NSEC3PARAM) len() int { + l := rr.Hdr.len() + l += 1 // Hash + l += 1 // Flags + l += 2 // Iterations + l += 1 // SaltLength + l += len(rr.Salt)/2 + 1 + return l +} +func (rr *OPENPGPKEY) len() int { + l := rr.Hdr.len() + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *PTR) len() int { + l := rr.Hdr.len() + l += len(rr.Ptr) + 1 + return l +} +func (rr *PX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Map822) + 1 + l += len(rr.Mapx400) + 1 + return l +} +func (rr *RFC3597) len() int { + l := rr.Hdr.len() + l += len(rr.Rdata)/2 + 1 + return l +} +func (rr *RKEY) len() int { + l := rr.Hdr.len() + l += 2 // Flags + l += 1 // Protocol + l += 1 // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *RP) len() int { + l := rr.Hdr.len() + l += len(rr.Mbox) + 1 + l += len(rr.Txt) + 1 + return l +} +func (rr *RRSIG) len() int { + l := rr.Hdr.len() + l += 2 // TypeCovered + l += 1 // Algorithm + l += 1 // Labels + l += 4 // OrigTtl + l += 4 // Expiration + l += 4 // Inception + l += 2 // KeyTag + l += len(rr.SignerName) + 1 + l += 
base64.StdEncoding.DecodedLen(len(rr.Signature)) + return l +} +func (rr *RT) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Host) + 1 + return l +} +func (rr *SOA) len() int { + l := rr.Hdr.len() + l += len(rr.Ns) + 1 + l += len(rr.Mbox) + 1 + l += 4 // Serial + l += 4 // Refresh + l += 4 // Retry + l += 4 // Expire + l += 4 // Minttl + return l +} +func (rr *SPF) len() int { + l := rr.Hdr.len() + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *SRV) len() int { + l := rr.Hdr.len() + l += 2 // Priority + l += 2 // Weight + l += 2 // Port + l += len(rr.Target) + 1 + return l +} +func (rr *SSHFP) len() int { + l := rr.Hdr.len() + l += 1 // Algorithm + l += 1 // Type + l += len(rr.FingerPrint)/2 + 1 + return l +} +func (rr *TA) len() int { + l := rr.Hdr.len() + l += 2 // KeyTag + l += 1 // Algorithm + l += 1 // DigestType + l += len(rr.Digest)/2 + 1 + return l +} +func (rr *TALINK) len() int { + l := rr.Hdr.len() + l += len(rr.PreviousName) + 1 + l += len(rr.NextName) + 1 + return l +} +func (rr *TKEY) len() int { + l := rr.Hdr.len() + l += len(rr.Algorithm) + 1 + l += 4 // Inception + l += 4 // Expiration + l += 2 // Mode + l += 2 // Error + l += 2 // KeySize + l += len(rr.Key) + 1 + l += 2 // OtherLen + l += len(rr.OtherData) + 1 + return l +} +func (rr *TLSA) len() int { + l := rr.Hdr.len() + l += 1 // Usage + l += 1 // Selector + l += 1 // MatchingType + l += len(rr.Certificate)/2 + 1 + return l +} +func (rr *TSIG) len() int { + l := rr.Hdr.len() + l += len(rr.Algorithm) + 1 + l += 6 // TimeSigned + l += 2 // Fudge + l += 2 // MACSize + l += len(rr.MAC)/2 + 1 + l += 2 // OrigId + l += 2 // Error + l += 2 // OtherLen + l += len(rr.OtherData)/2 + 1 + return l +} +func (rr *TXT) len() int { + l := rr.Hdr.len() + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *UID) len() int { + l := rr.Hdr.len() + l += 4 // Uid + return l +} +func (rr *UINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Uinfo) + 1 + return l +} +func (rr *URI) len() int { + l := rr.Hdr.len() + l += 2 // Priority + l += 2 // Weight + l += len(rr.Target) + return l +} +func (rr *X25) len() int { + l := rr.Hdr.len() + l += len(rr.PSDNAddress) + 1 + return l +} + +// copy() functions +func (rr *A) copy() RR { + return &A{*rr.Hdr.copyHeader(), copyIP(rr.A)} +} +func (rr *AAAA) copy() RR { + return &AAAA{*rr.Hdr.copyHeader(), copyIP(rr.AAAA)} +} +func (rr *AFSDB) copy() RR { + return &AFSDB{*rr.Hdr.copyHeader(), rr.Subtype, rr.Hostname} +} +func (rr *ANY) copy() RR { + return &ANY{*rr.Hdr.copyHeader()} +} +func (rr *CAA) copy() RR { + return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value} +} +func (rr *CERT) copy() RR { + return &CERT{*rr.Hdr.copyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} +} +func (rr *CNAME) copy() RR { + return &CNAME{*rr.Hdr.copyHeader(), rr.Target} +} +func (rr *DHCID) copy() RR { + return &DHCID{*rr.Hdr.copyHeader(), rr.Digest} +} +func (rr *DNAME) copy() RR { + return &DNAME{*rr.Hdr.copyHeader(), rr.Target} +} +func (rr *DNSKEY) copy() RR { + return &DNSKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} +} +func (rr *DS) copy() RR { + return &DS{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} +} +func (rr *EID) copy() RR { + return &EID{*rr.Hdr.copyHeader(), rr.Endpoint} +} +func (rr *EUI48) copy() RR { + return &EUI48{*rr.Hdr.copyHeader(), rr.Address} +} +func (rr *EUI64) copy() RR { + return &EUI64{*rr.Hdr.copyHeader(), rr.Address} +} +func (rr *GID) copy() RR 
{ + return &GID{*rr.Hdr.copyHeader(), rr.Gid} +} +func (rr *GPOS) copy() RR { + return &GPOS{*rr.Hdr.copyHeader(), rr.Longitude, rr.Latitude, rr.Altitude} +} +func (rr *HINFO) copy() RR { + return &HINFO{*rr.Hdr.copyHeader(), rr.Cpu, rr.Os} +} +func (rr *HIP) copy() RR { + RendezvousServers := make([]string, len(rr.RendezvousServers)) + copy(RendezvousServers, rr.RendezvousServers) + return &HIP{*rr.Hdr.copyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} +} +func (rr *KX) copy() RR { + return &KX{*rr.Hdr.copyHeader(), rr.Preference, rr.Exchanger} +} +func (rr *L32) copy() RR { + return &L32{*rr.Hdr.copyHeader(), rr.Preference, copyIP(rr.Locator32)} +} +func (rr *L64) copy() RR { + return &L64{*rr.Hdr.copyHeader(), rr.Preference, rr.Locator64} +} +func (rr *LOC) copy() RR { + return &LOC{*rr.Hdr.copyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} +} +func (rr *LP) copy() RR { + return &LP{*rr.Hdr.copyHeader(), rr.Preference, rr.Fqdn} +} +func (rr *MB) copy() RR { + return &MB{*rr.Hdr.copyHeader(), rr.Mb} +} +func (rr *MD) copy() RR { + return &MD{*rr.Hdr.copyHeader(), rr.Md} +} +func (rr *MF) copy() RR { + return &MF{*rr.Hdr.copyHeader(), rr.Mf} +} +func (rr *MG) copy() RR { + return &MG{*rr.Hdr.copyHeader(), rr.Mg} +} +func (rr *MINFO) copy() RR { + return &MINFO{*rr.Hdr.copyHeader(), rr.Rmail, rr.Email} +} +func (rr *MR) copy() RR { + return &MR{*rr.Hdr.copyHeader(), rr.Mr} +} +func (rr *MX) copy() RR { + return &MX{*rr.Hdr.copyHeader(), rr.Preference, rr.Mx} +} +func (rr *NAPTR) copy() RR { + return &NAPTR{*rr.Hdr.copyHeader(), rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} +} +func (rr *NID) copy() RR { + return &NID{*rr.Hdr.copyHeader(), rr.Preference, rr.NodeID} +} +func (rr *NIMLOC) copy() RR { + return &NIMLOC{*rr.Hdr.copyHeader(), rr.Locator} +} +func (rr *NINFO) copy() RR { + ZSData := make([]string, len(rr.ZSData)) + copy(ZSData, rr.ZSData) + return &NINFO{*rr.Hdr.copyHeader(), ZSData} +} +func (rr *NS) copy() RR { + return &NS{*rr.Hdr.copyHeader(), rr.Ns} +} +func (rr *NSAPPTR) copy() RR { + return &NSAPPTR{*rr.Hdr.copyHeader(), rr.Ptr} +} +func (rr *NSEC) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &NSEC{*rr.Hdr.copyHeader(), rr.NextDomain, TypeBitMap} +} +func (rr *NSEC3) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &NSEC3{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} +} +func (rr *NSEC3PARAM) copy() RR { + return &NSEC3PARAM{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} +} +func (rr *OPENPGPKEY) copy() RR { + return &OPENPGPKEY{*rr.Hdr.copyHeader(), rr.PublicKey} +} +func (rr *OPT) copy() RR { + Option := make([]EDNS0, len(rr.Option)) + copy(Option, rr.Option) + return &OPT{*rr.Hdr.copyHeader(), Option} +} +func (rr *PTR) copy() RR { + return &PTR{*rr.Hdr.copyHeader(), rr.Ptr} +} +func (rr *PX) copy() RR { + return &PX{*rr.Hdr.copyHeader(), rr.Preference, rr.Map822, rr.Mapx400} +} +func (rr *RFC3597) copy() RR { + return &RFC3597{*rr.Hdr.copyHeader(), rr.Rdata} +} +func (rr *RKEY) copy() RR { + return &RKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} +} +func (rr *RP) copy() RR { + return &RP{*rr.Hdr.copyHeader(), rr.Mbox, rr.Txt} +} +func (rr *RRSIG) copy() RR { + return 
&RRSIG{*rr.Hdr.copyHeader(), rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} +} +func (rr *RT) copy() RR { + return &RT{*rr.Hdr.copyHeader(), rr.Preference, rr.Host} +} +func (rr *SOA) copy() RR { + return &SOA{*rr.Hdr.copyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} +} +func (rr *SPF) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &SPF{*rr.Hdr.copyHeader(), Txt} +} +func (rr *SRV) copy() RR { + return &SRV{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target} +} +func (rr *SSHFP) copy() RR { + return &SSHFP{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint} +} +func (rr *TA) copy() RR { + return &TA{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} +} +func (rr *TALINK) copy() RR { + return &TALINK{*rr.Hdr.copyHeader(), rr.PreviousName, rr.NextName} +} +func (rr *TKEY) copy() RR { + return &TKEY{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} +} +func (rr *TLSA) copy() RR { + return &TLSA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} +} +func (rr *TSIG) copy() RR { + return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData} +} +func (rr *TXT) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &TXT{*rr.Hdr.copyHeader(), Txt} +} +func (rr *UID) copy() RR { + return &UID{*rr.Hdr.copyHeader(), rr.Uid} +} +func (rr *UINFO) copy() RR { + return &UINFO{*rr.Hdr.copyHeader(), rr.Uinfo} +} +func (rr *URI) copy() RR { + return &URI{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Target} +} +func (rr *X25) copy() RR { + return &X25{*rr.Hdr.copyHeader(), rr.PSDNAddress} +} diff --git a/vendor/github.com/prasmussen/gandi-api/LICENSE b/vendor/github.com/prasmussen/gandi-api/LICENSE new file mode 100644 index 000000000..4f2fadb51 --- /dev/null +++ b/vendor/github.com/prasmussen/gandi-api/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2013 Petter Rasmussen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
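Editorial aside (not part of the vendored patch): the generated zmsg.go and ztypes.go hunks above drive everything off lookup tables keyed by the numeric RR type code — typeToUnpack maps a code to a wire-format decoder, and TypeToRR maps it to a constructor. A minimal, self-contained sketch of that registry pattern follows; the Record/A/TXT names here are hypothetical stand-ins, not the miekg/dns API.

```go
package main

import "fmt"

// Record is a hypothetical stand-in for an RR-style interface.
type Record interface {
	Describe() string
}

type A struct{ addr string }

func (r *A) Describe() string { return "A record " + r.addr }

type TXT struct{ text string }

func (r *TXT) Describe() string { return "TXT record " + r.text }

// typeToNew mirrors the shape of the generated TypeToRR table:
// numeric type code -> constructor returning a fresh, empty record.
var typeToNew = map[uint16]func() Record{
	1:  func() Record { return new(A) },   // 1 = A in the DNS type space
	16: func() Record { return new(TXT) }, // 16 = TXT
}

// newRecord builds a record for a type code, reporting whether the code is known.
func newRecord(code uint16) (Record, bool) {
	ctor, ok := typeToNew[code]
	if !ok {
		return nil, false // unknown codes would fall back to a generic record
	}
	return ctor(), true
}

func main() {
	if r, ok := newRecord(16); ok {
		fmt.Println(r.Describe())
	}
}
```

The generated len() and copy() methods above serve the same dispatch: len() gives a size estimate per record so message buffers can be sized before packing, and copy() produces copies that duplicate any slice-valued rdata (as in the NSEC and TXT cases).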
+ diff --git a/vendor/github.com/prasmussen/gandi-api/client/client.go b/vendor/github.com/prasmussen/gandi-api/client/client.go new file mode 100644 index 000000000..225fab81e --- /dev/null +++ b/vendor/github.com/prasmussen/gandi-api/client/client.go @@ -0,0 +1,37 @@ +package client + +import "github.com/kolo/xmlrpc" + +const ( + Production SystemType = iota + Testing +) + +type SystemType int + +func (self SystemType) Url() string { + if self == Production { + return "https://rpc.gandi.net/xmlrpc/" + } + return "https://rpc.ote.gandi.net/xmlrpc/" +} + +type Client struct { + Key string + Url string +} + +func New(apiKey string, system SystemType) *Client { + return &Client{ + Key: apiKey, + Url: system.Url(), + } +} + +func (self *Client) Call(serviceMethod string, args []interface{}, reply interface{}) error { + rpc, err := xmlrpc.NewClient(self.Url, nil) + if err != nil { + return err + } + return rpc.Call(serviceMethod, args, reply) +} diff --git a/vendor/github.com/prasmussen/gandi-api/domain/domain.go b/vendor/github.com/prasmussen/gandi-api/domain/domain.go new file mode 100644 index 000000000..d4697f7c5 --- /dev/null +++ b/vendor/github.com/prasmussen/gandi-api/domain/domain.go @@ -0,0 +1,77 @@ +package domain + +import ( + "github.com/prasmussen/gandi-api/client" + "github.com/prasmussen/gandi-api/operation" +) + +type Domain struct { + *client.Client +} + +func New(c *client.Client) *Domain { + return &Domain{c} +} + +// Check the availability of some domain +func (self *Domain) Available(name string) (string, error) { + var result map[string]interface{} + params := []interface{}{self.Key, name} + if err := self.Call("domain.available", params, &result); err != nil { + return "", err + } + return result[name].(string), nil +} + +// Get domain information +func (self *Domain) Info(name string) (*DomainInfo, error) { + var res map[string]interface{} + params := []interface{}{self.Key, name} + if err := self.Call("domain.info", params, &res); err != nil { + return nil, err + } + return ToDomainInfo(res), nil +} + +// List domains associated to the contact represented by apikey +func (self *Domain) List() ([]*DomainInfoBase, error) { + var res []interface{} + params := []interface{}{self.Key} + if err := self.Call("domain.list", params, &res); err != nil { + return nil, err + } + + domains := make([]*DomainInfoBase, 0) + for _, r := range res { + domain := ToDomainInfoBase(r.(map[string]interface{})) + domains = append(domains, domain) + } + return domains, nil +} + +// Count domains associated to the contact represented by apikey +func (self *Domain) Count() (int64, error) { + var result int64 + params := []interface{}{self.Key} + if err := self.Call("domain.count", params, &result); err != nil { + return -1, err + } + return result, nil +} + +// Create a domain +func (self *Domain) Create(name, contactHandle string, years int64) (*operation.OperationInfo, error) { + var res map[string]interface{} + createArgs := map[string]interface{}{ + "admin": contactHandle, + "bill": contactHandle, + "owner": contactHandle, + "tech": contactHandle, + "duration": years, + } + params := []interface{}{self.Key, name, createArgs} + if err := self.Call("domain.create", params, &res); err != nil { + return nil, err + } + return operation.ToOperationInfo(res), nil +} diff --git a/vendor/github.com/prasmussen/gandi-api/domain/structs.go b/vendor/github.com/prasmussen/gandi-api/domain/structs.go new file mode 100644 index 000000000..d7d7ff6a8 --- /dev/null +++ 
b/vendor/github.com/prasmussen/gandi-api/domain/structs.go @@ -0,0 +1,57 @@ +package domain + +import ( + "time" +) + +type DomainInfoBase struct { + AuthInfo string + DateCreated time.Time + DateRegistryCreation time.Time + DateRegistryEnd time.Time + DateUpdated time.Time + Fqdn string + Id int64 + Status []string + Tld string +} + +type DomainInfoExtra struct { + DateDelete time.Time + DateHoldBegin time.Time + DateHoldEnd time.Time + DatePendingDeleteEnd time.Time + DateRenewBegin time.Time + DateRestoreEnd time.Time + Nameservers []string + Services []string + ZoneId int64 + Autorenew *AutorenewInfo + Contacts *ContactInfo +} + +type DomainInfo struct { + *DomainInfoBase + *DomainInfoExtra +} + +type AutorenewInfo struct { + Active bool + Contact string + Id int64 + ProductId int64 + ProductTypeId int64 +} + +type ContactInfo struct { + Admin *ContactDetails + Bill *ContactDetails + Owner *ContactDetails + Reseller *ContactDetails + Tech *ContactDetails +} + +type ContactDetails struct { + Handle string + Id int64 +} diff --git a/vendor/github.com/prasmussen/gandi-api/domain/util.go b/vendor/github.com/prasmussen/gandi-api/domain/util.go new file mode 100644 index 000000000..be4ece2f8 --- /dev/null +++ b/vendor/github.com/prasmussen/gandi-api/domain/util.go @@ -0,0 +1,69 @@ +package domain + +import ( + "github.com/prasmussen/gandi-api/util" +) + +func ToDomainInfoBase(res map[string]interface{}) *DomainInfoBase { + return &DomainInfoBase{ + AuthInfo: util.ToString(res["authinfo"]), + DateCreated: util.ToTime(res["date_created"]), + DateRegistryCreation: util.ToTime(res["date_registry_creation"]), + DateRegistryEnd: util.ToTime(res["date_registry_end"]), + DateUpdated: util.ToTime(res["date_updated"]), + Fqdn: util.ToString(res["fqdn"]), + Id: util.ToInt64(res["id"]), + Status: util.ToStringSlice(util.ToInterfaceSlice(res["status"])), + Tld: util.ToString(res["tld"]), + } +} + +func ToDomainInfoExtra(res map[string]interface{}) *DomainInfoExtra { + return &DomainInfoExtra{ + DateDelete: util.ToTime(res["date_delete"]), + DateHoldBegin: util.ToTime(res["date_hold_begin"]), + DateHoldEnd: util.ToTime(res["date_hold_end"]), + DatePendingDeleteEnd: util.ToTime(res["date_pending_delete_end"]), + DateRenewBegin: util.ToTime(res["date_renew_begin"]), + DateRestoreEnd: util.ToTime(res["date_restore_end"]), + Nameservers: util.ToStringSlice(util.ToInterfaceSlice(res["nameservers"])), + Services: util.ToStringSlice(util.ToInterfaceSlice(res["services"])), + ZoneId: util.ToInt64(res["zone_id"]), + Autorenew: toAutorenewInfo(util.ToXmlrpcStruct(res["autorenew"])), + Contacts: toContactInfo(util.ToXmlrpcStruct(res["contacts"])), + } +} + +func ToDomainInfo(res map[string]interface{}) *DomainInfo { + return &DomainInfo{ + ToDomainInfoBase(res), + ToDomainInfoExtra(res), + } +} + +func toAutorenewInfo(res map[string]interface{}) *AutorenewInfo { + return &AutorenewInfo{ + Active: util.ToBool(res["active"]), + Contact: util.ToString(res["contact"]), + Id: util.ToInt64(res["id"]), + ProductId: util.ToInt64(res["product_id"]), + ProductTypeId: util.ToInt64(res["product_type_id"]), + } +} + +func toContactInfo(res map[string]interface{}) *ContactInfo { + return &ContactInfo{ + Admin: toContactDetails(util.ToXmlrpcStruct(res["admin"])), + Bill: toContactDetails(util.ToXmlrpcStruct(res["bill"])), + Owner: toContactDetails(util.ToXmlrpcStruct(res["owner"])), + Reseller: toContactDetails(util.ToXmlrpcStruct(res["reseller"])), + Tech: toContactDetails(util.ToXmlrpcStruct(res["tech"])), + } +} + +func 
toContactDetails(res map[string]interface{}) *ContactDetails { + return &ContactDetails{ + Handle: util.ToString(res["handle"]), + Id: util.ToInt64(res["id"]), + } +} diff --git a/vendor/github.com/prasmussen/gandi-api/domain/zone/record/record.go b/vendor/github.com/prasmussen/gandi-api/domain/zone/record/record.go new file mode 100644 index 000000000..ead956456 --- /dev/null +++ b/vendor/github.com/prasmussen/gandi-api/domain/zone/record/record.go @@ -0,0 +1,118 @@ +package record + +import "github.com/prasmussen/gandi-api/client" + +type Record struct { + *client.Client +} + +func New(c *client.Client) *Record { + return &Record{c} +} + +// Count number of records for a given zone/version +func (self *Record) Count(zoneId, version int64) (int64, error) { + var result int64 + params := []interface{}{self.Key, zoneId, version} + if err := self.Call("domain.zone.record.count", params, &result); err != nil { + return -1, err + } + return result, nil +} + +// List records of a version of a DNS zone +func (self *Record) List(zoneId, version int64) ([]*RecordInfo, error) { + var res []interface{} + params := []interface{}{self.Key, zoneId, version} + if err := self.Call("domain.zone.record.list", params, &res); err != nil { + return nil, err + } + + records := make([]*RecordInfo, 0) + for _, r := range res { + record := ToRecordInfo(r.(map[string]interface{})) + records = append(records, record) + } + return records, nil +} + +// Add a new record to zone +func (self *Record) Add(args RecordAdd) (*RecordInfo, error) { + var res map[string]interface{} + createArgs := map[string]interface{}{ + "name": args.Name, + "type": args.Type, + "value": args.Value, + "ttl": args.Ttl, + } + + params := []interface{}{self.Key, args.Zone, args.Version, createArgs} + if err := self.Call("domain.zone.record.add", params, &res); err != nil { + return nil, err + } + return ToRecordInfo(res), nil +} + +// Remove a record from a zone/version +func (self *Record) Delete(zoneId, version, recordId int64) (bool, error) { + var res int64 + deleteArgs := map[string]interface{}{"id": recordId} + params := []interface{}{self.Key, zoneId, version, deleteArgs} + if err := self.Call("domain.zone.record.delete", params, &res); err != nil { + return false, err + } + return (res == 1), nil +} + +// Update a record from zone/version +func (self *Record) Update(args RecordUpdate) ([]*RecordInfo, error) { + var res []interface{} + updateArgs := map[string]interface{}{ + "name": args.Name, + "type": args.Type, + "value": args.Value, + "ttl": args.Ttl, + } + updateOpts := map[string]int64{ + "id": args.Id, + } + + params := []interface{}{self.Key, args.Zone, args.Version, updateOpts, updateArgs} + if err := self.Call("domain.zone.record.update", params, &res); err != nil { + return nil, err + } + + records := make([]*RecordInfo, 0) + for _, r := range res { + record := ToRecordInfo(r.(map[string]interface{})) + records = append(records, record) + } + return records, nil +} + +// SetRecords replaces the entire zone with new records. 
+func (self *Record) SetRecords(zone_id, version_id int64, args []RecordSet) ([]*RecordInfo, error) { + var res []interface{} + + params := []interface{}{self.Key, zone_id, version_id, args} + if err := self.Call("domain.zone.record.set", params, &res); err != nil { + return nil, err + } + + records := make([]*RecordInfo, 0) + for _, r := range res { + record := ToRecordInfo(r.(map[string]interface{})) + records = append(records, record) + } + return records, nil +} + +//// Set the current zone of a domain +//func (self *Record) Set(domainName string, id int64) (*domain.DomainInfo, error) { +// var res map[string]interface{} +// params := []interface{}{self.Key, domainName, id} +// if err := self.zone.set", params, &res); err != nil { +// return nil, err +// } +// return domain.ToDomainInfo(res), nil +//} diff --git a/vendor/github.com/prasmussen/gandi-api/domain/zone/record/structs.go b/vendor/github.com/prasmussen/gandi-api/domain/zone/record/structs.go new file mode 100644 index 000000000..03d253c72 --- /dev/null +++ b/vendor/github.com/prasmussen/gandi-api/domain/zone/record/structs.go @@ -0,0 +1,30 @@ +package record + +type RecordInfo struct { + Id int64 + Name string + Ttl int64 + Type string + Value string +} + +type RecordAdd struct { + Zone int64 `goptions:"-z, --zone, obligatory, description='Zone id'"` + Version int64 `goptions:"-v, --version, obligatory, description='Zone version'"` + Name string `goptions:"-n, --name, obligatory, description='Record name. Relative name, may contain leading wildcard. @ for empty name'"` + Type string `goptions:"-t, --type, obligatory, description='Record type'"` + Value string `goptions:"-V, --value, obligatory, description='Value for record. Semantics depends on the record type.'"` + Ttl int64 `goptions:"-T, --ttl, description='Time to live, in seconds, between 5 minutes and 30 days'"` +} + +type RecordUpdate struct { + Zone int64 `goptions:"-z, --zone, obligatory, description='Zone id'"` + Version int64 `goptions:"-v, --version, obligatory, description='Zone version'"` + Name string `goptions:"-n, --name, obligatory, description='Record name. Relative name, may contain leading wildcard. @ for empty name'"` + Type string `goptions:"-t, --type, obligatory, description='Record type'"` + Value string `goptions:"-V, --value, obligatory, description='Value for record. 
Semantics depends on the record type.'"` + Ttl int64 `goptions:"-T, --ttl, description='Time to live, in seconds, between 5 minutes and 30 days'"` + Id int64 `goptions:"-r, --record, obligatory, description='Record id'"` +} + +type RecordSet map[string]interface{} diff --git a/vendor/github.com/prasmussen/gandi-api/domain/zone/record/util.go b/vendor/github.com/prasmussen/gandi-api/domain/zone/record/util.go new file mode 100644 index 000000000..5560f0daf --- /dev/null +++ b/vendor/github.com/prasmussen/gandi-api/domain/zone/record/util.go @@ -0,0 +1,16 @@ +package record + +import ( + "github.com/prasmussen/gandi-api/util" +) + + +func ToRecordInfo(res map[string]interface{}) *RecordInfo { + return &RecordInfo{ + Id: util.ToInt64(res["id"]), + Name: util.ToString(res["name"]), + Ttl: util.ToInt64(res["ttl"]), + Type: util.ToString(res["type"]), + Value: util.ToString(res["value"]), + } +} diff --git a/vendor/github.com/prasmussen/gandi-api/domain/zone/structs.go b/vendor/github.com/prasmussen/gandi-api/domain/zone/structs.go new file mode 100644 index 000000000..b42c659b9 --- /dev/null +++ b/vendor/github.com/prasmussen/gandi-api/domain/zone/structs.go @@ -0,0 +1,24 @@ +package zone + +import ( + "time" +) + +type ZoneInfoBase struct { + DateUpdated time.Time + Id int64 + Name string + Public bool + Version int64 +} + +type ZoneInfoExtra struct { + Domains int64 + Owner string + Versions []int64 +} + +type ZoneInfo struct { + *ZoneInfoBase + *ZoneInfoExtra +} diff --git a/vendor/github.com/prasmussen/gandi-api/domain/zone/util.go b/vendor/github.com/prasmussen/gandi-api/domain/zone/util.go new file mode 100644 index 000000000..cb21ec933 --- /dev/null +++ b/vendor/github.com/prasmussen/gandi-api/domain/zone/util.go @@ -0,0 +1,30 @@ +package zone + +import ( + "github.com/prasmussen/gandi-api/util" +) + +func ToZoneInfoBase(res map[string]interface{}) *ZoneInfoBase { + return &ZoneInfoBase{ + DateUpdated: util.ToTime(res["date_updated"]), + Id: util.ToInt64(res["id"]), + Name: util.ToString(res["name"]), + Public: util.ToBool(res["public"]), + Version: util.ToInt64(res["version"]), + } +} + +func ToZoneInfoExtra(res map[string]interface{}) *ZoneInfoExtra { + return &ZoneInfoExtra{ + Domains: util.ToInt64(res["domains"]), + Owner: util.ToString(res["owner"]), + Versions: util.ToIntSlice(util.ToInterfaceSlice(res["versions"])), + } +} + +func ToZoneInfo(res map[string]interface{}) *ZoneInfo { + return &ZoneInfo{ + ToZoneInfoBase(res), + ToZoneInfoExtra(res), + } +} diff --git a/vendor/github.com/prasmussen/gandi-api/domain/zone/version/structs.go b/vendor/github.com/prasmussen/gandi-api/domain/zone/version/structs.go new file mode 100644 index 000000000..a563efd2c --- /dev/null +++ b/vendor/github.com/prasmussen/gandi-api/domain/zone/version/structs.go @@ -0,0 +1,10 @@ +package version + +import ( + "time" +) + +type VersionInfo struct { + Id int64 + DateCreated time.Time +} diff --git a/vendor/github.com/prasmussen/gandi-api/domain/zone/version/util.go b/vendor/github.com/prasmussen/gandi-api/domain/zone/version/util.go new file mode 100644 index 000000000..95027b09a --- /dev/null +++ b/vendor/github.com/prasmussen/gandi-api/domain/zone/version/util.go @@ -0,0 +1,13 @@ +package version + +import ( + "github.com/prasmussen/gandi-api/util" +) + + +func ToVersionInfo(res map[string]interface{}) *VersionInfo { + return &VersionInfo{ + Id: util.ToInt64(res["id"]), + DateCreated: util.ToTime(res["date_created"]), + } +} diff --git 
a/vendor/github.com/prasmussen/gandi-api/domain/zone/version/version.go b/vendor/github.com/prasmussen/gandi-api/domain/zone/version/version.go new file mode 100644 index 000000000..1a61e99d9 --- /dev/null +++ b/vendor/github.com/prasmussen/gandi-api/domain/zone/version/version.go @@ -0,0 +1,68 @@ +package version + +import "github.com/prasmussen/gandi-api/client" + +type Version struct { + *client.Client +} + +func New(c *client.Client) *Version { + return &Version{c} +} + +// Count this zone versions +func (self *Version) Count(zoneId int64) (int64, error) { + var result int64 + params := []interface{}{self.Key, zoneId} + if err := self.Call("domain.zone.version.count", params, &result); err != nil { + return -1, err + } + return result, nil +} + +// List this zone versions, with their creation date +func (self *Version) List(zoneId int64) ([]*VersionInfo, error) { + var res []interface{} + params := []interface{}{self.Key, zoneId} + if err := self.Call("domain.zone.version.list", params, &res); err != nil { + return nil, err + } + + versions := make([]*VersionInfo, 0) + for _, r := range res { + version := ToVersionInfo(r.(map[string]interface{})) + versions = append(versions, version) + } + return versions, nil +} + +// Create a new version from another version. This will duplicate the version’s records +func (self *Version) New(zoneId, version int64) (int64, error) { + var res int64 + + params := []interface{}{self.Key, zoneId, version} + if err := self.Call("domain.zone.version.new", params, &res); err != nil { + return -1, err + } + return res, nil +} + +// Delete a specific version +func (self *Version) Delete(zoneId, version int64) (bool, error) { + var res bool + params := []interface{}{self.Key, zoneId, version} + if err := self.Call("domain.zone.version.delete", params, &res); err != nil { + return false, err + } + return res, nil +} + +// Set the active version of a zone +func (self *Version) Set(zoneId, version int64) (bool, error) { + var res bool + params := []interface{}{self.Key, zoneId, version} + if err := self.Call("domain.zone.version.set", params, &res); err != nil { + return false, err + } + return res, nil +} diff --git a/vendor/github.com/prasmussen/gandi-api/domain/zone/zone.go b/vendor/github.com/prasmussen/gandi-api/domain/zone/zone.go new file mode 100644 index 000000000..d4e49744f --- /dev/null +++ b/vendor/github.com/prasmussen/gandi-api/domain/zone/zone.go @@ -0,0 +1,81 @@ +package zone + +import ( + "github.com/prasmussen/gandi-api/client" + "github.com/prasmussen/gandi-api/domain" +) + +type Zone struct { + *client.Client +} + +func New(c *client.Client) *Zone { + return &Zone{c} +} + +// Counts accessible zones +func (self *Zone) Count() (int64, error) { + var result int64 + params := []interface{}{self.Key} + if err := self.Call("domain.zone.count", params, &result); err != nil { + return -1, err + } + return result, nil +} + +// Get zone information +func (self *Zone) Info(id int64) (*ZoneInfo, error) { + var res map[string]interface{} + params := []interface{}{self.Key, id} + if err := self.Call("domain.zone.info", params, &res); err != nil { + return nil, err + } + return ToZoneInfo(res), nil +} + +// List accessible DNS zones. 
+func (self *Zone) List() ([]*ZoneInfoBase, error) { + var res []interface{} + params := []interface{}{self.Key} + if err := self.Call("domain.zone.list", params, &res); err != nil { + return nil, err + } + + zones := make([]*ZoneInfoBase, 0) + for _, r := range res { + zone := ToZoneInfoBase(r.(map[string]interface{})) + zones = append(zones, zone) + } + return zones, nil +} + +// Create a zone +func (self *Zone) Create(name string) (*ZoneInfo, error) { + var res map[string]interface{} + createArgs := map[string]interface{}{"name": name} + params := []interface{}{self.Key, createArgs} + if err := self.Call("domain.zone.create", params, &res); err != nil { + return nil, err + } + return ToZoneInfo(res), nil +} + +// Delete a zone +func (self *Zone) Delete(id int64) (bool, error) { + var res bool + params := []interface{}{self.Key, id} + if err := self.Call("domain.zone.delete", params, &res); err != nil { + return false, err + } + return res, nil +} + +// Set the current zone of a domain +func (self *Zone) Set(domainName string, id int64) (*domain.DomainInfo, error) { + var res map[string]interface{} + params := []interface{}{self.Key, domainName, id} + if err := self.Call("domain.zone.set", params, &res); err != nil { + return nil, err + } + return domain.ToDomainInfo(res), nil +} diff --git a/vendor/github.com/prasmussen/gandi-api/operation/operation.go b/vendor/github.com/prasmussen/gandi-api/operation/operation.go new file mode 100644 index 000000000..844cddbc9 --- /dev/null +++ b/vendor/github.com/prasmussen/gandi-api/operation/operation.go @@ -0,0 +1,60 @@ +package operation + +import "github.com/prasmussen/gandi-api/client" + +type Operation struct { + *client.Client +} + +func New(c *client.Client) *Operation { + return &Operation{c} +} + +// Count operations created by this contact +func (self *Operation) Count() (int64, error) { + var result int64 + // params := Params{Params: []interface{}{self.Key}} + params := []interface{}{self.Key} + if err := self.Call("operation.count", params, &result); err != nil { + return -1, err + } + return result, nil +} + +// Get operation information +func (self *Operation) Info(id int64) (*OperationInfo, error) { + var res map[string]interface{} + // params := Params{Params: []interface{}{self.Key, id}} + params := []interface{}{self.Key, id} + if err := self.Call("operation.info", params, &res); err != nil { + return nil, err + } + return ToOperationInfo(res), nil +} + +// Cancel an operation +func (self *Operation) Cancel(id int64) (bool, error) { + var res bool + // params := Params{Params: []interface{}{self.Key, id}} + params := []interface{}{self.Key, id} + if err := self.Call("operation.cancel", params, &res); err != nil { + return false, err + } + return res, nil +} + +// List operations created by this contact +func (self *Operation) List() ([]*OperationInfo, error) { + var res []interface{} + // params := Params{Params: []interface{}{self.Key}} + params := []interface{}{self.Key} + if err := self.Call("operation.list", params, &res); err != nil { + return nil, err + } + + operations := make([]*OperationInfo, len(res), len(res)) + for i, r := range res { + operations[i] = ToOperationInfo(r.(map[string]interface{})) + } + return operations, nil +} diff --git a/vendor/github.com/prasmussen/gandi-api/operation/structs.go b/vendor/github.com/prasmussen/gandi-api/operation/structs.go new file mode 100644 index 000000000..2625031ae --- /dev/null +++ b/vendor/github.com/prasmussen/gandi-api/operation/structs.go @@ -0,0 +1,29 @@ +package 
operation + +import ( + "time" +) + +type OperationInfo struct { + DateCreated time.Time + DateStart time.Time + DateUpdated time.Time + Eta string + Id int64 + LastError string + SessionId int64 + Source string + Step string + Type string + Params map[string]interface{} + OperationDetails *OperationDetails +} + +type OperationDetails struct { + Id string + Label string + ProductAction string + ProductName string + ProductType string + Quantity int64 +} diff --git a/vendor/github.com/prasmussen/gandi-api/operation/util.go b/vendor/github.com/prasmussen/gandi-api/operation/util.go new file mode 100644 index 000000000..5f5800848 --- /dev/null +++ b/vendor/github.com/prasmussen/gandi-api/operation/util.go @@ -0,0 +1,33 @@ +package operation + +import ( + "github.com/prasmussen/gandi-api/util" +) + +func ToOperationInfo(res map[string]interface{}) *OperationInfo { + return &OperationInfo{ + DateCreated: util.ToTime(res["date_created"]), + DateStart: util.ToTime(res["date_start"]), + DateUpdated: util.ToTime(res["date_updated"]), + Eta: util.ToString(res["eta"]), + Id: util.ToInt64(res["id"]), + LastError: util.ToString(res["last_error"]), + SessionId: util.ToInt64(res["session_id"]), + Source: util.ToString(res["source"]), + Step: util.ToString(res["step"]), + Type: util.ToString(res["type"]), + OperationDetails: ToOperationDetails(util.ToXmlrpcStruct(res["infos"])), + Params: util.ToXmlrpcStruct(res["params"]), + } +} + +func ToOperationDetails(res map[string]interface{}) *OperationDetails { + return &OperationDetails{ + Id: util.ToString(res["id"]), + Label: util.ToString(res["label"]), + ProductAction: util.ToString(res["product_action"]), + ProductName: util.ToString(res["product_name"]), + ProductType: util.ToString(res["product_type"]), + Quantity: util.ToInt64(res["quantity"]), + } +} diff --git a/vendor/github.com/prasmussen/gandi-api/util/util.go b/vendor/github.com/prasmussen/gandi-api/util/util.go new file mode 100644 index 000000000..be6221a9e --- /dev/null +++ b/vendor/github.com/prasmussen/gandi-api/util/util.go @@ -0,0 +1,76 @@ +package util + +import "time" + +func ToStringSlice(is []interface{}) []string { + ss := make([]string, len(is), len(is)) + + for i, _ := range is { + ss[i] = is[i].(string) + } + return ss +} + +func ToString(i interface{}) string { + if v, ok := i.(string); ok { + return v + } + return "" +} + +func ToTime(i interface{}) time.Time { + if v, ok := i.(time.Time); ok { + return v + } + var t time.Time + return t +} + +func ToInterfaceSlice(i interface{}) []interface{} { + if v, ok := i.([]interface{}); ok { + return v + } + var s []interface{} + return s +} + +func ToInt64(i interface{}) int64 { + if v, ok := i.(int64); ok { + return v + } + var n int64 + return n +} + +func ToFloat64(i interface{}) float64 { + if v, ok := i.(float64); ok { + return v + } + var n float64 + return n +} + +func ToXmlrpcStruct(i interface{}) map[string]interface{} { + if v, ok := i.(map[string]interface{}); ok { + return v + } + var s map[string]interface{} + return s +} + +func ToBool(i interface{}) bool { + if v, ok := i.(bool); ok { + return v + } + var b bool + return b +} + +func ToIntSlice(is []interface{}) []int64 { + numbers := make([]int64, len(is), len(is)) + + for i, _ := range is { + numbers[i] = ToInt64(is[i]) + } + return numbers +} diff --git a/vendor/github.com/robertkrimen/otto/DESIGN.markdown b/vendor/github.com/robertkrimen/otto/DESIGN.markdown new file mode 100644 index 000000000..288752987 --- /dev/null +++ 
b/vendor/github.com/robertkrimen/otto/DESIGN.markdown @@ -0,0 +1 @@ +* Designate the filename of "anonymous" source code by the hash (md5/sha1, etc.) diff --git a/vendor/github.com/robertkrimen/otto/LICENSE b/vendor/github.com/robertkrimen/otto/LICENSE new file mode 100644 index 000000000..b6179fe38 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2012 Robert Krimen + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/robertkrimen/otto/Makefile b/vendor/github.com/robertkrimen/otto/Makefile new file mode 100644 index 000000000..9868db3ca --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/Makefile @@ -0,0 +1,63 @@ +.PHONY: test test-race test-release release release-check test-262 +.PHONY: parser +.PHONY: otto assets underscore + +TESTS := \ + ~ + +TEST := -v --run +TEST := -v +TEST := -v --run Test\($(subst $(eval) ,\|,$(TESTS))\) +TEST := . + +test: parser inline.go + go test -i + go test $(TEST) + @echo PASS + +parser: + $(MAKE) -C parser + +inline.go: inline.pl + ./$< > $@ + +################# +# release, test # +################# + +release: test-race test-release + for package in . parser token ast file underscore registry; do (cd $$package && godocdown --signature > README.markdown); done + @echo \*\*\* make release-check + @echo PASS + +release-check: .test + $(MAKE) -C test build test + $(MAKE) -C .test/test262 build test + @echo PASS + +test-262: .test + $(MAKE) -C .test/test262 build test + @echo PASS + +test-release: + go test -i + go test + +test-race: + go test -race -i + go test -race + +################################# +# otto, assets, underscore, ... # +################################# + +otto: + $(MAKE) -C otto + +assets: + mkdir -p .assets + for file in underscore/test/*.js; do tr "\`" "_" < $$file > .assets/`basename $$file`; done + +underscore: + $(MAKE) -C $@ + diff --git a/vendor/github.com/robertkrimen/otto/README.markdown b/vendor/github.com/robertkrimen/otto/README.markdown new file mode 100644 index 000000000..30f734ea3 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/README.markdown @@ -0,0 +1,870 @@ +# otto +-- +```go +import "github.com/robertkrimen/otto" +``` + +Package otto is a JavaScript parser and interpreter written natively in Go. 
+ +http://godoc.org/github.com/robertkrimen/otto + +```go +import ( + "github.com/robertkrimen/otto" +) +``` + +Run something in the VM + +```go +vm := otto.New() +vm.Run(` + abc = 2 + 2; + console.log("The value of abc is " + abc); // 4 +`) +``` + +Get a value out of the VM + +```go +if value, err := vm.Get("abc"); err == nil { + if value_int, err := value.ToInteger(); err == nil { + fmt.Printf("%v, %v\n", value_int, err) + } +} +``` + +Set a number + +```go +vm.Set("def", 11) +vm.Run(` + console.log("The value of def is " + def); + // The value of def is 11 +`) +``` + +Set a string + +```go +vm.Set("xyzzy", "Nothing happens.") +vm.Run(` + console.log(xyzzy.length); // 16 +`) +``` + +Get the value of an expression + +```go +value, _ = vm.Run("xyzzy.length") +{ + // value is an int64 with a value of 16 + value, _ := value.ToInteger() +} +``` + +An error happens + +```go +value, err = vm.Run("abcdefghijlmnopqrstuvwxyz.length") +if err != nil { + // err = ReferenceError: abcdefghijlmnopqrstuvwxyz is not defined + // If there is an error, then value.IsUndefined() is true + ... +} +``` + +Set a Go function + +```go +vm.Set("sayHello", func(call otto.FunctionCall) otto.Value { + fmt.Printf("Hello, %s.\n", call.Argument(0).String()) + return otto.Value{} +}) +``` + +Set a Go function that returns something useful + +```go +vm.Set("twoPlus", func(call otto.FunctionCall) otto.Value { + right, _ := call.Argument(0).ToInteger() + result, _ := vm.ToValue(2 + right) + return result +}) +``` + +Use the functions in JavaScript + +```go +result, _ = vm.Run(` + sayHello("Xyzzy"); // Hello, Xyzzy. + sayHello(); // Hello, undefined + + result = twoPlus(2.0); // 4 +`) +``` + +### Parser + +A separate parser is available in the parser package if you're just interested +in building an AST. + +http://godoc.org/github.com/robertkrimen/otto/parser + +Parse and return an AST + +```go +filename := "" // A filename is optional +src := ` + // Sample xyzzy example + (function(){ + if (3.14159 > 0) { + console.log("Hello, World."); + return; + } + + var xyzzy = NaN; + console.log("Nothing happens."); + return xyzzy; + })(); +` + +// Parse some JavaScript, yielding a *ast.Program and/or an ErrorList +program, err := parser.ParseFile(nil, filename, src, 0) +``` + +### otto + +You can run (Go) JavaScript from the commandline with: +http://github.com/robertkrimen/otto/tree/master/otto + + $ go get -v github.com/robertkrimen/otto/otto + +Run JavaScript by entering some source on stdin or by giving otto a filename: + + $ otto example.js + +### underscore + +Optionally include the JavaScript utility-belt library, underscore, with this +import: + +```go +import ( + "github.com/robertkrimen/otto" + _ "github.com/robertkrimen/otto/underscore" +) + +// Now every otto runtime will come loaded with underscore +``` + +For more information: http://github.com/robertkrimen/otto/tree/master/underscore + + +### Caveat Emptor + +The following are some limitations with otto: + + * "use strict" will parse, but does nothing. + * The regular expression engine (re2/regexp) is not fully compatible with the ECMA5 specification. + + +### Regular Expression Incompatibility + +Go translates JavaScript-style regular expressions into something that is +"regexp" compatible via `parser.TransformRegExp`.
Unfortunately, RegExp requires +backtracking for some patterns, and backtracking is not supported by the +standard Go engine: https://code.google.com/p/re2/wiki/Syntax + +Therefore, the following syntax is incompatible: + + (?=) // Lookahead (positive), currently a parsing error + (?!) // Lookahead (backhead), currently a parsing error + \1 // Backreference (\1, \2, \3, ...), currently a parsing error + +A brief discussion of these limitations: "Regexp (?!re)" +https://groups.google.com/forum/?fromgroups=#%21topic/golang-nuts/7qgSDWPIh_E + +More information about re2: https://code.google.com/p/re2/ + +In addition to the above, re2 (Go) has a different definition for \s: [\t\n\f\r +]. The JavaScript definition, on the other hand, also includes \v, Unicode +"Separator, Space", etc. + + +### Halting Problem + +If you want to stop long running executions (like third-party code), you can use +the interrupt channel to do this: + +```go +package main + +import ( + "errors" + "fmt" + "os" + "time" + + "github.com/robertkrimen/otto" +) + +var halt = errors.New("Stahp") + +func main() { + runUnsafe(`var abc = [];`) + runUnsafe(` + while (true) { + // Loop forever + }`) +} + +func runUnsafe(unsafe string) { + start := time.Now() + defer func() { + duration := time.Since(start) + if caught := recover(); caught != nil { + if caught == halt { + fmt.Fprintf(os.Stderr, "Some code took to long! Stopping after: %v\n", duration) + return + } + panic(caught) // Something else happened, repanic! + } + fmt.Fprintf(os.Stderr, "Ran code successfully: %v\n", duration) + }() + + vm := otto.New() + vm.Interrupt = make(chan func(), 1) // The buffer prevents blocking + + go func() { + time.Sleep(2 * time.Second) // Stop after two seconds + vm.Interrupt <- func() { + panic(halt) + } + }() + + vm.Run(unsafe) // Here be dragons (risky code) +} +``` + +Where is setTimeout/setInterval? + +These timing functions are not actually part of the ECMA-262 specification. +Typically, they belong to the `window` object (in the browser). It would not be +difficult to provide something like these via Go, but you probably want to wrap +otto in an event loop in that case. + +For an example of how this could be done in Go with otto, see natto: + +http://github.com/robertkrimen/natto + +Here is some more discussion of the issue: + +* http://book.mixu.net/node/ch2.html + +* http://en.wikipedia.org/wiki/Reentrancy_%28computing%29 + +* http://aaroncrane.co.uk/2009/02/perl_safe_signals/ + +## Usage + +```go +var ErrVersion = errors.New("version mismatch") +``` + +#### type Error + +```go +type Error struct { +} +``` + +An Error represents a runtime error, e.g. a TypeError, a ReferenceError, etc. + +#### func (Error) Error + +```go +func (err Error) Error() string +``` +Error returns a description of the error + + TypeError: 'def' is not a function + +#### func (Error) String + +```go +func (err Error) String() string +``` +String returns a description of the error and a trace of where the error +occurred. + + TypeError: 'def' is not a function + at xyz (:3:9) + at :7:1/ + +#### type FunctionCall + +```go +type FunctionCall struct { + This Value + ArgumentList []Value + Otto *Otto +} +``` + +FunctionCall is an encapsulation of a JavaScript function call. + +#### func (FunctionCall) Argument + +```go +func (self FunctionCall) Argument(index int) Value +``` +Argument will return the value of the argument at the given index. + +If no such argument exists, undefined is returned. 
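
For illustration, here is a minimal, self-contained sketch that ties `FunctionCall` together with `Set`, `Run`, and `ToValue`; the callback name `greet` and its greeting string are invented for this example.

```go
package main

import (
	"fmt"

	"github.com/robertkrimen/otto"
)

func main() {
	vm := otto.New()

	// greet is a hypothetical callback; Argument(0) returns undefined
	// (not an error) when the caller passes nothing, so String() then
	// yields the text "undefined".
	vm.Set("greet", func(call otto.FunctionCall) otto.Value {
		name := call.Argument(0).String()
		result, _ := call.Otto.ToValue("Hello, " + name + "!")
		return result
	})

	value, _ := vm.Run(`greet("Xyzzy")`)
	fmt.Println(value.String()) // Hello, Xyzzy!
}
```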
+ +#### type Object + +```go +type Object struct { +} +``` + +Object is the representation of a JavaScript object. + +#### func (Object) Call + +```go +func (self Object) Call(name string, argumentList ...interface{}) (Value, error) +``` +Call a method on the object. + +It is essentially equivalent to: + + var method, _ := object.Get(name) + method.Call(object, argumentList...) + +An undefined value and an error will result if: + + 1. There is an error during conversion of the argument list + 2. The property is not actually a function + 3. An (uncaught) exception is thrown + +#### func (Object) Class + +```go +func (self Object) Class() string +``` +Class will return the class string of the object. + +The return value will (generally) be one of: + + Object + Function + Array + String + Number + Boolean + Date + RegExp + +#### func (Object) Get + +```go +func (self Object) Get(name string) (Value, error) +``` +Get the value of the property with the given name. + +#### func (Object) Keys + +```go +func (self Object) Keys() []string +``` +Get the keys for the object + +Equivalent to calling Object.keys on the object + +#### func (Object) Set + +```go +func (self Object) Set(name string, value interface{}) error +``` +Set the property of the given name to the given value. + +An error will result if the setting the property triggers an exception (i.e. +read-only), or there is an error during conversion of the given value. + +#### func (Object) Value + +```go +func (self Object) Value() Value +``` +Value will return self as a value. + +#### type Otto + +```go +type Otto struct { + // Interrupt is a channel for interrupting the runtime. You can use this to halt a long running execution, for example. + // See "Halting Problem" for more information. + Interrupt chan func() +} +``` + +Otto is the representation of the JavaScript runtime. Each instance of Otto has +a self-contained namespace. + +#### func New + +```go +func New() *Otto +``` +New will allocate a new JavaScript runtime + +#### func Run + +```go +func Run(src interface{}) (*Otto, Value, error) +``` +Run will allocate a new JavaScript runtime, run the given source on the +allocated runtime, and return the runtime, resulting value, and error (if any). + +src may be a string, a byte slice, a bytes.Buffer, or an io.Reader, but it MUST +always be in UTF-8. + +src may also be a Script. + +src may also be a Program, but if the AST has been modified, then runtime +behavior is undefined. + +#### func (Otto) Call + +```go +func (self Otto) Call(source string, this interface{}, argumentList ...interface{}) (Value, error) +``` +Call the given JavaScript with a given this and arguments. + +If this is nil, then some special handling takes place to determine the proper +this value, falling back to a "standard" invocation if necessary (where this is +undefined). + +If source begins with "new " (A lowercase new followed by a space), then Call +will invoke the function constructor rather than performing a function call. In +this case, the this argument has no effect. + +```go +// value is a String object +value, _ := vm.Call("Object", nil, "Hello, World.") + +// Likewise... 
+value, _ := vm.Call("new Object", nil, "Hello, World.") + +// This will perform a concat on the given array and return the result +// value is [ 1, 2, 3, undefined, 4, 5, 6, 7, "abc" ] +value, _ := vm.Call(`[ 1, 2, 3, undefined, 4 ].concat`, nil, 5, 6, 7, "abc") +``` + +#### func (*Otto) Compile + +```go +func (self *Otto) Compile(filename string, src interface{}) (*Script, error) +``` +Compile will parse the given source and return a Script value or nil and an +error if there was a problem during compilation. + +```go +script, err := vm.Compile("", `var abc; if (!abc) abc = 0; abc += 2; abc;`) +vm.Run(script) +``` + +#### func (*Otto) Copy + +```go +func (in *Otto) Copy() *Otto +``` +Copy will create a copy/clone of the runtime. + +Copy is useful for saving some time when creating many similar runtimes. + +This method works by walking the original runtime and cloning each object, +scope, stash, etc. into a new runtime. + +Be on the lookout for memory leaks or inadvertent sharing of resources. + +#### func (Otto) Get + +```go +func (self Otto) Get(name string) (Value, error) +``` +Get the value of the top-level binding of the given name. + +If there is an error (like the binding does not exist), then the value will be +undefined. + +#### func (Otto) Object + +```go +func (self Otto) Object(source string) (*Object, error) +``` +Object will run the given source and return the result as an object. + +For example, accessing an existing object: + +```go +object, _ := vm.Object(`Number`) +``` + +Or, creating a new object: + +```go +object, _ := vm.Object(`({ xyzzy: "Nothing happens." })`) +``` + +Or, creating and assigning an object: + +```go +object, _ := vm.Object(`xyzzy = {}`) +object.Set("volume", 11) +``` + +If there is an error (like the source does not result in an object), then nil +and an error is returned. + +#### func (Otto) Run + +```go +func (self Otto) Run(src interface{}) (Value, error) +``` +Run will run the given source (parsing it first if necessary), returning the +resulting value and error (if any) + +src may be a string, a byte slice, a bytes.Buffer, or an io.Reader, but it MUST +always be in UTF-8. + +If the runtime is unable to parse source, then this function will return +undefined and the parse error (nothing will be evaluated in this case). + +src may also be a Script. + +src may also be a Program, but if the AST has been modified, then runtime +behavior is undefined. + +#### func (Otto) Set + +```go +func (self Otto) Set(name string, value interface{}) error +``` +Set the top-level binding of the given name to the given value. + +Set will automatically apply ToValue to the given value in order to convert it +to a JavaScript value (type Value). + +If there is an error (like the binding is read-only, or the ToValue conversion +fails), then an error is returned. + +If the top-level binding does not exist, it will be created. + +#### func (Otto) ToValue + +```go +func (self Otto) ToValue(value interface{}) (Value, error) +``` +ToValue will convert an interface{} value to a value digestible by +otto/JavaScript. + +#### type Script + +```go +type Script struct { +} +``` + +Script is a handle for some (reusable) JavaScript. Passing a Script value to a +run method will evaluate the JavaScript. + +#### func (*Script) String + +```go +func (self *Script) String() string +``` + +#### type Value + +```go +type Value struct { +} +``` + +Value is the representation of a JavaScript value. 
+ +#### func FalseValue + +```go +func FalseValue() Value +``` +FalseValue will return a value representing false. + +It is equivalent to: + +```go +ToValue(false) +``` + +#### func NaNValue + +```go +func NaNValue() Value +``` +NaNValue will return a value representing NaN. + +It is equivalent to: + +```go +ToValue(math.NaN()) +``` + +#### func NullValue + +```go +func NullValue() Value +``` +NullValue will return a Value representing null. + +#### func ToValue + +```go +func ToValue(value interface{}) (Value, error) +``` +ToValue will convert an interface{} value to a value digestible by +otto/JavaScript + +This function will not work for advanced types (struct, map, slice/array, etc.) +and you should use Otto.ToValue instead. + +#### func TrueValue + +```go +func TrueValue() Value +``` +TrueValue will return a value representing true. + +It is equivalent to: + +```go +ToValue(true) +``` + +#### func UndefinedValue + +```go +func UndefinedValue() Value +``` +UndefinedValue will return a Value representing undefined. + +#### func (Value) Call + +```go +func (value Value) Call(this Value, argumentList ...interface{}) (Value, error) +``` +Call the value as a function with the given this value and argument list and +return the result of invocation. It is essentially equivalent to: + + value.apply(thisValue, argumentList) + +An undefined value and an error will result if: + + 1. There is an error during conversion of the argument list + 2. The value is not actually a function + 3. An (uncaught) exception is thrown + +#### func (Value) Class + +```go +func (value Value) Class() string +``` +Class will return the class string of the value or the empty string if value is +not an object. + +The return value will (generally) be one of: + + Object + Function + Array + String + Number + Boolean + Date + RegExp + +#### func (Value) Export + +```go +func (self Value) Export() (interface{}, error) +``` +Export will attempt to convert the value to a Go representation and return it +via an interface{} kind. + +Export returns an error, but it will always be nil. It is present for backwards +compatibility. + +If a reasonable conversion is not possible, then the original value is returned. + + undefined -> nil (FIXME?: Should be Value{}) + null -> nil + boolean -> bool + number -> A number type (int, float32, uint64, ...) + string -> string + Array -> []interface{} + Object -> map[string]interface{} + +#### func (Value) IsBoolean + +```go +func (value Value) IsBoolean() bool +``` +IsBoolean will return true if value is a boolean (primitive). + +#### func (Value) IsDefined + +```go +func (value Value) IsDefined() bool +``` +IsDefined will return false if the value is undefined, and true otherwise. + +#### func (Value) IsFunction + +```go +func (value Value) IsFunction() bool +``` +IsFunction will return true if value is a function. + +#### func (Value) IsNaN + +```go +func (value Value) IsNaN() bool +``` +IsNaN will return true if value is NaN (or would convert to NaN). + +#### func (Value) IsNull + +```go +func (value Value) IsNull() bool +``` +IsNull will return true if the value is null, and false otherwise. + +#### func (Value) IsNumber + +```go +func (value Value) IsNumber() bool +``` +IsNumber will return true if value is a number (primitive). + +#### func (Value) IsObject + +```go +func (value Value) IsObject() bool +``` +IsObject will return true if value is an object. 
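
A small sketch (with an invented object literal) showing how the predicates above pair with `Export`: check what kind of `Value` you have, then convert it into ordinary Go data.

```go
package main

import (
	"fmt"

	"github.com/robertkrimen/otto"
)

func main() {
	vm := otto.New()
	value, _ := vm.Run(`({name: "xyzzy", tags: ["a", "b"], count: 3})`)

	if value.IsObject() {
		// Per the conversion table above: Object -> map[string]interface{},
		// Array -> []interface{}, string -> string, number -> a Go number type.
		exported, _ := value.Export()
		if m, ok := exported.(map[string]interface{}); ok {
			fmt.Println(m["name"], m["count"], m["tags"])
		}
	}
}
```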
+ +#### func (Value) IsPrimitive + +```go +func (value Value) IsPrimitive() bool +``` +IsPrimitive will return true if value is a primitive (any kind of primitive). + +#### func (Value) IsString + +```go +func (value Value) IsString() bool +``` +IsString will return true if value is a string (primitive). + +#### func (Value) IsUndefined + +```go +func (value Value) IsUndefined() bool +``` +IsUndefined will return true if the value is undefined, and false otherwise. + +#### func (Value) Object + +```go +func (value Value) Object() *Object +``` +Object will return the object of the value, or nil if value is not an object. + +This method will not do any implicit conversion. For example, calling this +method on a string primitive value will not return a String object. + +#### func (Value) String + +```go +func (value Value) String() string +``` +String will return the value as a string. + +This method will make return the empty string if there is an error. + +#### func (Value) ToBoolean + +```go +func (value Value) ToBoolean() (bool, error) +``` +ToBoolean will convert the value to a boolean (bool). + + ToValue(0).ToBoolean() => false + ToValue("").ToBoolean() => false + ToValue(true).ToBoolean() => true + ToValue(1).ToBoolean() => true + ToValue("Nothing happens").ToBoolean() => true + +If there is an error during the conversion process (like an uncaught exception), +then the result will be false and an error. + +#### func (Value) ToFloat + +```go +func (value Value) ToFloat() (float64, error) +``` +ToFloat will convert the value to a number (float64). + + ToValue(0).ToFloat() => 0. + ToValue(1.1).ToFloat() => 1.1 + ToValue("11").ToFloat() => 11. + +If there is an error during the conversion process (like an uncaught exception), +then the result will be 0 and an error. + +#### func (Value) ToInteger + +```go +func (value Value) ToInteger() (int64, error) +``` +ToInteger will convert the value to a number (int64). + + ToValue(0).ToInteger() => 0 + ToValue(1.1).ToInteger() => 1 + ToValue("11").ToInteger() => 11 + +If there is an error during the conversion process (like an uncaught exception), +then the result will be 0 and an error. + +#### func (Value) ToString + +```go +func (value Value) ToString() (string, error) +``` +ToString will convert the value to a string (string). + + ToValue(0).ToString() => "0" + ToValue(false).ToString() => "false" + ToValue(1.1).ToString() => "1.1" + ToValue("11").ToString() => "11" + ToValue('Nothing happens.').ToString() => "Nothing happens." + +If there is an error during the conversion process (like an uncaught exception), +then the result will be the empty string ("") and an error. + +-- +**godocdown** http://github.com/robertkrimen/godocdown diff --git a/vendor/github.com/robertkrimen/otto/ast/README.markdown b/vendor/github.com/robertkrimen/otto/ast/README.markdown new file mode 100644 index 000000000..a785da911 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/ast/README.markdown @@ -0,0 +1,1068 @@ +# ast +-- + import "github.com/robertkrimen/otto/ast" + +Package ast declares types representing a JavaScript AST. + + +### Warning + +The parser and AST interfaces are still works-in-progress (particularly where +node types are concerned) and may change in the future. 
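
As an illustrative sketch of typical usage (the sample source and printed labels are invented, and the concrete node type produced for any given construct is up to the parser), parse a program with `otto/parser` and type-switch over the statements in `Program.Body`:

```go
package main

import (
	"fmt"

	"github.com/robertkrimen/otto/ast"
	"github.com/robertkrimen/otto/parser"
)

func main() {
	src := `var abc = 1; function xyzzy() { return abc; } xyzzy();`

	program, err := parser.ParseFile(nil, "", src, 0)
	if err != nil {
		panic(err)
	}

	// Program.Body is a []Statement; the cases below cover a couple of the
	// concrete statement types documented in this package.
	for _, stmt := range program.Body {
		switch s := stmt.(type) {
		case *ast.VariableStatement:
			fmt.Println("variable statement with", len(s.List), "declaration(s)")
		case *ast.ExpressionStatement:
			fmt.Printf("expression statement (%T)\n", s.Expression)
		default:
			fmt.Printf("some other statement: %T\n", s)
		}
	}
}
```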
+ +## Usage + +#### type ArrayLiteral + +```go +type ArrayLiteral struct { + LeftBracket file.Idx + RightBracket file.Idx + Value []Expression +} +``` + + +#### func (*ArrayLiteral) Idx0 + +```go +func (self *ArrayLiteral) Idx0() file.Idx +``` + +#### func (*ArrayLiteral) Idx1 + +```go +func (self *ArrayLiteral) Idx1() file.Idx +``` + +#### type AssignExpression + +```go +type AssignExpression struct { + Operator token.Token + Left Expression + Right Expression +} +``` + + +#### func (*AssignExpression) Idx0 + +```go +func (self *AssignExpression) Idx0() file.Idx +``` + +#### func (*AssignExpression) Idx1 + +```go +func (self *AssignExpression) Idx1() file.Idx +``` + +#### type BadExpression + +```go +type BadExpression struct { + From file.Idx + To file.Idx +} +``` + + +#### func (*BadExpression) Idx0 + +```go +func (self *BadExpression) Idx0() file.Idx +``` + +#### func (*BadExpression) Idx1 + +```go +func (self *BadExpression) Idx1() file.Idx +``` + +#### type BadStatement + +```go +type BadStatement struct { + From file.Idx + To file.Idx +} +``` + + +#### func (*BadStatement) Idx0 + +```go +func (self *BadStatement) Idx0() file.Idx +``` + +#### func (*BadStatement) Idx1 + +```go +func (self *BadStatement) Idx1() file.Idx +``` + +#### type BinaryExpression + +```go +type BinaryExpression struct { + Operator token.Token + Left Expression + Right Expression + Comparison bool +} +``` + + +#### func (*BinaryExpression) Idx0 + +```go +func (self *BinaryExpression) Idx0() file.Idx +``` + +#### func (*BinaryExpression) Idx1 + +```go +func (self *BinaryExpression) Idx1() file.Idx +``` + +#### type BlockStatement + +```go +type BlockStatement struct { + LeftBrace file.Idx + List []Statement + RightBrace file.Idx +} +``` + + +#### func (*BlockStatement) Idx0 + +```go +func (self *BlockStatement) Idx0() file.Idx +``` + +#### func (*BlockStatement) Idx1 + +```go +func (self *BlockStatement) Idx1() file.Idx +``` + +#### type BooleanLiteral + +```go +type BooleanLiteral struct { + Idx file.Idx + Literal string + Value bool +} +``` + + +#### func (*BooleanLiteral) Idx0 + +```go +func (self *BooleanLiteral) Idx0() file.Idx +``` + +#### func (*BooleanLiteral) Idx1 + +```go +func (self *BooleanLiteral) Idx1() file.Idx +``` + +#### type BracketExpression + +```go +type BracketExpression struct { + Left Expression + Member Expression + LeftBracket file.Idx + RightBracket file.Idx +} +``` + + +#### func (*BracketExpression) Idx0 + +```go +func (self *BracketExpression) Idx0() file.Idx +``` + +#### func (*BracketExpression) Idx1 + +```go +func (self *BracketExpression) Idx1() file.Idx +``` + +#### type BranchStatement + +```go +type BranchStatement struct { + Idx file.Idx + Token token.Token + Label *Identifier +} +``` + + +#### func (*BranchStatement) Idx0 + +```go +func (self *BranchStatement) Idx0() file.Idx +``` + +#### func (*BranchStatement) Idx1 + +```go +func (self *BranchStatement) Idx1() file.Idx +``` + +#### type CallExpression + +```go +type CallExpression struct { + Callee Expression + LeftParenthesis file.Idx + ArgumentList []Expression + RightParenthesis file.Idx +} +``` + + +#### func (*CallExpression) Idx0 + +```go +func (self *CallExpression) Idx0() file.Idx +``` + +#### func (*CallExpression) Idx1 + +```go +func (self *CallExpression) Idx1() file.Idx +``` + +#### type CaseStatement + +```go +type CaseStatement struct { + Case file.Idx + Test Expression + Consequent []Statement +} +``` + + +#### func (*CaseStatement) Idx0 + +```go +func (self *CaseStatement) Idx0() file.Idx +``` + +#### 
func (*CaseStatement) Idx1 + +```go +func (self *CaseStatement) Idx1() file.Idx +``` + +#### type CatchStatement + +```go +type CatchStatement struct { + Catch file.Idx + Parameter *Identifier + Body Statement +} +``` + + +#### func (*CatchStatement) Idx0 + +```go +func (self *CatchStatement) Idx0() file.Idx +``` + +#### func (*CatchStatement) Idx1 + +```go +func (self *CatchStatement) Idx1() file.Idx +``` + +#### type ConditionalExpression + +```go +type ConditionalExpression struct { + Test Expression + Consequent Expression + Alternate Expression +} +``` + + +#### func (*ConditionalExpression) Idx0 + +```go +func (self *ConditionalExpression) Idx0() file.Idx +``` + +#### func (*ConditionalExpression) Idx1 + +```go +func (self *ConditionalExpression) Idx1() file.Idx +``` + +#### type DebuggerStatement + +```go +type DebuggerStatement struct { + Debugger file.Idx +} +``` + + +#### func (*DebuggerStatement) Idx0 + +```go +func (self *DebuggerStatement) Idx0() file.Idx +``` + +#### func (*DebuggerStatement) Idx1 + +```go +func (self *DebuggerStatement) Idx1() file.Idx +``` + +#### type Declaration + +```go +type Declaration interface { + // contains filtered or unexported methods +} +``` + +All declaration nodes implement the Declaration interface. + +#### type DoWhileStatement + +```go +type DoWhileStatement struct { + Do file.Idx + Test Expression + Body Statement +} +``` + + +#### func (*DoWhileStatement) Idx0 + +```go +func (self *DoWhileStatement) Idx0() file.Idx +``` + +#### func (*DoWhileStatement) Idx1 + +```go +func (self *DoWhileStatement) Idx1() file.Idx +``` + +#### type DotExpression + +```go +type DotExpression struct { + Left Expression + Identifier Identifier +} +``` + + +#### func (*DotExpression) Idx0 + +```go +func (self *DotExpression) Idx0() file.Idx +``` + +#### func (*DotExpression) Idx1 + +```go +func (self *DotExpression) Idx1() file.Idx +``` + +#### type EmptyStatement + +```go +type EmptyStatement struct { + Semicolon file.Idx +} +``` + + +#### func (*EmptyStatement) Idx0 + +```go +func (self *EmptyStatement) Idx0() file.Idx +``` + +#### func (*EmptyStatement) Idx1 + +```go +func (self *EmptyStatement) Idx1() file.Idx +``` + +#### type Expression + +```go +type Expression interface { + Node + // contains filtered or unexported methods +} +``` + +All expression nodes implement the Expression interface. 
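
Concrete expression types are normally recovered the same way, with a type switch; the helper below (its name `describe` and the chosen cases are invented for illustration) walks a parsed expression and labels a few common node kinds.

```go
package main

import (
	"fmt"

	"github.com/robertkrimen/otto/ast"
	"github.com/robertkrimen/otto/parser"
)

// describe names a few common expression nodes; anything else just
// reports its concrete Go type.
func describe(e ast.Expression) string {
	switch n := e.(type) {
	case *ast.Identifier:
		return "identifier " + n.Name
	case *ast.NumberLiteral:
		return "number literal " + n.Literal
	case *ast.StringLiteral:
		return "string literal " + n.Literal
	case *ast.CallExpression:
		return fmt.Sprintf("call with %d argument(s)", len(n.ArgumentList))
	case *ast.BinaryExpression:
		return fmt.Sprintf("binary expression on %s and %s",
			describe(n.Left), describe(n.Right))
	default:
		return fmt.Sprintf("%T", e)
	}
}

func main() {
	program, err := parser.ParseFile(nil, "", `1 + 2 * 3;`, 0)
	if err != nil {
		panic(err)
	}
	stmt := program.Body[0].(*ast.ExpressionStatement)
	fmt.Println(describe(stmt.Expression))
	// e.g. binary expression on number literal 1 and binary expression on ...
}
```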
+ +#### type ExpressionStatement + +```go +type ExpressionStatement struct { + Expression Expression +} +``` + + +#### func (*ExpressionStatement) Idx0 + +```go +func (self *ExpressionStatement) Idx0() file.Idx +``` + +#### func (*ExpressionStatement) Idx1 + +```go +func (self *ExpressionStatement) Idx1() file.Idx +``` + +#### type ForInStatement + +```go +type ForInStatement struct { + For file.Idx + Into Expression + Source Expression + Body Statement +} +``` + + +#### func (*ForInStatement) Idx0 + +```go +func (self *ForInStatement) Idx0() file.Idx +``` + +#### func (*ForInStatement) Idx1 + +```go +func (self *ForInStatement) Idx1() file.Idx +``` + +#### type ForStatement + +```go +type ForStatement struct { + For file.Idx + Initializer Expression + Update Expression + Test Expression + Body Statement +} +``` + + +#### func (*ForStatement) Idx0 + +```go +func (self *ForStatement) Idx0() file.Idx +``` + +#### func (*ForStatement) Idx1 + +```go +func (self *ForStatement) Idx1() file.Idx +``` + +#### type FunctionDeclaration + +```go +type FunctionDeclaration struct { + Function *FunctionLiteral +} +``` + + +#### type FunctionLiteral + +```go +type FunctionLiteral struct { + Function file.Idx + Name *Identifier + ParameterList *ParameterList + Body Statement + Source string + + DeclarationList []Declaration +} +``` + + +#### func (*FunctionLiteral) Idx0 + +```go +func (self *FunctionLiteral) Idx0() file.Idx +``` + +#### func (*FunctionLiteral) Idx1 + +```go +func (self *FunctionLiteral) Idx1() file.Idx +``` + +#### type Identifier + +```go +type Identifier struct { + Name string + Idx file.Idx +} +``` + + +#### func (*Identifier) Idx0 + +```go +func (self *Identifier) Idx0() file.Idx +``` + +#### func (*Identifier) Idx1 + +```go +func (self *Identifier) Idx1() file.Idx +``` + +#### type IfStatement + +```go +type IfStatement struct { + If file.Idx + Test Expression + Consequent Statement + Alternate Statement +} +``` + + +#### func (*IfStatement) Idx0 + +```go +func (self *IfStatement) Idx0() file.Idx +``` + +#### func (*IfStatement) Idx1 + +```go +func (self *IfStatement) Idx1() file.Idx +``` + +#### type LabelledStatement + +```go +type LabelledStatement struct { + Label *Identifier + Colon file.Idx + Statement Statement +} +``` + + +#### func (*LabelledStatement) Idx0 + +```go +func (self *LabelledStatement) Idx0() file.Idx +``` + +#### func (*LabelledStatement) Idx1 + +```go +func (self *LabelledStatement) Idx1() file.Idx +``` + +#### type NewExpression + +```go +type NewExpression struct { + New file.Idx + Callee Expression + LeftParenthesis file.Idx + ArgumentList []Expression + RightParenthesis file.Idx +} +``` + + +#### func (*NewExpression) Idx0 + +```go +func (self *NewExpression) Idx0() file.Idx +``` + +#### func (*NewExpression) Idx1 + +```go +func (self *NewExpression) Idx1() file.Idx +``` + +#### type Node + +```go +type Node interface { + Idx0() file.Idx // The index of the first character belonging to the node + Idx1() file.Idx // The index of the first character immediately after the node +} +``` + +All nodes implement the Node interface. 
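
Because every node exposes `Idx0` and `Idx1`, a node can be mapped back onto its source text. The sketch below assumes `file.Idx` values are 1-based character positions with `Idx1` pointing one past the node (mirroring the `go/token` convention); the bounds check guards that assumption.

```go
package main

import (
	"fmt"

	"github.com/robertkrimen/otto/parser"
)

func main() {
	src := `var answer = 6 * 7; console.log(answer);`

	program, err := parser.ParseFile(nil, "", src, 0)
	if err != nil {
		panic(err)
	}

	// Assumption: indexes are 1-based, so [Idx0-1, Idx1-1) slices the
	// node's source text.
	for _, stmt := range program.Body {
		from, to := int(stmt.Idx0())-1, int(stmt.Idx1())-1
		if from >= 0 && to >= from && to <= len(src) {
			fmt.Printf("%q\n", src[from:to])
		}
	}
}
```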
+ +#### type NullLiteral + +```go +type NullLiteral struct { + Idx file.Idx + Literal string +} +``` + + +#### func (*NullLiteral) Idx0 + +```go +func (self *NullLiteral) Idx0() file.Idx +``` + +#### func (*NullLiteral) Idx1 + +```go +func (self *NullLiteral) Idx1() file.Idx +``` + +#### type NumberLiteral + +```go +type NumberLiteral struct { + Idx file.Idx + Literal string + Value interface{} +} +``` + + +#### func (*NumberLiteral) Idx0 + +```go +func (self *NumberLiteral) Idx0() file.Idx +``` + +#### func (*NumberLiteral) Idx1 + +```go +func (self *NumberLiteral) Idx1() file.Idx +``` + +#### type ObjectLiteral + +```go +type ObjectLiteral struct { + LeftBrace file.Idx + RightBrace file.Idx + Value []Property +} +``` + + +#### func (*ObjectLiteral) Idx0 + +```go +func (self *ObjectLiteral) Idx0() file.Idx +``` + +#### func (*ObjectLiteral) Idx1 + +```go +func (self *ObjectLiteral) Idx1() file.Idx +``` + +#### type ParameterList + +```go +type ParameterList struct { + Opening file.Idx + List []*Identifier + Closing file.Idx +} +``` + + +#### type Program + +```go +type Program struct { + Body []Statement + + DeclarationList []Declaration + + File *file.File +} +``` + + +#### func (*Program) Idx0 + +```go +func (self *Program) Idx0() file.Idx +``` + +#### func (*Program) Idx1 + +```go +func (self *Program) Idx1() file.Idx +``` + +#### type Property + +```go +type Property struct { + Key string + Kind string + Value Expression +} +``` + + +#### type RegExpLiteral + +```go +type RegExpLiteral struct { + Idx file.Idx + Literal string + Pattern string + Flags string + Value string +} +``` + + +#### func (*RegExpLiteral) Idx0 + +```go +func (self *RegExpLiteral) Idx0() file.Idx +``` + +#### func (*RegExpLiteral) Idx1 + +```go +func (self *RegExpLiteral) Idx1() file.Idx +``` + +#### type ReturnStatement + +```go +type ReturnStatement struct { + Return file.Idx + Argument Expression +} +``` + + +#### func (*ReturnStatement) Idx0 + +```go +func (self *ReturnStatement) Idx0() file.Idx +``` + +#### func (*ReturnStatement) Idx1 + +```go +func (self *ReturnStatement) Idx1() file.Idx +``` + +#### type SequenceExpression + +```go +type SequenceExpression struct { + Sequence []Expression +} +``` + + +#### func (*SequenceExpression) Idx0 + +```go +func (self *SequenceExpression) Idx0() file.Idx +``` + +#### func (*SequenceExpression) Idx1 + +```go +func (self *SequenceExpression) Idx1() file.Idx +``` + +#### type Statement + +```go +type Statement interface { + Node + // contains filtered or unexported methods +} +``` + +All statement nodes implement the Statement interface. 
+ +#### type StringLiteral + +```go +type StringLiteral struct { + Idx file.Idx + Literal string + Value string +} +``` + + +#### func (*StringLiteral) Idx0 + +```go +func (self *StringLiteral) Idx0() file.Idx +``` + +#### func (*StringLiteral) Idx1 + +```go +func (self *StringLiteral) Idx1() file.Idx +``` + +#### type SwitchStatement + +```go +type SwitchStatement struct { + Switch file.Idx + Discriminant Expression + Default int + Body []*CaseStatement +} +``` + + +#### func (*SwitchStatement) Idx0 + +```go +func (self *SwitchStatement) Idx0() file.Idx +``` + +#### func (*SwitchStatement) Idx1 + +```go +func (self *SwitchStatement) Idx1() file.Idx +``` + +#### type ThisExpression + +```go +type ThisExpression struct { + Idx file.Idx +} +``` + + +#### func (*ThisExpression) Idx0 + +```go +func (self *ThisExpression) Idx0() file.Idx +``` + +#### func (*ThisExpression) Idx1 + +```go +func (self *ThisExpression) Idx1() file.Idx +``` + +#### type ThrowStatement + +```go +type ThrowStatement struct { + Throw file.Idx + Argument Expression +} +``` + + +#### func (*ThrowStatement) Idx0 + +```go +func (self *ThrowStatement) Idx0() file.Idx +``` + +#### func (*ThrowStatement) Idx1 + +```go +func (self *ThrowStatement) Idx1() file.Idx +``` + +#### type TryStatement + +```go +type TryStatement struct { + Try file.Idx + Body Statement + Catch *CatchStatement + Finally Statement +} +``` + + +#### func (*TryStatement) Idx0 + +```go +func (self *TryStatement) Idx0() file.Idx +``` + +#### func (*TryStatement) Idx1 + +```go +func (self *TryStatement) Idx1() file.Idx +``` + +#### type UnaryExpression + +```go +type UnaryExpression struct { + Operator token.Token + Idx file.Idx // If a prefix operation + Operand Expression + Postfix bool +} +``` + + +#### func (*UnaryExpression) Idx0 + +```go +func (self *UnaryExpression) Idx0() file.Idx +``` + +#### func (*UnaryExpression) Idx1 + +```go +func (self *UnaryExpression) Idx1() file.Idx +``` + +#### type VariableDeclaration + +```go +type VariableDeclaration struct { + Var file.Idx + List []*VariableExpression +} +``` + + +#### type VariableExpression + +```go +type VariableExpression struct { + Name string + Idx file.Idx + Initializer Expression +} +``` + + +#### func (*VariableExpression) Idx0 + +```go +func (self *VariableExpression) Idx0() file.Idx +``` + +#### func (*VariableExpression) Idx1 + +```go +func (self *VariableExpression) Idx1() file.Idx +``` + +#### type VariableStatement + +```go +type VariableStatement struct { + Var file.Idx + List []Expression +} +``` + + +#### func (*VariableStatement) Idx0 + +```go +func (self *VariableStatement) Idx0() file.Idx +``` + +#### func (*VariableStatement) Idx1 + +```go +func (self *VariableStatement) Idx1() file.Idx +``` + +#### type WhileStatement + +```go +type WhileStatement struct { + While file.Idx + Test Expression + Body Statement +} +``` + + +#### func (*WhileStatement) Idx0 + +```go +func (self *WhileStatement) Idx0() file.Idx +``` + +#### func (*WhileStatement) Idx1 + +```go +func (self *WhileStatement) Idx1() file.Idx +``` + +#### type WithStatement + +```go +type WithStatement struct { + With file.Idx + Object Expression + Body Statement +} +``` + + +#### func (*WithStatement) Idx0 + +```go +func (self *WithStatement) Idx0() file.Idx +``` + +#### func (*WithStatement) Idx1 + +```go +func (self *WithStatement) Idx1() file.Idx +``` + +-- +**godocdown** http://github.com/robertkrimen/godocdown diff --git a/vendor/github.com/robertkrimen/otto/ast/comments.go 
b/vendor/github.com/robertkrimen/otto/ast/comments.go new file mode 100644 index 000000000..ef2cc3d89 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/ast/comments.go @@ -0,0 +1,278 @@ +package ast + +import ( + "fmt" + "github.com/robertkrimen/otto/file" +) + +// CommentPosition determines where the comment is in a given context +type CommentPosition int + +const ( + _ CommentPosition = iota + LEADING // Before the pertinent expression + TRAILING // After the pertinent expression + KEY // Before a key in an object + COLON // After a colon in a field declaration + FINAL // Final comments in a block, not belonging to a specific expression or the comment after a trailing , in an array or object literal + IF // After an if keyword + WHILE // After a while keyword + DO // After do keyword + FOR // After a for keyword + WITH // After a with keyword + TBD +) + +// Comment contains the data of the comment +type Comment struct { + Begin file.Idx + Text string + Position CommentPosition +} + +// NewComment creates a new comment +func NewComment(text string, idx file.Idx) *Comment { + comment := &Comment{ + Begin: idx, + Text: text, + Position: TBD, + } + + return comment +} + +// String returns a stringified version of the position +func (cp CommentPosition) String() string { + switch cp { + case LEADING: + return "Leading" + case TRAILING: + return "Trailing" + case KEY: + return "Key" + case COLON: + return "Colon" + case FINAL: + return "Final" + case IF: + return "If" + case WHILE: + return "While" + case DO: + return "Do" + case FOR: + return "For" + case WITH: + return "With" + default: + return "???" + } +} + +// String returns a stringified version of the comment +func (c Comment) String() string { + return fmt.Sprintf("Comment: %v", c.Text) +} + +// Comments defines the current view of comments from the parser +type Comments struct { + // CommentMap is a reference to the parser comment map + CommentMap CommentMap + // Comments lists the comments scanned, not linked to a node yet + Comments []*Comment + // future lists the comments after a line break during a sequence of comments + future []*Comment + // Current is node for which comments are linked to + Current Expression + + // wasLineBreak determines if a line break occured while scanning for comments + wasLineBreak bool + // primary determines whether or not processing a primary expression + primary bool + // afterBlock determines whether or not being after a block statement + afterBlock bool +} + +func NewComments() *Comments { + comments := &Comments{ + CommentMap: CommentMap{}, + } + + return comments +} + +func (c *Comments) String() string { + return fmt.Sprintf("NODE: %v, Comments: %v, Future: %v(LINEBREAK:%v)", c.Current, len(c.Comments), len(c.future), c.wasLineBreak) +} + +// FetchAll returns all the currently scanned comments, +// including those from the next line +func (c *Comments) FetchAll() []*Comment { + defer func() { + c.Comments = nil + c.future = nil + }() + + return append(c.Comments, c.future...) +} + +// Fetch returns all the currently scanned comments +func (c *Comments) Fetch() []*Comment { + defer func() { + c.Comments = nil + }() + + return c.Comments +} + +// ResetLineBreak marks the beginning of a new statement +func (c *Comments) ResetLineBreak() { + c.wasLineBreak = false +} + +// MarkPrimary will mark the context as processing a primary expression +func (c *Comments) MarkPrimary() { + c.primary = true + c.wasLineBreak = false +} + +// AfterBlock will mark the context as being after a block. 
+func (c *Comments) AfterBlock() { + c.afterBlock = true +} + +// AddComment adds a comment to the view. +// Depending on the context, comments are added normally or as post line break. +func (c *Comments) AddComment(comment *Comment) { + if c.primary { + if !c.wasLineBreak { + c.Comments = append(c.Comments, comment) + } else { + c.future = append(c.future, comment) + } + } else { + if !c.wasLineBreak || (c.Current == nil && !c.afterBlock) { + c.Comments = append(c.Comments, comment) + } else { + c.future = append(c.future, comment) + } + } +} + +// MarkComments will mark the found comments as the given position. +func (c *Comments) MarkComments(position CommentPosition) { + for _, comment := range c.Comments { + if comment.Position == TBD { + comment.Position = position + } + } + for _, c := range c.future { + if c.Position == TBD { + c.Position = position + } + } +} + +// Unset the current node and apply the comments to the current expression. +// Resets context variables. +func (c *Comments) Unset() { + if c.Current != nil { + c.applyComments(c.Current, c.Current, TRAILING) + c.Current = nil + } + c.wasLineBreak = false + c.primary = false + c.afterBlock = false +} + +// SetExpression sets the current expression. +// It is applied the found comments, unless the previous expression has not been unset. +// It is skipped if the node is already set or if it is a part of the previous node. +func (c *Comments) SetExpression(node Expression) { + // Skipping same node + if c.Current == node { + return + } + if c.Current != nil && c.Current.Idx1() == node.Idx1() { + c.Current = node + return + } + previous := c.Current + c.Current = node + + // Apply the found comments and futures to the node and the previous. + c.applyComments(node, previous, TRAILING) +} + +// PostProcessNode applies all found comments to the given node +func (c *Comments) PostProcessNode(node Node) { + c.applyComments(node, nil, TRAILING) +} + +// applyComments applies both the comments and the future comments to the given node and the previous one, +// based on the context. +func (c *Comments) applyComments(node, previous Node, position CommentPosition) { + if previous != nil { + c.CommentMap.AddComments(previous, c.Comments, position) + c.Comments = nil + } else { + c.CommentMap.AddComments(node, c.Comments, position) + c.Comments = nil + } + // Only apply the future comments to the node if the previous is set. 
+ // This is for detecting end of line comments and which node comments on the following lines belongs to + if previous != nil { + c.CommentMap.AddComments(node, c.future, position) + c.future = nil + } +} + +// AtLineBreak will mark a line break +func (c *Comments) AtLineBreak() { + c.wasLineBreak = true +} + +// CommentMap is the data structure where all found comments are stored +type CommentMap map[Node][]*Comment + +// AddComment adds a single comment to the map +func (cm CommentMap) AddComment(node Node, comment *Comment) { + list := cm[node] + list = append(list, comment) + + cm[node] = list +} + +// AddComments adds a slice of comments, given a node and an updated position +func (cm CommentMap) AddComments(node Node, comments []*Comment, position CommentPosition) { + for _, comment := range comments { + if comment.Position == TBD { + comment.Position = position + } + cm.AddComment(node, comment) + } +} + +// Size returns the size of the map +func (cm CommentMap) Size() int { + size := 0 + for _, comments := range cm { + size += len(comments) + } + + return size +} + +// MoveComments moves comments with a given position from a node to another +func (cm CommentMap) MoveComments(from, to Node, position CommentPosition) { + for i, c := range cm[from] { + if c.Position == position { + cm.AddComment(to, c) + + // Remove the comment from the "from" slice + cm[from][i] = cm[from][len(cm[from])-1] + cm[from][len(cm[from])-1] = nil + cm[from] = cm[from][:len(cm[from])-1] + } + } +} diff --git a/vendor/github.com/robertkrimen/otto/ast/node.go b/vendor/github.com/robertkrimen/otto/ast/node.go new file mode 100644 index 000000000..49ae375d1 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/ast/node.go @@ -0,0 +1,515 @@ +/* +Package ast declares types representing a JavaScript AST. + +Warning + +The parser and AST interfaces are still works-in-progress (particularly where +node types are concerned) and may change in the future. + +*/ +package ast + +import ( + "github.com/robertkrimen/otto/file" + "github.com/robertkrimen/otto/token" +) + +// All nodes implement the Node interface. +type Node interface { + Idx0() file.Idx // The index of the first character belonging to the node + Idx1() file.Idx // The index of the first character immediately after the node +} + +// ========== // +// Expression // +// ========== // + +type ( + // All expression nodes implement the Expression interface. 
+ Expression interface { + Node + _expressionNode() + } + + ArrayLiteral struct { + LeftBracket file.Idx + RightBracket file.Idx + Value []Expression + } + + AssignExpression struct { + Operator token.Token + Left Expression + Right Expression + } + + BadExpression struct { + From file.Idx + To file.Idx + } + + BinaryExpression struct { + Operator token.Token + Left Expression + Right Expression + Comparison bool + } + + BooleanLiteral struct { + Idx file.Idx + Literal string + Value bool + } + + BracketExpression struct { + Left Expression + Member Expression + LeftBracket file.Idx + RightBracket file.Idx + } + + CallExpression struct { + Callee Expression + LeftParenthesis file.Idx + ArgumentList []Expression + RightParenthesis file.Idx + } + + ConditionalExpression struct { + Test Expression + Consequent Expression + Alternate Expression + } + + DotExpression struct { + Left Expression + Identifier *Identifier + } + + EmptyExpression struct { + Begin file.Idx + End file.Idx + } + + FunctionLiteral struct { + Function file.Idx + Name *Identifier + ParameterList *ParameterList + Body Statement + Source string + + DeclarationList []Declaration + } + + Identifier struct { + Name string + Idx file.Idx + } + + NewExpression struct { + New file.Idx + Callee Expression + LeftParenthesis file.Idx + ArgumentList []Expression + RightParenthesis file.Idx + } + + NullLiteral struct { + Idx file.Idx + Literal string + } + + NumberLiteral struct { + Idx file.Idx + Literal string + Value interface{} + } + + ObjectLiteral struct { + LeftBrace file.Idx + RightBrace file.Idx + Value []Property + } + + ParameterList struct { + Opening file.Idx + List []*Identifier + Closing file.Idx + } + + Property struct { + Key string + Kind string + Value Expression + } + + RegExpLiteral struct { + Idx file.Idx + Literal string + Pattern string + Flags string + Value string + } + + SequenceExpression struct { + Sequence []Expression + } + + StringLiteral struct { + Idx file.Idx + Literal string + Value string + } + + ThisExpression struct { + Idx file.Idx + } + + UnaryExpression struct { + Operator token.Token + Idx file.Idx // If a prefix operation + Operand Expression + Postfix bool + } + + VariableExpression struct { + Name string + Idx file.Idx + Initializer Expression + } +) + +// _expressionNode + +func (*ArrayLiteral) _expressionNode() {} +func (*AssignExpression) _expressionNode() {} +func (*BadExpression) _expressionNode() {} +func (*BinaryExpression) _expressionNode() {} +func (*BooleanLiteral) _expressionNode() {} +func (*BracketExpression) _expressionNode() {} +func (*CallExpression) _expressionNode() {} +func (*ConditionalExpression) _expressionNode() {} +func (*DotExpression) _expressionNode() {} +func (*EmptyExpression) _expressionNode() {} +func (*FunctionLiteral) _expressionNode() {} +func (*Identifier) _expressionNode() {} +func (*NewExpression) _expressionNode() {} +func (*NullLiteral) _expressionNode() {} +func (*NumberLiteral) _expressionNode() {} +func (*ObjectLiteral) _expressionNode() {} +func (*RegExpLiteral) _expressionNode() {} +func (*SequenceExpression) _expressionNode() {} +func (*StringLiteral) _expressionNode() {} +func (*ThisExpression) _expressionNode() {} +func (*UnaryExpression) _expressionNode() {} +func (*VariableExpression) _expressionNode() {} + +// ========= // +// Statement // +// ========= // + +type ( + // All statement nodes implement the Statement interface. 
+ Statement interface { + Node + _statementNode() + } + + BadStatement struct { + From file.Idx + To file.Idx + } + + BlockStatement struct { + LeftBrace file.Idx + List []Statement + RightBrace file.Idx + } + + BranchStatement struct { + Idx file.Idx + Token token.Token + Label *Identifier + } + + CaseStatement struct { + Case file.Idx + Test Expression + Consequent []Statement + } + + CatchStatement struct { + Catch file.Idx + Parameter *Identifier + Body Statement + } + + DebuggerStatement struct { + Debugger file.Idx + } + + DoWhileStatement struct { + Do file.Idx + Test Expression + Body Statement + } + + EmptyStatement struct { + Semicolon file.Idx + } + + ExpressionStatement struct { + Expression Expression + } + + ForInStatement struct { + For file.Idx + Into Expression + Source Expression + Body Statement + } + + ForStatement struct { + For file.Idx + Initializer Expression + Update Expression + Test Expression + Body Statement + } + + FunctionStatement struct { + Function *FunctionLiteral + } + + IfStatement struct { + If file.Idx + Test Expression + Consequent Statement + Alternate Statement + } + + LabelledStatement struct { + Label *Identifier + Colon file.Idx + Statement Statement + } + + ReturnStatement struct { + Return file.Idx + Argument Expression + } + + SwitchStatement struct { + Switch file.Idx + Discriminant Expression + Default int + Body []*CaseStatement + } + + ThrowStatement struct { + Throw file.Idx + Argument Expression + } + + TryStatement struct { + Try file.Idx + Body Statement + Catch *CatchStatement + Finally Statement + } + + VariableStatement struct { + Var file.Idx + List []Expression + } + + WhileStatement struct { + While file.Idx + Test Expression + Body Statement + } + + WithStatement struct { + With file.Idx + Object Expression + Body Statement + } +) + +// _statementNode + +func (*BadStatement) _statementNode() {} +func (*BlockStatement) _statementNode() {} +func (*BranchStatement) _statementNode() {} +func (*CaseStatement) _statementNode() {} +func (*CatchStatement) _statementNode() {} +func (*DebuggerStatement) _statementNode() {} +func (*DoWhileStatement) _statementNode() {} +func (*EmptyStatement) _statementNode() {} +func (*ExpressionStatement) _statementNode() {} +func (*ForInStatement) _statementNode() {} +func (*ForStatement) _statementNode() {} +func (*FunctionStatement) _statementNode() {} +func (*IfStatement) _statementNode() {} +func (*LabelledStatement) _statementNode() {} +func (*ReturnStatement) _statementNode() {} +func (*SwitchStatement) _statementNode() {} +func (*ThrowStatement) _statementNode() {} +func (*TryStatement) _statementNode() {} +func (*VariableStatement) _statementNode() {} +func (*WhileStatement) _statementNode() {} +func (*WithStatement) _statementNode() {} + +// =========== // +// Declaration // +// =========== // + +type ( + // All declaration nodes implement the Declaration interface. 
+ Declaration interface { + _declarationNode() + } + + FunctionDeclaration struct { + Function *FunctionLiteral + } + + VariableDeclaration struct { + Var file.Idx + List []*VariableExpression + } +) + +// _declarationNode + +func (*FunctionDeclaration) _declarationNode() {} +func (*VariableDeclaration) _declarationNode() {} + +// ==== // +// Node // +// ==== // + +type Program struct { + Body []Statement + + DeclarationList []Declaration + + File *file.File + + Comments CommentMap +} + +// ==== // +// Idx0 // +// ==== // + +func (self *ArrayLiteral) Idx0() file.Idx { return self.LeftBracket } +func (self *AssignExpression) Idx0() file.Idx { return self.Left.Idx0() } +func (self *BadExpression) Idx0() file.Idx { return self.From } +func (self *BinaryExpression) Idx0() file.Idx { return self.Left.Idx0() } +func (self *BooleanLiteral) Idx0() file.Idx { return self.Idx } +func (self *BracketExpression) Idx0() file.Idx { return self.Left.Idx0() } +func (self *CallExpression) Idx0() file.Idx { return self.Callee.Idx0() } +func (self *ConditionalExpression) Idx0() file.Idx { return self.Test.Idx0() } +func (self *DotExpression) Idx0() file.Idx { return self.Left.Idx0() } +func (self *EmptyExpression) Idx0() file.Idx { return self.Begin } +func (self *FunctionLiteral) Idx0() file.Idx { return self.Function } +func (self *Identifier) Idx0() file.Idx { return self.Idx } +func (self *NewExpression) Idx0() file.Idx { return self.New } +func (self *NullLiteral) Idx0() file.Idx { return self.Idx } +func (self *NumberLiteral) Idx0() file.Idx { return self.Idx } +func (self *ObjectLiteral) Idx0() file.Idx { return self.LeftBrace } +func (self *RegExpLiteral) Idx0() file.Idx { return self.Idx } +func (self *SequenceExpression) Idx0() file.Idx { return self.Sequence[0].Idx0() } +func (self *StringLiteral) Idx0() file.Idx { return self.Idx } +func (self *ThisExpression) Idx0() file.Idx { return self.Idx } +func (self *UnaryExpression) Idx0() file.Idx { return self.Idx } +func (self *VariableExpression) Idx0() file.Idx { return self.Idx } + +func (self *BadStatement) Idx0() file.Idx { return self.From } +func (self *BlockStatement) Idx0() file.Idx { return self.LeftBrace } +func (self *BranchStatement) Idx0() file.Idx { return self.Idx } +func (self *CaseStatement) Idx0() file.Idx { return self.Case } +func (self *CatchStatement) Idx0() file.Idx { return self.Catch } +func (self *DebuggerStatement) Idx0() file.Idx { return self.Debugger } +func (self *DoWhileStatement) Idx0() file.Idx { return self.Do } +func (self *EmptyStatement) Idx0() file.Idx { return self.Semicolon } +func (self *ExpressionStatement) Idx0() file.Idx { return self.Expression.Idx0() } +func (self *ForInStatement) Idx0() file.Idx { return self.For } +func (self *ForStatement) Idx0() file.Idx { return self.For } +func (self *FunctionStatement) Idx0() file.Idx { return self.Function.Idx0() } +func (self *IfStatement) Idx0() file.Idx { return self.If } +func (self *LabelledStatement) Idx0() file.Idx { return self.Label.Idx0() } +func (self *Program) Idx0() file.Idx { return self.Body[0].Idx0() } +func (self *ReturnStatement) Idx0() file.Idx { return self.Return } +func (self *SwitchStatement) Idx0() file.Idx { return self.Switch } +func (self *ThrowStatement) Idx0() file.Idx { return self.Throw } +func (self *TryStatement) Idx0() file.Idx { return self.Try } +func (self *VariableStatement) Idx0() file.Idx { return self.Var } +func (self *WhileStatement) Idx0() file.Idx { return self.While } +func (self *WithStatement) Idx0() file.Idx { 
return self.With } + +// ==== // +// Idx1 // +// ==== // + +func (self *ArrayLiteral) Idx1() file.Idx { return self.RightBracket } +func (self *AssignExpression) Idx1() file.Idx { return self.Right.Idx1() } +func (self *BadExpression) Idx1() file.Idx { return self.To } +func (self *BinaryExpression) Idx1() file.Idx { return self.Right.Idx1() } +func (self *BooleanLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Literal)) } +func (self *BracketExpression) Idx1() file.Idx { return self.RightBracket + 1 } +func (self *CallExpression) Idx1() file.Idx { return self.RightParenthesis + 1 } +func (self *ConditionalExpression) Idx1() file.Idx { return self.Test.Idx1() } +func (self *DotExpression) Idx1() file.Idx { return self.Identifier.Idx1() } +func (self *EmptyExpression) Idx1() file.Idx { return self.End } +func (self *FunctionLiteral) Idx1() file.Idx { return self.Body.Idx1() } +func (self *Identifier) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Name)) } +func (self *NewExpression) Idx1() file.Idx { return self.RightParenthesis + 1 } +func (self *NullLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + 4) } // "null" +func (self *NumberLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Literal)) } +func (self *ObjectLiteral) Idx1() file.Idx { return self.RightBrace } +func (self *RegExpLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Literal)) } +func (self *SequenceExpression) Idx1() file.Idx { return self.Sequence[0].Idx1() } +func (self *StringLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Literal)) } +func (self *ThisExpression) Idx1() file.Idx { return self.Idx } +func (self *UnaryExpression) Idx1() file.Idx { + if self.Postfix { + return self.Operand.Idx1() + 2 // ++ -- + } + return self.Operand.Idx1() +} +func (self *VariableExpression) Idx1() file.Idx { + if self.Initializer == nil { + return file.Idx(int(self.Idx) + len(self.Name) + 1) + } + return self.Initializer.Idx1() +} + +func (self *BadStatement) Idx1() file.Idx { return self.To } +func (self *BlockStatement) Idx1() file.Idx { return self.RightBrace + 1 } +func (self *BranchStatement) Idx1() file.Idx { return self.Idx } +func (self *CaseStatement) Idx1() file.Idx { return self.Consequent[len(self.Consequent)-1].Idx1() } +func (self *CatchStatement) Idx1() file.Idx { return self.Body.Idx1() } +func (self *DebuggerStatement) Idx1() file.Idx { return self.Debugger + 8 } +func (self *DoWhileStatement) Idx1() file.Idx { return self.Test.Idx1() } +func (self *EmptyStatement) Idx1() file.Idx { return self.Semicolon + 1 } +func (self *ExpressionStatement) Idx1() file.Idx { return self.Expression.Idx1() } +func (self *ForInStatement) Idx1() file.Idx { return self.Body.Idx1() } +func (self *ForStatement) Idx1() file.Idx { return self.Body.Idx1() } +func (self *FunctionStatement) Idx1() file.Idx { return self.Function.Idx1() } +func (self *IfStatement) Idx1() file.Idx { + if self.Alternate != nil { + return self.Alternate.Idx1() + } + return self.Consequent.Idx1() +} +func (self *LabelledStatement) Idx1() file.Idx { return self.Colon + 1 } +func (self *Program) Idx1() file.Idx { return self.Body[len(self.Body)-1].Idx1() } +func (self *ReturnStatement) Idx1() file.Idx { return self.Return } +func (self *SwitchStatement) Idx1() file.Idx { return self.Body[len(self.Body)-1].Idx1() } +func (self *ThrowStatement) Idx1() file.Idx { return self.Throw } +func (self *TryStatement) Idx1() file.Idx { return self.Try } +func (self *VariableStatement) 
Idx1() file.Idx { return self.List[len(self.List)-1].Idx1() } +func (self *WhileStatement) Idx1() file.Idx { return self.Body.Idx1() } +func (self *WithStatement) Idx1() file.Idx { return self.Body.Idx1() } diff --git a/vendor/github.com/robertkrimen/otto/builtin.go b/vendor/github.com/robertkrimen/otto/builtin.go new file mode 100644 index 000000000..83f715083 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/builtin.go @@ -0,0 +1,353 @@ +package otto + +import ( + "encoding/hex" + "math" + "net/url" + "regexp" + "strconv" + "strings" + "unicode/utf16" + "unicode/utf8" +) + +// Global +func builtinGlobal_eval(call FunctionCall) Value { + src := call.Argument(0) + if !src.IsString() { + return src + } + runtime := call.runtime + program := runtime.cmpl_parseOrThrow(src.string(), nil) + if !call.eval { + // Not a direct call to eval, so we enter the global ExecutionContext + runtime.enterGlobalScope() + defer runtime.leaveScope() + } + returnValue := runtime.cmpl_evaluate_nodeProgram(program, true) + if returnValue.isEmpty() { + return Value{} + } + return returnValue +} + +func builtinGlobal_isNaN(call FunctionCall) Value { + value := call.Argument(0).float64() + return toValue_bool(math.IsNaN(value)) +} + +func builtinGlobal_isFinite(call FunctionCall) Value { + value := call.Argument(0).float64() + return toValue_bool(!math.IsNaN(value) && !math.IsInf(value, 0)) +} + +// radix 3 => 2 (ASCII 50) +47 +// radix 11 => A/a (ASCII 65/97) +54/+86 +var parseInt_alphabetTable = func() []string { + table := []string{"", "", "01"} + for radix := 3; radix <= 36; radix += 1 { + alphabet := table[radix-1] + if radix <= 10 { + alphabet += string(radix + 47) + } else { + alphabet += string(radix+54) + string(radix+86) + } + table = append(table, alphabet) + } + return table +}() + +func digitValue(chr rune) int { + switch { + case '0' <= chr && chr <= '9': + return int(chr - '0') + case 'a' <= chr && chr <= 'z': + return int(chr - 'a' + 10) + case 'A' <= chr && chr <= 'Z': + return int(chr - 'A' + 10) + } + return 36 // Larger than any legal digit value +} + +func builtinGlobal_parseInt(call FunctionCall) Value { + input := strings.TrimSpace(call.Argument(0).string()) + if len(input) == 0 { + return NaNValue() + } + + radix := int(toInt32(call.Argument(1))) + + negative := false + switch input[0] { + case '+': + input = input[1:] + case '-': + negative = true + input = input[1:] + } + + strip := true + if radix == 0 { + radix = 10 + } else { + if radix < 2 || radix > 36 { + return NaNValue() + } else if radix != 16 { + strip = false + } + } + + switch len(input) { + case 0: + return NaNValue() + case 1: + default: + if strip { + if input[0] == '0' && (input[1] == 'x' || input[1] == 'X') { + input = input[2:] + radix = 16 + } + } + } + + base := radix + index := 0 + for ; index < len(input); index++ { + digit := digitValue(rune(input[index])) // If not ASCII, then an error anyway + if digit >= base { + break + } + } + input = input[0:index] + + value, err := strconv.ParseInt(input, radix, 64) + if err != nil { + if err.(*strconv.NumError).Err == strconv.ErrRange { + base := float64(base) + // Could just be a very large number (e.g. 
0x8000000000000000) + var value float64 + for _, chr := range input { + digit := float64(digitValue(chr)) + if digit >= base { + goto error + } + value = value*base + digit + } + if negative { + value *= -1 + } + return toValue_float64(value) + } + error: + return NaNValue() + } + if negative { + value *= -1 + } + + return toValue_int64(value) +} + +var parseFloat_matchBadSpecial = regexp.MustCompile(`[\+\-]?(?:[Ii]nf$|infinity)`) +var parseFloat_matchValid = regexp.MustCompile(`[0-9eE\+\-\.]|Infinity`) + +func builtinGlobal_parseFloat(call FunctionCall) Value { + // Caveat emptor: This implementation does NOT match the specification + input := strings.TrimSpace(call.Argument(0).string()) + if parseFloat_matchBadSpecial.MatchString(input) { + return NaNValue() + } + value, err := strconv.ParseFloat(input, 64) + if err != nil { + for end := len(input); end > 0; end-- { + input := input[0:end] + if !parseFloat_matchValid.MatchString(input) { + return NaNValue() + } + value, err = strconv.ParseFloat(input, 64) + if err == nil { + break + } + } + if err != nil { + return NaNValue() + } + } + return toValue_float64(value) +} + +// encodeURI/decodeURI + +func _builtinGlobal_encodeURI(call FunctionCall, escape *regexp.Regexp) Value { + value := call.Argument(0) + var input []uint16 + switch vl := value.value.(type) { + case []uint16: + input = vl + default: + input = utf16.Encode([]rune(value.string())) + } + if len(input) == 0 { + return toValue_string("") + } + output := []byte{} + length := len(input) + encode := make([]byte, 4) + for index := 0; index < length; { + value := input[index] + decode := utf16.Decode(input[index : index+1]) + if value >= 0xDC00 && value <= 0xDFFF { + panic(call.runtime.panicURIError("URI malformed")) + } + if value >= 0xD800 && value <= 0xDBFF { + index += 1 + if index >= length { + panic(call.runtime.panicURIError("URI malformed")) + } + // input = ..., value, value1, ... + value1 := input[index] + if value1 < 0xDC00 || value1 > 0xDFFF { + panic(call.runtime.panicURIError("URI malformed")) + } + decode = []rune{((rune(value) - 0xD800) * 0x400) + (rune(value1) - 0xDC00) + 0x10000} + } + index += 1 + size := utf8.EncodeRune(encode, decode[0]) + encode := encode[0:size] + output = append(output, encode...) 
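+ // encode[0:size] now holds the UTF-8 bytes for this code point (a surrogate pair above was first combined into a single rune); the percent-escaping pass after the loop operates on this UTF-8 output.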
+ } + { + value := escape.ReplaceAllFunc(output, func(target []byte) []byte { + // Probably a better way of doing this + if target[0] == ' ' { + return []byte("%20") + } + return []byte(url.QueryEscape(string(target))) + }) + return toValue_string(string(value)) + } +} + +var encodeURI_Regexp = regexp.MustCompile(`([^~!@#$&*()=:/,;?+'])`) + +func builtinGlobal_encodeURI(call FunctionCall) Value { + return _builtinGlobal_encodeURI(call, encodeURI_Regexp) +} + +var encodeURIComponent_Regexp = regexp.MustCompile(`([^~!*()'])`) + +func builtinGlobal_encodeURIComponent(call FunctionCall) Value { + return _builtinGlobal_encodeURI(call, encodeURIComponent_Regexp) +} + +// 3B/2F/3F/3A/40/26/3D/2B/24/2C/23 +var decodeURI_guard = regexp.MustCompile(`(?i)(?:%)(3B|2F|3F|3A|40|26|3D|2B|24|2C|23)`) + +func _decodeURI(input string, reserve bool) (string, bool) { + if reserve { + input = decodeURI_guard.ReplaceAllString(input, "%25$1") + } + input = strings.Replace(input, "+", "%2B", -1) // Ugly hack to make QueryUnescape work with our use case + output, err := url.QueryUnescape(input) + if err != nil || !utf8.ValidString(output) { + return "", true + } + return output, false +} + +func builtinGlobal_decodeURI(call FunctionCall) Value { + output, err := _decodeURI(call.Argument(0).string(), true) + if err { + panic(call.runtime.panicURIError("URI malformed")) + } + return toValue_string(output) +} + +func builtinGlobal_decodeURIComponent(call FunctionCall) Value { + output, err := _decodeURI(call.Argument(0).string(), false) + if err { + panic(call.runtime.panicURIError("URI malformed")) + } + return toValue_string(output) +} + +// escape/unescape + +func builtin_shouldEscape(chr byte) bool { + if 'A' <= chr && chr <= 'Z' || 'a' <= chr && chr <= 'z' || '0' <= chr && chr <= '9' { + return false + } + return !strings.ContainsRune("*_+-./", rune(chr)) +} + +const escapeBase16 = "0123456789ABCDEF" + +func builtin_escape(input string) string { + output := make([]byte, 0, len(input)) + length := len(input) + for index := 0; index < length; { + if builtin_shouldEscape(input[index]) { + chr, width := utf8.DecodeRuneInString(input[index:]) + chr16 := utf16.Encode([]rune{chr})[0] + if 256 > chr16 { + output = append(output, '%', + escapeBase16[chr16>>4], + escapeBase16[chr16&15], + ) + } else { + output = append(output, '%', 'u', + escapeBase16[chr16>>12], + escapeBase16[(chr16>>8)&15], + escapeBase16[(chr16>>4)&15], + escapeBase16[chr16&15], + ) + } + index += width + + } else { + output = append(output, input[index]) + index += 1 + } + } + return string(output) +} + +func builtin_unescape(input string) string { + output := make([]rune, 0, len(input)) + length := len(input) + for index := 0; index < length; { + if input[index] == '%' { + if index <= length-6 && input[index+1] == 'u' { + byte16, err := hex.DecodeString(input[index+2 : index+6]) + if err == nil { + value := uint16(byte16[0])<<8 + uint16(byte16[1]) + chr := utf16.Decode([]uint16{value})[0] + output = append(output, chr) + index += 6 + continue + } + } + if index <= length-3 { + byte8, err := hex.DecodeString(input[index+1 : index+3]) + if err == nil { + value := uint16(byte8[0]) + chr := utf16.Decode([]uint16{value})[0] + output = append(output, chr) + index += 3 + continue + } + } + } + output = append(output, rune(input[index])) + index += 1 + } + return string(output) +} + +func builtinGlobal_escape(call FunctionCall) Value { + return toValue_string(builtin_escape(call.Argument(0).string())) +} + +func builtinGlobal_unescape(call FunctionCall) 
Value { + return toValue_string(builtin_unescape(call.Argument(0).string())) +} diff --git a/vendor/github.com/robertkrimen/otto/builtin_array.go b/vendor/github.com/robertkrimen/otto/builtin_array.go new file mode 100644 index 000000000..557ffc024 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/builtin_array.go @@ -0,0 +1,681 @@ +package otto + +import ( + "strconv" + "strings" +) + +// Array + +func builtinArray(call FunctionCall) Value { + return toValue_object(builtinNewArrayNative(call.runtime, call.ArgumentList)) +} + +func builtinNewArray(self *_object, argumentList []Value) Value { + return toValue_object(builtinNewArrayNative(self.runtime, argumentList)) +} + +func builtinNewArrayNative(runtime *_runtime, argumentList []Value) *_object { + if len(argumentList) == 1 { + firstArgument := argumentList[0] + if firstArgument.IsNumber() { + return runtime.newArray(arrayUint32(runtime, firstArgument)) + } + } + return runtime.newArrayOf(argumentList) +} + +func builtinArray_toString(call FunctionCall) Value { + thisObject := call.thisObject() + join := thisObject.get("join") + if join.isCallable() { + join := join._object() + return join.call(call.This, call.ArgumentList, false, nativeFrame) + } + return builtinObject_toString(call) +} + +func builtinArray_toLocaleString(call FunctionCall) Value { + separator := "," + thisObject := call.thisObject() + length := int64(toUint32(thisObject.get("length"))) + if length == 0 { + return toValue_string("") + } + stringList := make([]string, 0, length) + for index := int64(0); index < length; index += 1 { + value := thisObject.get(arrayIndexToString(index)) + stringValue := "" + switch value.kind { + case valueEmpty, valueUndefined, valueNull: + default: + object := call.runtime.toObject(value) + toLocaleString := object.get("toLocaleString") + if !toLocaleString.isCallable() { + panic(call.runtime.panicTypeError()) + } + stringValue = toLocaleString.call(call.runtime, toValue_object(object)).string() + } + stringList = append(stringList, stringValue) + } + return toValue_string(strings.Join(stringList, separator)) +} + +func builtinArray_concat(call FunctionCall) Value { + thisObject := call.thisObject() + valueArray := []Value{} + source := append([]Value{toValue_object(thisObject)}, call.ArgumentList...) 
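+ // Walk `this` followed by each argument: array arguments contribute their elements one level deep (missing indices become empty values), while everything else is appended as a single item.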
+ for _, item := range source { + switch item.kind { + case valueObject: + object := item._object() + if isArray(object) { + length := object.get("length").number().int64 + for index := int64(0); index < length; index += 1 { + name := strconv.FormatInt(index, 10) + if object.hasProperty(name) { + valueArray = append(valueArray, object.get(name)) + } else { + valueArray = append(valueArray, Value{}) + } + } + continue + } + fallthrough + default: + valueArray = append(valueArray, item) + } + } + return toValue_object(call.runtime.newArrayOf(valueArray)) +} + +func builtinArray_shift(call FunctionCall) Value { + thisObject := call.thisObject() + length := int64(toUint32(thisObject.get("length"))) + if 0 == length { + thisObject.put("length", toValue_int64(0), true) + return Value{} + } + first := thisObject.get("0") + for index := int64(1); index < length; index++ { + from := arrayIndexToString(index) + to := arrayIndexToString(index - 1) + if thisObject.hasProperty(from) { + thisObject.put(to, thisObject.get(from), true) + } else { + thisObject.delete(to, true) + } + } + thisObject.delete(arrayIndexToString(length-1), true) + thisObject.put("length", toValue_int64(length-1), true) + return first +} + +func builtinArray_push(call FunctionCall) Value { + thisObject := call.thisObject() + itemList := call.ArgumentList + index := int64(toUint32(thisObject.get("length"))) + for len(itemList) > 0 { + thisObject.put(arrayIndexToString(index), itemList[0], true) + itemList = itemList[1:] + index += 1 + } + length := toValue_int64(index) + thisObject.put("length", length, true) + return length +} + +func builtinArray_pop(call FunctionCall) Value { + thisObject := call.thisObject() + length := int64(toUint32(thisObject.get("length"))) + if 0 == length { + thisObject.put("length", toValue_uint32(0), true) + return Value{} + } + last := thisObject.get(arrayIndexToString(length - 1)) + thisObject.delete(arrayIndexToString(length-1), true) + thisObject.put("length", toValue_int64(length-1), true) + return last +} + +func builtinArray_join(call FunctionCall) Value { + separator := "," + { + argument := call.Argument(0) + if argument.IsDefined() { + separator = argument.string() + } + } + thisObject := call.thisObject() + length := int64(toUint32(thisObject.get("length"))) + if length == 0 { + return toValue_string("") + } + stringList := make([]string, 0, length) + for index := int64(0); index < length; index += 1 { + value := thisObject.get(arrayIndexToString(index)) + stringValue := "" + switch value.kind { + case valueEmpty, valueUndefined, valueNull: + default: + stringValue = value.string() + } + stringList = append(stringList, stringValue) + } + return toValue_string(strings.Join(stringList, separator)) +} + +func builtinArray_splice(call FunctionCall) Value { + thisObject := call.thisObject() + length := int64(toUint32(thisObject.get("length"))) + + start := valueToRangeIndex(call.Argument(0), length, false) + deleteCount := valueToRangeIndex(call.Argument(1), int64(length)-start, true) + valueArray := make([]Value, deleteCount) + + for index := int64(0); index < deleteCount; index++ { + indexString := arrayIndexToString(int64(start + index)) + if thisObject.hasProperty(indexString) { + valueArray[index] = thisObject.get(indexString) + } + } + + // 0, <1, 2, 3, 4>, 5, 6, 7 + // a, b + // length 8 - delete 4 @ start 1 + + itemList := []Value{} + itemCount := int64(len(call.ArgumentList)) + if itemCount > 2 { + itemCount -= 2 // Less the first two arguments + itemList = call.ArgumentList[2:] + } else 
{ + itemCount = 0 + } + if itemCount < deleteCount { + // The Object/Array is shrinking + stop := int64(length) - deleteCount + // The new length of the Object/Array before + // appending the itemList remainder + // Stopping at the lower bound of the insertion: + // Move an item from after the deleted portion + // to a position after the inserted portion + for index := start; index < stop; index++ { + from := arrayIndexToString(index + deleteCount) // Position just after deletion + to := arrayIndexToString(index + itemCount) // Position just after splice (insertion) + if thisObject.hasProperty(from) { + thisObject.put(to, thisObject.get(from), true) + } else { + thisObject.delete(to, true) + } + } + // Delete off the end + // We don't bother to delete below (if any) since those + // will be overwritten anyway + for index := int64(length); index > (stop + itemCount); index-- { + thisObject.delete(arrayIndexToString(index-1), true) + } + } else if itemCount > deleteCount { + // The Object/Array is growing + // The itemCount is greater than the deleteCount, so we do + // not have to worry about overwriting what we should be moving + // --- + // Starting from the upper bound of the deletion: + // Move an item from after the deleted portion + // to a position after the inserted portion + for index := int64(length) - deleteCount; index > start; index-- { + from := arrayIndexToString(index + deleteCount - 1) + to := arrayIndexToString(index + itemCount - 1) + if thisObject.hasProperty(from) { + thisObject.put(to, thisObject.get(from), true) + } else { + thisObject.delete(to, true) + } + } + } + + for index := int64(0); index < itemCount; index++ { + thisObject.put(arrayIndexToString(index+start), itemList[index], true) + } + thisObject.put("length", toValue_int64(int64(length)+itemCount-deleteCount), true) + + return toValue_object(call.runtime.newArrayOf(valueArray)) +} + +func builtinArray_slice(call FunctionCall) Value { + thisObject := call.thisObject() + + length := int64(toUint32(thisObject.get("length"))) + start, end := rangeStartEnd(call.ArgumentList, length, false) + + if start >= end { + // Always an empty array + return toValue_object(call.runtime.newArray(0)) + } + sliceLength := end - start + sliceValueArray := make([]Value, sliceLength) + + for index := int64(0); index < sliceLength; index++ { + from := arrayIndexToString(index + start) + if thisObject.hasProperty(from) { + sliceValueArray[index] = thisObject.get(from) + } + } + + return toValue_object(call.runtime.newArrayOf(sliceValueArray)) +} + +func builtinArray_unshift(call FunctionCall) Value { + thisObject := call.thisObject() + length := int64(toUint32(thisObject.get("length"))) + itemList := call.ArgumentList + itemCount := int64(len(itemList)) + + for index := length; index > 0; index-- { + from := arrayIndexToString(index - 1) + to := arrayIndexToString(index + itemCount - 1) + if thisObject.hasProperty(from) { + thisObject.put(to, thisObject.get(from), true) + } else { + thisObject.delete(to, true) + } + } + + for index := int64(0); index < itemCount; index++ { + thisObject.put(arrayIndexToString(index), itemList[index], true) + } + + newLength := toValue_int64(length + itemCount) + thisObject.put("length", newLength, true) + return newLength +} + +func builtinArray_reverse(call FunctionCall) Value { + thisObject := call.thisObject() + length := int64(toUint32(thisObject.get("length"))) + + lower := struct { + name string + index int64 + exists bool + }{} + upper := lower + + lower.index = 0 + middle := length
/ 2 // Division will floor + + for lower.index != middle { + lower.name = arrayIndexToString(lower.index) + upper.index = length - lower.index - 1 + upper.name = arrayIndexToString(upper.index) + + lower.exists = thisObject.hasProperty(lower.name) + upper.exists = thisObject.hasProperty(upper.name) + + if lower.exists && upper.exists { + lowerValue := thisObject.get(lower.name) + upperValue := thisObject.get(upper.name) + thisObject.put(lower.name, upperValue, true) + thisObject.put(upper.name, lowerValue, true) + } else if !lower.exists && upper.exists { + value := thisObject.get(upper.name) + thisObject.delete(upper.name, true) + thisObject.put(lower.name, value, true) + } else if lower.exists && !upper.exists { + value := thisObject.get(lower.name) + thisObject.delete(lower.name, true) + thisObject.put(upper.name, value, true) + } else { + // Nothing happens. + } + + lower.index += 1 + } + + return call.This +} + +func sortCompare(thisObject *_object, index0, index1 uint, compare *_object) int { + j := struct { + name string + exists bool + defined bool + value string + }{} + k := j + j.name = arrayIndexToString(int64(index0)) + j.exists = thisObject.hasProperty(j.name) + k.name = arrayIndexToString(int64(index1)) + k.exists = thisObject.hasProperty(k.name) + + if !j.exists && !k.exists { + return 0 + } else if !j.exists { + return 1 + } else if !k.exists { + return -1 + } + + x := thisObject.get(j.name) + y := thisObject.get(k.name) + j.defined = x.IsDefined() + k.defined = y.IsDefined() + + if !j.defined && !k.defined { + return 0 + } else if !j.defined { + return 1 + } else if !k.defined { + return -1 + } + + if compare == nil { + j.value = x.string() + k.value = y.string() + + if j.value == k.value { + return 0 + } else if j.value < k.value { + return -1 + } + + return 1 + } + + return int(toInt32(compare.call(Value{}, []Value{x, y}, false, nativeFrame))) +} + +func arraySortSwap(thisObject *_object, index0, index1 uint) { + + j := struct { + name string + exists bool + }{} + k := j + + j.name = arrayIndexToString(int64(index0)) + j.exists = thisObject.hasProperty(j.name) + k.name = arrayIndexToString(int64(index1)) + k.exists = thisObject.hasProperty(k.name) + + if j.exists && k.exists { + jValue := thisObject.get(j.name) + kValue := thisObject.get(k.name) + thisObject.put(j.name, kValue, true) + thisObject.put(k.name, jValue, true) + } else if !j.exists && k.exists { + value := thisObject.get(k.name) + thisObject.delete(k.name, true) + thisObject.put(j.name, value, true) + } else if j.exists && !k.exists { + value := thisObject.get(j.name) + thisObject.delete(j.name, true) + thisObject.put(k.name, value, true) + } else { + // Nothing happens. 
+ } +} + +func arraySortQuickPartition(thisObject *_object, left, right, pivot uint, compare *_object) (uint, uint) { + arraySortSwap(thisObject, pivot, right) // Right is now the pivot value + cursor := left + cursor2 := left + for index := left; index < right; index++ { + comparison := sortCompare(thisObject, index, right, compare) // Compare to the pivot value + if comparison < 0 { + arraySortSwap(thisObject, index, cursor) + if cursor < cursor2 { + arraySortSwap(thisObject, index, cursor2) + } + cursor += 1 + cursor2 += 1 + } else if comparison == 0 { + arraySortSwap(thisObject, index, cursor2) + cursor2 += 1 + } + } + arraySortSwap(thisObject, cursor2, right) + return cursor, cursor2 +} + +func arraySortQuickSort(thisObject *_object, left, right uint, compare *_object) { + if left < right { + middle := left + (right-left)/2 + pivot, pivot2 := arraySortQuickPartition(thisObject, left, right, middle, compare) + if pivot > 0 { + arraySortQuickSort(thisObject, left, pivot-1, compare) + } + arraySortQuickSort(thisObject, pivot2+1, right, compare) + } +} + +func builtinArray_sort(call FunctionCall) Value { + thisObject := call.thisObject() + length := uint(toUint32(thisObject.get("length"))) + compareValue := call.Argument(0) + compare := compareValue._object() + if compareValue.IsUndefined() { + } else if !compareValue.isCallable() { + panic(call.runtime.panicTypeError()) + } + if length > 1 { + arraySortQuickSort(thisObject, 0, length-1, compare) + } + return call.This +} + +func builtinArray_isArray(call FunctionCall) Value { + return toValue_bool(isArray(call.Argument(0)._object())) +} + +func builtinArray_indexOf(call FunctionCall) Value { + thisObject, matchValue := call.thisObject(), call.Argument(0) + if length := int64(toUint32(thisObject.get("length"))); length > 0 { + index := int64(0) + if len(call.ArgumentList) > 1 { + index = call.Argument(1).number().int64 + } + if index < 0 { + if index += length; index < 0 { + index = 0 + } + } else if index >= length { + index = -1 + } + for ; index >= 0 && index < length; index++ { + name := arrayIndexToString(int64(index)) + if !thisObject.hasProperty(name) { + continue + } + value := thisObject.get(name) + if strictEqualityComparison(matchValue, value) { + return toValue_uint32(uint32(index)) + } + } + } + return toValue_int(-1) +} + +func builtinArray_lastIndexOf(call FunctionCall) Value { + thisObject, matchValue := call.thisObject(), call.Argument(0) + length := int64(toUint32(thisObject.get("length"))) + index := length - 1 + if len(call.ArgumentList) > 1 { + index = call.Argument(1).number().int64 + } + if 0 > index { + index += length + } + if index > length { + index = length - 1 + } else if 0 > index { + return toValue_int(-1) + } + for ; index >= 0; index-- { + name := arrayIndexToString(int64(index)) + if !thisObject.hasProperty(name) { + continue + } + value := thisObject.get(name) + if strictEqualityComparison(matchValue, value) { + return toValue_uint32(uint32(index)) + } + } + return toValue_int(-1) +} + +func builtinArray_every(call FunctionCall) Value { + thisObject := call.thisObject() + this := toValue_object(thisObject) + if iterator := call.Argument(0); iterator.isCallable() { + length := int64(toUint32(thisObject.get("length"))) + callThis := call.Argument(1) + for index := int64(0); index < length; index++ { + if key := arrayIndexToString(index); thisObject.hasProperty(key) { + if value := thisObject.get(key); iterator.call(call.runtime, callThis, value, toValue_int64(index), this).bool() { + continue + } + return 
falseValue + } + } + return trueValue + } + panic(call.runtime.panicTypeError()) +} + +func builtinArray_some(call FunctionCall) Value { + thisObject := call.thisObject() + this := toValue_object(thisObject) + if iterator := call.Argument(0); iterator.isCallable() { + length := int64(toUint32(thisObject.get("length"))) + callThis := call.Argument(1) + for index := int64(0); index < length; index++ { + if key := arrayIndexToString(index); thisObject.hasProperty(key) { + if value := thisObject.get(key); iterator.call(call.runtime, callThis, value, toValue_int64(index), this).bool() { + return trueValue + } + } + } + return falseValue + } + panic(call.runtime.panicTypeError()) +} + +func builtinArray_forEach(call FunctionCall) Value { + thisObject := call.thisObject() + this := toValue_object(thisObject) + if iterator := call.Argument(0); iterator.isCallable() { + length := int64(toUint32(thisObject.get("length"))) + callThis := call.Argument(1) + for index := int64(0); index < length; index++ { + if key := arrayIndexToString(index); thisObject.hasProperty(key) { + iterator.call(call.runtime, callThis, thisObject.get(key), toValue_int64(index), this) + } + } + return Value{} + } + panic(call.runtime.panicTypeError()) +} + +func builtinArray_map(call FunctionCall) Value { + thisObject := call.thisObject() + this := toValue_object(thisObject) + if iterator := call.Argument(0); iterator.isCallable() { + length := int64(toUint32(thisObject.get("length"))) + callThis := call.Argument(1) + values := make([]Value, length) + for index := int64(0); index < length; index++ { + if key := arrayIndexToString(index); thisObject.hasProperty(key) { + values[index] = iterator.call(call.runtime, callThis, thisObject.get(key), index, this) + } else { + values[index] = Value{} + } + } + return toValue_object(call.runtime.newArrayOf(values)) + } + panic(call.runtime.panicTypeError()) +} + +func builtinArray_filter(call FunctionCall) Value { + thisObject := call.thisObject() + this := toValue_object(thisObject) + if iterator := call.Argument(0); iterator.isCallable() { + length := int64(toUint32(thisObject.get("length"))) + callThis := call.Argument(1) + values := make([]Value, 0) + for index := int64(0); index < length; index++ { + if key := arrayIndexToString(index); thisObject.hasProperty(key) { + value := thisObject.get(key) + if iterator.call(call.runtime, callThis, value, index, this).bool() { + values = append(values, value) + } + } + } + return toValue_object(call.runtime.newArrayOf(values)) + } + panic(call.runtime.panicTypeError()) +} + +func builtinArray_reduce(call FunctionCall) Value { + thisObject := call.thisObject() + this := toValue_object(thisObject) + if iterator := call.Argument(0); iterator.isCallable() { + initial := len(call.ArgumentList) > 1 + start := call.Argument(1) + length := int64(toUint32(thisObject.get("length"))) + index := int64(0) + if length > 0 || initial { + var accumulator Value + if !initial { + for ; index < length; index++ { + if key := arrayIndexToString(index); thisObject.hasProperty(key) { + accumulator = thisObject.get(key) + index++ + break + } + } + } else { + accumulator = start + } + for ; index < length; index++ { + if key := arrayIndexToString(index); thisObject.hasProperty(key) { + accumulator = iterator.call(call.runtime, Value{}, accumulator, thisObject.get(key), key, this) + } + } + return accumulator + } + } + panic(call.runtime.panicTypeError()) +} + +func builtinArray_reduceRight(call FunctionCall) Value { + thisObject := call.thisObject() + this := 
toValue_object(thisObject) + if iterator := call.Argument(0); iterator.isCallable() { + initial := len(call.ArgumentList) > 1 + start := call.Argument(1) + length := int64(toUint32(thisObject.get("length"))) + if length > 0 || initial { + index := length - 1 + var accumulator Value + if !initial { + for ; index >= 0; index-- { + if key := arrayIndexToString(index); thisObject.hasProperty(key) { + accumulator = thisObject.get(key) + index-- + break + } + } + } else { + accumulator = start + } + for ; index >= 0; index-- { + if key := arrayIndexToString(index); thisObject.hasProperty(key) { + accumulator = iterator.call(call.runtime, Value{}, accumulator, thisObject.get(key), key, this) + } + } + return accumulator + } + } + panic(call.runtime.panicTypeError()) +} diff --git a/vendor/github.com/robertkrimen/otto/builtin_boolean.go b/vendor/github.com/robertkrimen/otto/builtin_boolean.go new file mode 100644 index 000000000..59b8e789b --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/builtin_boolean.go @@ -0,0 +1,28 @@ +package otto + +// Boolean + +func builtinBoolean(call FunctionCall) Value { + return toValue_bool(call.Argument(0).bool()) +} + +func builtinNewBoolean(self *_object, argumentList []Value) Value { + return toValue_object(self.runtime.newBoolean(valueOfArrayIndex(argumentList, 0))) +} + +func builtinBoolean_toString(call FunctionCall) Value { + value := call.This + if !value.IsBoolean() { + // Will throw a TypeError if ThisObject is not a Boolean + value = call.thisClassObject("Boolean").primitiveValue() + } + return toValue_string(value.string()) +} + +func builtinBoolean_valueOf(call FunctionCall) Value { + value := call.This + if !value.IsBoolean() { + value = call.thisClassObject("Boolean").primitiveValue() + } + return value +} diff --git a/vendor/github.com/robertkrimen/otto/builtin_date.go b/vendor/github.com/robertkrimen/otto/builtin_date.go new file mode 100644 index 000000000..f20bf8e3f --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/builtin_date.go @@ -0,0 +1,615 @@ +package otto + +import ( + "math" + Time "time" +) + +// Date + +const ( + // TODO Be like V8? 
+ // builtinDate_goDateTimeLayout = "Mon Jan 2 2006 15:04:05 GMT-0700 (MST)" + builtinDate_goDateTimeLayout = Time.RFC1123 // "Mon, 02 Jan 2006 15:04:05 MST" + builtinDate_goDateLayout = "Mon, 02 Jan 2006" + builtinDate_goTimeLayout = "15:04:05 MST" +) + +func builtinDate(call FunctionCall) Value { + date := &_dateObject{} + date.Set(newDateTime([]Value{}, Time.Local)) + return toValue_string(date.Time().Format(builtinDate_goDateTimeLayout)) +} + +func builtinNewDate(self *_object, argumentList []Value) Value { + return toValue_object(self.runtime.newDate(newDateTime(argumentList, Time.Local))) +} + +func builtinDate_toString(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return toValue_string("Invalid Date") + } + return toValue_string(date.Time().Local().Format(builtinDate_goDateTimeLayout)) +} + +func builtinDate_toDateString(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return toValue_string("Invalid Date") + } + return toValue_string(date.Time().Local().Format(builtinDate_goDateLayout)) +} + +func builtinDate_toTimeString(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return toValue_string("Invalid Date") + } + return toValue_string(date.Time().Local().Format(builtinDate_goTimeLayout)) +} + +func builtinDate_toUTCString(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return toValue_string("Invalid Date") + } + return toValue_string(date.Time().Format(builtinDate_goDateTimeLayout)) +} + +func builtinDate_toISOString(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return toValue_string("Invalid Date") + } + return toValue_string(date.Time().Format("2006-01-02T15:04:05.000Z")) +} + +func builtinDate_toJSON(call FunctionCall) Value { + object := call.thisObject() + value := object.DefaultValue(defaultValueHintNumber) // FIXME object.primitiveNumberValue + { // FIXME value.isFinite + value := value.float64() + if math.IsNaN(value) || math.IsInf(value, 0) { + return nullValue + } + } + toISOString := object.get("toISOString") + if !toISOString.isCallable() { + // FIXME + panic(call.runtime.panicTypeError()) + } + return toISOString.call(call.runtime, toValue_object(object), []Value{}) +} + +func builtinDate_toGMTString(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return toValue_string("Invalid Date") + } + return toValue_string(date.Time().Format("Mon, 02 Jan 2006 15:04:05 GMT")) +} + +func builtinDate_getTime(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + // We do this (convert away from a float) so the user + // does not get something back in exponential notation + return toValue_int64(int64(date.Epoch())) +} + +func builtinDate_setTime(call FunctionCall) Value { + object := call.thisObject() + date := dateObjectOf(call.runtime, call.thisObject()) + date.Set(call.Argument(0).float64()) + object.value = date + return date.Value() +} + +func _builtinDate_beforeSet(call FunctionCall, argumentLimit int, timeLocal bool) (*_object, *_dateObject, *_ecmaTime, []int) { + object := call.thisObject() + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return nil, nil, nil, nil + } + + if argumentLimit > len(call.ArgumentList) { + argumentLimit = 
len(call.ArgumentList) + } + + if argumentLimit == 0 { + object.value = invalidDateObject + return nil, nil, nil, nil + } + + valueList := make([]int, argumentLimit) + for index := 0; index < argumentLimit; index++ { + value := call.ArgumentList[index] + nm := value.number() + switch nm.kind { + case numberInteger, numberFloat: + default: + object.value = invalidDateObject + return nil, nil, nil, nil + } + valueList[index] = int(nm.int64) + } + baseTime := date.Time() + if timeLocal { + baseTime = baseTime.Local() + } + ecmaTime := ecmaTime(baseTime) + return object, &date, &ecmaTime, valueList +} + +func builtinDate_parse(call FunctionCall) Value { + date := call.Argument(0).string() + return toValue_float64(dateParse(date)) +} + +func builtinDate_UTC(call FunctionCall) Value { + return toValue_float64(newDateTime(call.ArgumentList, Time.UTC)) +} + +func builtinDate_now(call FunctionCall) Value { + call.ArgumentList = []Value(nil) + return builtinDate_UTC(call) +} + +// This is a placeholder +func builtinDate_toLocaleString(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return toValue_string("Invalid Date") + } + return toValue_string(date.Time().Local().Format("2006-01-02 15:04:05")) +} + +// This is a placeholder +func builtinDate_toLocaleDateString(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return toValue_string("Invalid Date") + } + return toValue_string(date.Time().Local().Format("2006-01-02")) +} + +// This is a placeholder +func builtinDate_toLocaleTimeString(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return toValue_string("Invalid Date") + } + return toValue_string(date.Time().Local().Format("15:04:05")) +} + +func builtinDate_valueOf(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + return date.Value() +} + +func builtinDate_getYear(call FunctionCall) Value { + // Will throw a TypeError if ThisObject is nil or + // does not have Class of "Date" + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + return toValue_int(date.Time().Local().Year() - 1900) +} + +func builtinDate_getFullYear(call FunctionCall) Value { + // Will throw a TypeError if ThisObject is nil or + // does not have Class of "Date" + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + return toValue_int(date.Time().Local().Year()) +} + +func builtinDate_getUTCFullYear(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + return toValue_int(date.Time().Year()) +} + +func builtinDate_getMonth(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + return toValue_int(dateFromGoMonth(date.Time().Local().Month())) +} + +func builtinDate_getUTCMonth(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + return toValue_int(dateFromGoMonth(date.Time().Month())) +} + +func builtinDate_getDate(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + return toValue_int(date.Time().Local().Day()) +} + +func builtinDate_getUTCDate(call FunctionCall) Value { + date := dateObjectOf(call.runtime,
call.thisObject()) + if date.isNaN { + return NaNValue() + } + return toValue_int(date.Time().Day()) +} + +func builtinDate_getDay(call FunctionCall) Value { + // Actually day of the week + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + return toValue_int(dateFromGoDay(date.Time().Local().Weekday())) +} + +func builtinDate_getUTCDay(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + return toValue_int(dateFromGoDay(date.Time().Weekday())) +} + +func builtinDate_getHours(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + return toValue_int(date.Time().Local().Hour()) +} + +func builtinDate_getUTCHours(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + return toValue_int(date.Time().Hour()) +} + +func builtinDate_getMinutes(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + return toValue_int(date.Time().Local().Minute()) +} + +func builtinDate_getUTCMinutes(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + return toValue_int(date.Time().Minute()) +} + +func builtinDate_getSeconds(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + return toValue_int(date.Time().Local().Second()) +} + +func builtinDate_getUTCSeconds(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + return toValue_int(date.Time().Second()) +} + +func builtinDate_getMilliseconds(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + return toValue_int(date.Time().Local().Nanosecond() / (100 * 100 * 100)) +} + +func builtinDate_getUTCMilliseconds(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + return toValue_int(date.Time().Nanosecond() / (100 * 100 * 100)) +} + +func builtinDate_getTimezoneOffset(call FunctionCall) Value { + date := dateObjectOf(call.runtime, call.thisObject()) + if date.isNaN { + return NaNValue() + } + timeLocal := date.Time().Local() + // Is this kosher? 
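+ // Re-label the local wall-clock fields as UTC below; subtracting that constructed instant from the real instant gives UTC minus local time, returned in minutes (the getTimezoneOffset convention).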
+ timeLocalAsUTC := Time.Date( + timeLocal.Year(), + timeLocal.Month(), + timeLocal.Day(), + timeLocal.Hour(), + timeLocal.Minute(), + timeLocal.Second(), + timeLocal.Nanosecond(), + Time.UTC, + ) + return toValue_float64(date.Time().Sub(timeLocalAsUTC).Seconds() / 60) +} + +func builtinDate_setMilliseconds(call FunctionCall) Value { + object, date, ecmaTime, value := _builtinDate_beforeSet(call, 1, true) + if ecmaTime == nil { + return NaNValue() + } + + ecmaTime.millisecond = value[0] + + date.SetTime(ecmaTime.goTime()) + object.value = *date + return date.Value() +} + +func builtinDate_setUTCMilliseconds(call FunctionCall) Value { + object, date, ecmaTime, value := _builtinDate_beforeSet(call, 1, false) + if ecmaTime == nil { + return NaNValue() + } + + ecmaTime.millisecond = value[0] + + date.SetTime(ecmaTime.goTime()) + object.value = *date + return date.Value() +} + +func builtinDate_setSeconds(call FunctionCall) Value { + object, date, ecmaTime, value := _builtinDate_beforeSet(call, 2, true) + if ecmaTime == nil { + return NaNValue() + } + + if len(value) > 1 { + ecmaTime.millisecond = value[1] + } + ecmaTime.second = value[0] + + date.SetTime(ecmaTime.goTime()) + object.value = *date + return date.Value() +} + +func builtinDate_setUTCSeconds(call FunctionCall) Value { + object, date, ecmaTime, value := _builtinDate_beforeSet(call, 2, false) + if ecmaTime == nil { + return NaNValue() + } + + if len(value) > 1 { + ecmaTime.millisecond = value[1] + } + ecmaTime.second = value[0] + + date.SetTime(ecmaTime.goTime()) + object.value = *date + return date.Value() +} + +func builtinDate_setMinutes(call FunctionCall) Value { + object, date, ecmaTime, value := _builtinDate_beforeSet(call, 3, true) + if ecmaTime == nil { + return NaNValue() + } + + if len(value) > 2 { + ecmaTime.millisecond = value[2] + ecmaTime.second = value[1] + } else if len(value) > 1 { + ecmaTime.second = value[1] + } + ecmaTime.minute = value[0] + + date.SetTime(ecmaTime.goTime()) + object.value = *date + return date.Value() +} + +func builtinDate_setUTCMinutes(call FunctionCall) Value { + object, date, ecmaTime, value := _builtinDate_beforeSet(call, 3, false) + if ecmaTime == nil { + return NaNValue() + } + + if len(value) > 2 { + ecmaTime.millisecond = value[2] + ecmaTime.second = value[1] + } else if len(value) > 1 { + ecmaTime.second = value[1] + } + ecmaTime.minute = value[0] + + date.SetTime(ecmaTime.goTime()) + object.value = *date + return date.Value() +} + +func builtinDate_setHours(call FunctionCall) Value { + object, date, ecmaTime, value := _builtinDate_beforeSet(call, 4, true) + if ecmaTime == nil { + return NaNValue() + } + + if len(value) > 3 { + ecmaTime.millisecond = value[3] + ecmaTime.second = value[2] + ecmaTime.minute = value[1] + } else if len(value) > 2 { + ecmaTime.second = value[2] + ecmaTime.minute = value[1] + } else if len(value) > 1 { + ecmaTime.minute = value[1] + } + ecmaTime.hour = value[0] + + date.SetTime(ecmaTime.goTime()) + object.value = *date + return date.Value() +} + +func builtinDate_setUTCHours(call FunctionCall) Value { + object, date, ecmaTime, value := _builtinDate_beforeSet(call, 4, false) + if ecmaTime == nil { + return NaNValue() + } + + if len(value) > 3 { + ecmaTime.millisecond = value[3] + ecmaTime.second = value[2] + ecmaTime.minute = value[1] + } else if len(value) > 2 { + ecmaTime.second = value[2] + ecmaTime.minute = value[1] + } else if len(value) > 1 { + ecmaTime.minute = value[1] + } + ecmaTime.hour = value[0] + + date.SetTime(ecmaTime.goTime()) + object.value = 
*date + return date.Value() +} + +func builtinDate_setDate(call FunctionCall) Value { + object, date, ecmaTime, value := _builtinDate_beforeSet(call, 1, true) + if ecmaTime == nil { + return NaNValue() + } + + ecmaTime.day = value[0] + + date.SetTime(ecmaTime.goTime()) + object.value = *date + return date.Value() +} + +func builtinDate_setUTCDate(call FunctionCall) Value { + object, date, ecmaTime, value := _builtinDate_beforeSet(call, 1, false) + if ecmaTime == nil { + return NaNValue() + } + + ecmaTime.day = value[0] + + date.SetTime(ecmaTime.goTime()) + object.value = *date + return date.Value() +} + +func builtinDate_setMonth(call FunctionCall) Value { + object, date, ecmaTime, value := _builtinDate_beforeSet(call, 2, true) + if ecmaTime == nil { + return NaNValue() + } + + if len(value) > 1 { + ecmaTime.day = value[1] + } + ecmaTime.month = value[0] + + date.SetTime(ecmaTime.goTime()) + object.value = *date + return date.Value() +} + +func builtinDate_setUTCMonth(call FunctionCall) Value { + object, date, ecmaTime, value := _builtinDate_beforeSet(call, 2, false) + if ecmaTime == nil { + return NaNValue() + } + + if len(value) > 1 { + ecmaTime.day = value[1] + } + ecmaTime.month = value[0] + + date.SetTime(ecmaTime.goTime()) + object.value = *date + return date.Value() +} + +func builtinDate_setYear(call FunctionCall) Value { + object, date, ecmaTime, value := _builtinDate_beforeSet(call, 1, true) + if ecmaTime == nil { + return NaNValue() + } + + year := value[0] + if 0 <= year && year <= 99 { + year += 1900 + } + ecmaTime.year = year + + date.SetTime(ecmaTime.goTime()) + object.value = *date + return date.Value() +} + +func builtinDate_setFullYear(call FunctionCall) Value { + object, date, ecmaTime, value := _builtinDate_beforeSet(call, 3, true) + if ecmaTime == nil { + return NaNValue() + } + + if len(value) > 2 { + ecmaTime.day = value[2] + ecmaTime.month = value[1] + } else if len(value) > 1 { + ecmaTime.month = value[1] + } + ecmaTime.year = value[0] + + date.SetTime(ecmaTime.goTime()) + object.value = *date + return date.Value() +} + +func builtinDate_setUTCFullYear(call FunctionCall) Value { + object, date, ecmaTime, value := _builtinDate_beforeSet(call, 3, false) + if ecmaTime == nil { + return NaNValue() + } + + if len(value) > 2 { + ecmaTime.day = value[2] + ecmaTime.month = value[1] + } else if len(value) > 1 { + ecmaTime.month = value[1] + } + ecmaTime.year = value[0] + + date.SetTime(ecmaTime.goTime()) + object.value = *date + return date.Value() +} + +// toUTCString +// toISOString +// toJSONString +// toJSON diff --git a/vendor/github.com/robertkrimen/otto/builtin_error.go b/vendor/github.com/robertkrimen/otto/builtin_error.go new file mode 100644 index 000000000..41da84ef2 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/builtin_error.go @@ -0,0 +1,126 @@ +package otto + +import ( + "fmt" +) + +func builtinError(call FunctionCall) Value { + return toValue_object(call.runtime.newError("Error", call.Argument(0), 1)) +} + +func builtinNewError(self *_object, argumentList []Value) Value { + return toValue_object(self.runtime.newError("Error", valueOfArrayIndex(argumentList, 0), 0)) +} + +func builtinError_toString(call FunctionCall) Value { + thisObject := call.thisObject() + if thisObject == nil { + panic(call.runtime.panicTypeError()) + } + + name := "Error" + nameValue := thisObject.get("name") + if nameValue.IsDefined() { + name = nameValue.string() + } + + message := "" + messageValue := thisObject.get("message") + if messageValue.IsDefined() { + message = 
messageValue.string() + } + + if len(name) == 0 { + return toValue_string(message) + } + + if len(message) == 0 { + return toValue_string(name) + } + + return toValue_string(fmt.Sprintf("%s: %s", name, message)) +} + +func (runtime *_runtime) newEvalError(message Value) *_object { + self := runtime.newErrorObject("EvalError", message, 0) + self.prototype = runtime.global.EvalErrorPrototype + return self +} + +func builtinEvalError(call FunctionCall) Value { + return toValue_object(call.runtime.newEvalError(call.Argument(0))) +} + +func builtinNewEvalError(self *_object, argumentList []Value) Value { + return toValue_object(self.runtime.newEvalError(valueOfArrayIndex(argumentList, 0))) +} + +func (runtime *_runtime) newTypeError(message Value) *_object { + self := runtime.newErrorObject("TypeError", message, 0) + self.prototype = runtime.global.TypeErrorPrototype + return self +} + +func builtinTypeError(call FunctionCall) Value { + return toValue_object(call.runtime.newTypeError(call.Argument(0))) +} + +func builtinNewTypeError(self *_object, argumentList []Value) Value { + return toValue_object(self.runtime.newTypeError(valueOfArrayIndex(argumentList, 0))) +} + +func (runtime *_runtime) newRangeError(message Value) *_object { + self := runtime.newErrorObject("RangeError", message, 0) + self.prototype = runtime.global.RangeErrorPrototype + return self +} + +func builtinRangeError(call FunctionCall) Value { + return toValue_object(call.runtime.newRangeError(call.Argument(0))) +} + +func builtinNewRangeError(self *_object, argumentList []Value) Value { + return toValue_object(self.runtime.newRangeError(valueOfArrayIndex(argumentList, 0))) +} + +func (runtime *_runtime) newURIError(message Value) *_object { + self := runtime.newErrorObject("URIError", message, 0) + self.prototype = runtime.global.URIErrorPrototype + return self +} + +func (runtime *_runtime) newReferenceError(message Value) *_object { + self := runtime.newErrorObject("ReferenceError", message, 0) + self.prototype = runtime.global.ReferenceErrorPrototype + return self +} + +func builtinReferenceError(call FunctionCall) Value { + return toValue_object(call.runtime.newReferenceError(call.Argument(0))) +} + +func builtinNewReferenceError(self *_object, argumentList []Value) Value { + return toValue_object(self.runtime.newReferenceError(valueOfArrayIndex(argumentList, 0))) +} + +func (runtime *_runtime) newSyntaxError(message Value) *_object { + self := runtime.newErrorObject("SyntaxError", message, 0) + self.prototype = runtime.global.SyntaxErrorPrototype + return self +} + +func builtinSyntaxError(call FunctionCall) Value { + return toValue_object(call.runtime.newSyntaxError(call.Argument(0))) +} + +func builtinNewSyntaxError(self *_object, argumentList []Value) Value { + return toValue_object(self.runtime.newSyntaxError(valueOfArrayIndex(argumentList, 0))) +} + +func builtinURIError(call FunctionCall) Value { + return toValue_object(call.runtime.newURIError(call.Argument(0))) +} + +func builtinNewURIError(self *_object, argumentList []Value) Value { + return toValue_object(self.runtime.newURIError(valueOfArrayIndex(argumentList, 0))) +} diff --git a/vendor/github.com/robertkrimen/otto/builtin_function.go b/vendor/github.com/robertkrimen/otto/builtin_function.go new file mode 100644 index 000000000..3d07566c6 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/builtin_function.go @@ -0,0 +1,129 @@ +package otto + +import ( + "fmt" + "regexp" + "strings" + "unicode" + + "github.com/robertkrimen/otto/parser" +) + +// Function 
+ +func builtinFunction(call FunctionCall) Value { + return toValue_object(builtinNewFunctionNative(call.runtime, call.ArgumentList)) +} + +func builtinNewFunction(self *_object, argumentList []Value) Value { + return toValue_object(builtinNewFunctionNative(self.runtime, argumentList)) +} + +func argumentList2parameterList(argumentList []Value) []string { + parameterList := make([]string, 0, len(argumentList)) + for _, value := range argumentList { + tmp := strings.FieldsFunc(value.string(), func(chr rune) bool { + return chr == ',' || unicode.IsSpace(chr) + }) + parameterList = append(parameterList, tmp...) + } + return parameterList +} + +var matchIdentifier = regexp.MustCompile(`^[$_\p{L}][$_\p{L}\d}]*$`) + +func builtinNewFunctionNative(runtime *_runtime, argumentList []Value) *_object { + var parameterList, body string + count := len(argumentList) + if count > 0 { + tmp := make([]string, 0, count-1) + for _, value := range argumentList[0 : count-1] { + tmp = append(tmp, value.string()) + } + parameterList = strings.Join(tmp, ",") + body = argumentList[count-1].string() + } + + // FIXME + function, err := parser.ParseFunction(parameterList, body) + runtime.parseThrow(err) // Will panic/throw appropriately + cmpl := _compiler{} + cmpl_function := cmpl.parseExpression(function) + + return runtime.newNodeFunction(cmpl_function.(*_nodeFunctionLiteral), runtime.globalStash) +} + +func builtinFunction_toString(call FunctionCall) Value { + object := call.thisClassObject("Function") // Should throw a TypeError unless Function + switch fn := object.value.(type) { + case _nativeFunctionObject: + return toValue_string(fmt.Sprintf("function %s() { [native code] }", fn.name)) + case _nodeFunctionObject: + return toValue_string(fn.node.source) + case _bindFunctionObject: + return toValue_string("function () { [native code] }") + } + + panic(call.runtime.panicTypeError("Function.toString()")) +} + +func builtinFunction_apply(call FunctionCall) Value { + if !call.This.isCallable() { + panic(call.runtime.panicTypeError()) + } + this := call.Argument(0) + if this.IsUndefined() { + // FIXME Not ECMA5 + this = toValue_object(call.runtime.globalObject) + } + argumentList := call.Argument(1) + switch argumentList.kind { + case valueUndefined, valueNull: + return call.thisObject().call(this, nil, false, nativeFrame) + case valueObject: + default: + panic(call.runtime.panicTypeError()) + } + + arrayObject := argumentList._object() + thisObject := call.thisObject() + length := int64(toUint32(arrayObject.get("length"))) + valueArray := make([]Value, length) + for index := int64(0); index < length; index++ { + valueArray[index] = arrayObject.get(arrayIndexToString(index)) + } + return thisObject.call(this, valueArray, false, nativeFrame) +} + +func builtinFunction_call(call FunctionCall) Value { + if !call.This.isCallable() { + panic(call.runtime.panicTypeError()) + } + thisObject := call.thisObject() + this := call.Argument(0) + if this.IsUndefined() { + // FIXME Not ECMA5 + this = toValue_object(call.runtime.globalObject) + } + if len(call.ArgumentList) >= 1 { + return thisObject.call(this, call.ArgumentList[1:], false, nativeFrame) + } + return thisObject.call(this, nil, false, nativeFrame) +} + +func builtinFunction_bind(call FunctionCall) Value { + target := call.This + if !target.isCallable() { + panic(call.runtime.panicTypeError()) + } + targetObject := target._object() + + this := call.Argument(0) + argumentList := call.slice(1) + if this.IsUndefined() { + // FIXME Do this elsewhere? 
+ this = toValue_object(call.runtime.globalObject) + } + + return toValue_object(call.runtime.newBoundFunction(targetObject, this, argumentList)) +} diff --git a/vendor/github.com/robertkrimen/otto/builtin_json.go b/vendor/github.com/robertkrimen/otto/builtin_json.go new file mode 100644 index 000000000..aed54bf12 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/builtin_json.go @@ -0,0 +1,299 @@ +package otto + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type _builtinJSON_parseContext struct { + call FunctionCall + reviver Value +} + +func builtinJSON_parse(call FunctionCall) Value { + ctx := _builtinJSON_parseContext{ + call: call, + } + revive := false + if reviver := call.Argument(1); reviver.isCallable() { + revive = true + ctx.reviver = reviver + } + + var root interface{} + err := json.Unmarshal([]byte(call.Argument(0).string()), &root) + if err != nil { + panic(call.runtime.panicSyntaxError(err.Error())) + } + value, exists := builtinJSON_parseWalk(ctx, root) + if !exists { + value = Value{} + } + if revive { + root := ctx.call.runtime.newObject() + root.put("", value, false) + return builtinJSON_reviveWalk(ctx, root, "") + } + return value +} + +func builtinJSON_reviveWalk(ctx _builtinJSON_parseContext, holder *_object, name string) Value { + value := holder.get(name) + if object := value._object(); object != nil { + if isArray(object) { + length := int64(objectLength(object)) + for index := int64(0); index < length; index += 1 { + name := arrayIndexToString(index) + value := builtinJSON_reviveWalk(ctx, object, name) + if value.IsUndefined() { + object.delete(name, false) + } else { + object.defineProperty(name, value, 0111, false) + } + } + } else { + object.enumerate(false, func(name string) bool { + value := builtinJSON_reviveWalk(ctx, object, name) + if value.IsUndefined() { + object.delete(name, false) + } else { + object.defineProperty(name, value, 0111, false) + } + return true + }) + } + } + return ctx.reviver.call(ctx.call.runtime, toValue_object(holder), name, value) +} + +func builtinJSON_parseWalk(ctx _builtinJSON_parseContext, rawValue interface{}) (Value, bool) { + switch value := rawValue.(type) { + case nil: + return nullValue, true + case bool: + return toValue_bool(value), true + case string: + return toValue_string(value), true + case float64: + return toValue_float64(value), true + case []interface{}: + arrayValue := make([]Value, len(value)) + for index, rawValue := range value { + if value, exists := builtinJSON_parseWalk(ctx, rawValue); exists { + arrayValue[index] = value + } + } + return toValue_object(ctx.call.runtime.newArrayOf(arrayValue)), true + case map[string]interface{}: + object := ctx.call.runtime.newObject() + for name, rawValue := range value { + if value, exists := builtinJSON_parseWalk(ctx, rawValue); exists { + object.put(name, value, false) + } + } + return toValue_object(object), true + } + return Value{}, false +} + +type _builtinJSON_stringifyContext struct { + call FunctionCall + stack []*_object + propertyList []string + replacerFunction *Value + gap string +} + +func builtinJSON_stringify(call FunctionCall) Value { + ctx := _builtinJSON_stringifyContext{ + call: call, + stack: []*_object{nil}, + } + replacer := call.Argument(1)._object() + if replacer != nil { + if isArray(replacer) { + length := objectLength(replacer) + seen := map[string]bool{} + propertyList := make([]string, length) + length = 0 + for index, _ := range propertyList { + value := replacer.get(arrayIndexToString(int64(index))) + switch 
value.kind { + case valueObject: + switch value.value.(*_object).class { + case "String": + case "Number": + default: + continue + } + case valueString: + case valueNumber: + default: + continue + } + name := value.string() + if seen[name] { + continue + } + seen[name] = true + length += 1 + propertyList[index] = name + } + ctx.propertyList = propertyList[0:length] + } else if replacer.class == "Function" { + value := toValue_object(replacer) + ctx.replacerFunction = &value + } + } + if spaceValue, exists := call.getArgument(2); exists { + if spaceValue.kind == valueObject { + switch spaceValue.value.(*_object).class { + case "String": + spaceValue = toValue_string(spaceValue.string()) + case "Number": + spaceValue = spaceValue.numberValue() + } + } + switch spaceValue.kind { + case valueString: + value := spaceValue.string() + if len(value) > 10 { + ctx.gap = value[0:10] + } else { + ctx.gap = value + } + case valueNumber: + value := spaceValue.number().int64 + if value > 10 { + value = 10 + } else if value < 0 { + value = 0 + } + ctx.gap = strings.Repeat(" ", int(value)) + } + } + holder := call.runtime.newObject() + holder.put("", call.Argument(0), false) + value, exists := builtinJSON_stringifyWalk(ctx, "", holder) + if !exists { + return Value{} + } + valueJSON, err := json.Marshal(value) + if err != nil { + panic(call.runtime.panicTypeError(err.Error())) + } + if ctx.gap != "" { + valueJSON1 := bytes.Buffer{} + json.Indent(&valueJSON1, valueJSON, "", ctx.gap) + valueJSON = valueJSON1.Bytes() + } + return toValue_string(string(valueJSON)) +} + +func builtinJSON_stringifyWalk(ctx _builtinJSON_stringifyContext, key string, holder *_object) (interface{}, bool) { + value := holder.get(key) + + if value.IsObject() { + object := value._object() + if toJSON := object.get("toJSON"); toJSON.IsFunction() { + value = toJSON.call(ctx.call.runtime, value, key) + } else { + // If the object is a GoStruct or something that implements json.Marshaler + if object.objectClass.marshalJSON != nil { + marshaler := object.objectClass.marshalJSON(object) + if marshaler != nil { + return marshaler, true + } + } + } + } + + if ctx.replacerFunction != nil { + value = (*ctx.replacerFunction).call(ctx.call.runtime, toValue_object(holder), key, value) + } + + if value.kind == valueObject { + switch value.value.(*_object).class { + case "Boolean": + value = value._object().value.(Value) + case "String": + value = toValue_string(value.string()) + case "Number": + value = value.numberValue() + } + } + + switch value.kind { + case valueBoolean: + return value.bool(), true + case valueString: + return value.string(), true + case valueNumber: + integer := value.number() + switch integer.kind { + case numberInteger: + return integer.int64, true + case numberFloat: + return integer.float64, true + default: + return nil, true + } + case valueNull: + return nil, true + case valueObject: + holder := value._object() + if value := value._object(); nil != value { + for _, object := range ctx.stack { + if holder == object { + panic(ctx.call.runtime.panicTypeError("Converting circular structure to JSON")) + } + } + ctx.stack = append(ctx.stack, value) + defer func() { ctx.stack = ctx.stack[:len(ctx.stack)-1] }() + } + if isArray(holder) { + var length uint32 + switch value := holder.get("length").value.(type) { + case uint32: + length = value + case int: + if value >= 0 { + length = uint32(value) + } + default: + panic(ctx.call.runtime.panicTypeError(fmt.Sprintf("JSON.stringify: invalid length: %v (%[1]T)", value))) + } + array 
:= make([]interface{}, length) + for index, _ := range array { + name := arrayIndexToString(int64(index)) + value, _ := builtinJSON_stringifyWalk(ctx, name, holder) + array[index] = value + } + return array, true + } else if holder.class != "Function" { + object := map[string]interface{}{} + if ctx.propertyList != nil { + for _, name := range ctx.propertyList { + value, exists := builtinJSON_stringifyWalk(ctx, name, holder) + if exists { + object[name] = value + } + } + } else { + // Go maps are without order, so this doesn't conform to the ECMA ordering + // standard, but oh well... + holder.enumerate(false, func(name string) bool { + value, exists := builtinJSON_stringifyWalk(ctx, name, holder) + if exists { + object[name] = value + } + return true + }) + } + return object, true + } + } + return nil, false +} diff --git a/vendor/github.com/robertkrimen/otto/builtin_math.go b/vendor/github.com/robertkrimen/otto/builtin_math.go new file mode 100644 index 000000000..7ce90c339 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/builtin_math.go @@ -0,0 +1,151 @@ +package otto + +import ( + "math" + "math/rand" +) + +// Math + +func builtinMath_abs(call FunctionCall) Value { + number := call.Argument(0).float64() + return toValue_float64(math.Abs(number)) +} + +func builtinMath_acos(call FunctionCall) Value { + number := call.Argument(0).float64() + return toValue_float64(math.Acos(number)) +} + +func builtinMath_asin(call FunctionCall) Value { + number := call.Argument(0).float64() + return toValue_float64(math.Asin(number)) +} + +func builtinMath_atan(call FunctionCall) Value { + number := call.Argument(0).float64() + return toValue_float64(math.Atan(number)) +} + +func builtinMath_atan2(call FunctionCall) Value { + y := call.Argument(0).float64() + if math.IsNaN(y) { + return NaNValue() + } + x := call.Argument(1).float64() + if math.IsNaN(x) { + return NaNValue() + } + return toValue_float64(math.Atan2(y, x)) +} + +func builtinMath_cos(call FunctionCall) Value { + number := call.Argument(0).float64() + return toValue_float64(math.Cos(number)) +} + +func builtinMath_ceil(call FunctionCall) Value { + number := call.Argument(0).float64() + return toValue_float64(math.Ceil(number)) +} + +func builtinMath_exp(call FunctionCall) Value { + number := call.Argument(0).float64() + return toValue_float64(math.Exp(number)) +} + +func builtinMath_floor(call FunctionCall) Value { + number := call.Argument(0).float64() + return toValue_float64(math.Floor(number)) +} + +func builtinMath_log(call FunctionCall) Value { + number := call.Argument(0).float64() + return toValue_float64(math.Log(number)) +} + +func builtinMath_max(call FunctionCall) Value { + switch len(call.ArgumentList) { + case 0: + return negativeInfinityValue() + case 1: + return toValue_float64(call.ArgumentList[0].float64()) + } + result := call.ArgumentList[0].float64() + if math.IsNaN(result) { + return NaNValue() + } + for _, value := range call.ArgumentList[1:] { + value := value.float64() + if math.IsNaN(value) { + return NaNValue() + } + result = math.Max(result, value) + } + return toValue_float64(result) +} + +func builtinMath_min(call FunctionCall) Value { + switch len(call.ArgumentList) { + case 0: + return positiveInfinityValue() + case 1: + return toValue_float64(call.ArgumentList[0].float64()) + } + result := call.ArgumentList[0].float64() + if math.IsNaN(result) { + return NaNValue() + } + for _, value := range call.ArgumentList[1:] { + value := value.float64() + if math.IsNaN(value) { + return NaNValue() + } + result = 
math.Min(result, value) + } + return toValue_float64(result) +} + +func builtinMath_pow(call FunctionCall) Value { + // TODO Make sure this works according to the specification (15.8.2.13) + x := call.Argument(0).float64() + y := call.Argument(1).float64() + if math.Abs(x) == 1 && math.IsInf(y, 0) { + return NaNValue() + } + return toValue_float64(math.Pow(x, y)) +} + +func builtinMath_random(call FunctionCall) Value { + var v float64 + if call.runtime.random != nil { + v = call.runtime.random() + } else { + v = rand.Float64() + } + return toValue_float64(v) +} + +func builtinMath_round(call FunctionCall) Value { + number := call.Argument(0).float64() + value := math.Floor(number + 0.5) + if value == 0 { + value = math.Copysign(0, number) + } + return toValue_float64(value) +} + +func builtinMath_sin(call FunctionCall) Value { + number := call.Argument(0).float64() + return toValue_float64(math.Sin(number)) +} + +func builtinMath_sqrt(call FunctionCall) Value { + number := call.Argument(0).float64() + return toValue_float64(math.Sqrt(number)) +} + +func builtinMath_tan(call FunctionCall) Value { + number := call.Argument(0).float64() + return toValue_float64(math.Tan(number)) +} diff --git a/vendor/github.com/robertkrimen/otto/builtin_number.go b/vendor/github.com/robertkrimen/otto/builtin_number.go new file mode 100644 index 000000000..f99a42a2f --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/builtin_number.go @@ -0,0 +1,93 @@ +package otto + +import ( + "math" + "strconv" +) + +// Number + +func numberValueFromNumberArgumentList(argumentList []Value) Value { + if len(argumentList) > 0 { + return argumentList[0].numberValue() + } + return toValue_int(0) +} + +func builtinNumber(call FunctionCall) Value { + return numberValueFromNumberArgumentList(call.ArgumentList) +} + +func builtinNewNumber(self *_object, argumentList []Value) Value { + return toValue_object(self.runtime.newNumber(numberValueFromNumberArgumentList(argumentList))) +} + +func builtinNumber_toString(call FunctionCall) Value { + // Will throw a TypeError if ThisObject is not a Number + value := call.thisClassObject("Number").primitiveValue() + radix := 10 + radixArgument := call.Argument(0) + if radixArgument.IsDefined() { + integer := toIntegerFloat(radixArgument) + if integer < 2 || integer > 36 { + panic(call.runtime.panicRangeError("toString() radix must be between 2 and 36")) + } + radix = int(integer) + } + if radix == 10 { + return toValue_string(value.string()) + } + return toValue_string(numberToStringRadix(value, radix)) +} + +func builtinNumber_valueOf(call FunctionCall) Value { + return call.thisClassObject("Number").primitiveValue() +} + +func builtinNumber_toFixed(call FunctionCall) Value { + precision := toIntegerFloat(call.Argument(0)) + if 20 < precision || 0 > precision { + panic(call.runtime.panicRangeError("toFixed() precision must be between 0 and 20")) + } + if call.This.IsNaN() { + return toValue_string("NaN") + } + value := call.This.float64() + if math.Abs(value) >= 1e21 { + return toValue_string(floatToString(value, 64)) + } + return toValue_string(strconv.FormatFloat(call.This.float64(), 'f', int(precision), 64)) +} + +func builtinNumber_toExponential(call FunctionCall) Value { + if call.This.IsNaN() { + return toValue_string("NaN") + } + precision := float64(-1) + if value := call.Argument(0); value.IsDefined() { + precision = toIntegerFloat(value) + if 0 > precision { + panic(call.runtime.panicRangeError("toString() radix must be between 2 and 36")) + } + } + return 
toValue_string(strconv.FormatFloat(call.This.float64(), 'e', int(precision), 64)) +} + +func builtinNumber_toPrecision(call FunctionCall) Value { + if call.This.IsNaN() { + return toValue_string("NaN") + } + value := call.Argument(0) + if value.IsUndefined() { + return toValue_string(call.This.string()) + } + precision := toIntegerFloat(value) + if 1 > precision { + panic(call.runtime.panicRangeError("toPrecision() precision must be greater than 1")) + } + return toValue_string(strconv.FormatFloat(call.This.float64(), 'g', int(precision), 64)) +} + +func builtinNumber_toLocaleString(call FunctionCall) Value { + return builtinNumber_toString(call) +} diff --git a/vendor/github.com/robertkrimen/otto/builtin_object.go b/vendor/github.com/robertkrimen/otto/builtin_object.go new file mode 100644 index 000000000..c2433f7be --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/builtin_object.go @@ -0,0 +1,289 @@ +package otto + +import ( + "fmt" +) + +// Object + +func builtinObject(call FunctionCall) Value { + value := call.Argument(0) + switch value.kind { + case valueUndefined, valueNull: + return toValue_object(call.runtime.newObject()) + } + + return toValue_object(call.runtime.toObject(value)) +} + +func builtinNewObject(self *_object, argumentList []Value) Value { + value := valueOfArrayIndex(argumentList, 0) + switch value.kind { + case valueNull, valueUndefined: + case valueNumber, valueString, valueBoolean: + return toValue_object(self.runtime.toObject(value)) + case valueObject: + return value + default: + } + return toValue_object(self.runtime.newObject()) +} + +func builtinObject_valueOf(call FunctionCall) Value { + return toValue_object(call.thisObject()) +} + +func builtinObject_hasOwnProperty(call FunctionCall) Value { + propertyName := call.Argument(0).string() + thisObject := call.thisObject() + return toValue_bool(thisObject.hasOwnProperty(propertyName)) +} + +func builtinObject_isPrototypeOf(call FunctionCall) Value { + value := call.Argument(0) + if !value.IsObject() { + return falseValue + } + prototype := call.toObject(value).prototype + thisObject := call.thisObject() + for prototype != nil { + if thisObject == prototype { + return trueValue + } + prototype = prototype.prototype + } + return falseValue +} + +func builtinObject_propertyIsEnumerable(call FunctionCall) Value { + propertyName := call.Argument(0).string() + thisObject := call.thisObject() + property := thisObject.getOwnProperty(propertyName) + if property != nil && property.enumerable() { + return trueValue + } + return falseValue +} + +func builtinObject_toString(call FunctionCall) Value { + result := "" + if call.This.IsUndefined() { + result = "[object Undefined]" + } else if call.This.IsNull() { + result = "[object Null]" + } else { + result = fmt.Sprintf("[object %s]", call.thisObject().class) + } + return toValue_string(result) +} + +func builtinObject_toLocaleString(call FunctionCall) Value { + toString := call.thisObject().get("toString") + if !toString.isCallable() { + panic(call.runtime.panicTypeError()) + } + return toString.call(call.runtime, call.This) +} + +func builtinObject_getPrototypeOf(call FunctionCall) Value { + objectValue := call.Argument(0) + object := objectValue._object() + if object == nil { + panic(call.runtime.panicTypeError()) + } + + if object.prototype == nil { + return nullValue + } + + return toValue_object(object.prototype) +} + +func builtinObject_getOwnPropertyDescriptor(call FunctionCall) Value { + objectValue := call.Argument(0) + object := objectValue._object() + if 
object == nil { + panic(call.runtime.panicTypeError()) + } + + name := call.Argument(1).string() + descriptor := object.getOwnProperty(name) + if descriptor == nil { + return Value{} + } + return toValue_object(call.runtime.fromPropertyDescriptor(*descriptor)) +} + +func builtinObject_defineProperty(call FunctionCall) Value { + objectValue := call.Argument(0) + object := objectValue._object() + if object == nil { + panic(call.runtime.panicTypeError()) + } + name := call.Argument(1).string() + descriptor := toPropertyDescriptor(call.runtime, call.Argument(2)) + object.defineOwnProperty(name, descriptor, true) + return objectValue +} + +func builtinObject_defineProperties(call FunctionCall) Value { + objectValue := call.Argument(0) + object := objectValue._object() + if object == nil { + panic(call.runtime.panicTypeError()) + } + + properties := call.runtime.toObject(call.Argument(1)) + properties.enumerate(false, func(name string) bool { + descriptor := toPropertyDescriptor(call.runtime, properties.get(name)) + object.defineOwnProperty(name, descriptor, true) + return true + }) + + return objectValue +} + +func builtinObject_create(call FunctionCall) Value { + prototypeValue := call.Argument(0) + if !prototypeValue.IsNull() && !prototypeValue.IsObject() { + panic(call.runtime.panicTypeError()) + } + + object := call.runtime.newObject() + object.prototype = prototypeValue._object() + + propertiesValue := call.Argument(1) + if propertiesValue.IsDefined() { + properties := call.runtime.toObject(propertiesValue) + properties.enumerate(false, func(name string) bool { + descriptor := toPropertyDescriptor(call.runtime, properties.get(name)) + object.defineOwnProperty(name, descriptor, true) + return true + }) + } + + return toValue_object(object) +} + +func builtinObject_isExtensible(call FunctionCall) Value { + object := call.Argument(0) + if object := object._object(); object != nil { + return toValue_bool(object.extensible) + } + panic(call.runtime.panicTypeError()) +} + +func builtinObject_preventExtensions(call FunctionCall) Value { + object := call.Argument(0) + if object := object._object(); object != nil { + object.extensible = false + } else { + panic(call.runtime.panicTypeError()) + } + return object +} + +func builtinObject_isSealed(call FunctionCall) Value { + object := call.Argument(0) + if object := object._object(); object != nil { + if object.extensible { + return toValue_bool(false) + } + result := true + object.enumerate(true, func(name string) bool { + property := object.getProperty(name) + if property.configurable() { + result = false + } + return true + }) + return toValue_bool(result) + } + panic(call.runtime.panicTypeError()) +} + +func builtinObject_seal(call FunctionCall) Value { + object := call.Argument(0) + if object := object._object(); object != nil { + object.enumerate(true, func(name string) bool { + if property := object.getOwnProperty(name); nil != property && property.configurable() { + property.configureOff() + object.defineOwnProperty(name, *property, true) + } + return true + }) + object.extensible = false + } else { + panic(call.runtime.panicTypeError()) + } + return object +} + +func builtinObject_isFrozen(call FunctionCall) Value { + object := call.Argument(0) + if object := object._object(); object != nil { + if object.extensible { + return toValue_bool(false) + } + result := true + object.enumerate(true, func(name string) bool { + property := object.getProperty(name) + if property.configurable() || property.writable() { + result = false + } + return true 
+ }) + return toValue_bool(result) + } + panic(call.runtime.panicTypeError()) +} + +func builtinObject_freeze(call FunctionCall) Value { + object := call.Argument(0) + if object := object._object(); object != nil { + object.enumerate(true, func(name string) bool { + if property, update := object.getOwnProperty(name), false; nil != property { + if property.isDataDescriptor() && property.writable() { + property.writeOff() + update = true + } + if property.configurable() { + property.configureOff() + update = true + } + if update { + object.defineOwnProperty(name, *property, true) + } + } + return true + }) + object.extensible = false + } else { + panic(call.runtime.panicTypeError()) + } + return object +} + +func builtinObject_keys(call FunctionCall) Value { + if object, keys := call.Argument(0)._object(), []Value(nil); nil != object { + object.enumerate(false, func(name string) bool { + keys = append(keys, toValue_string(name)) + return true + }) + return toValue_object(call.runtime.newArrayOf(keys)) + } + panic(call.runtime.panicTypeError()) +} + +func builtinObject_getOwnPropertyNames(call FunctionCall) Value { + if object, propertyNames := call.Argument(0)._object(), []Value(nil); nil != object { + object.enumerate(true, func(name string) bool { + if object.hasOwnProperty(name) { + propertyNames = append(propertyNames, toValue_string(name)) + } + return true + }) + return toValue_object(call.runtime.newArrayOf(propertyNames)) + } + panic(call.runtime.panicTypeError()) +} diff --git a/vendor/github.com/robertkrimen/otto/builtin_regexp.go b/vendor/github.com/robertkrimen/otto/builtin_regexp.go new file mode 100644 index 000000000..99422510d --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/builtin_regexp.go @@ -0,0 +1,65 @@ +package otto + +import ( + "fmt" +) + +// RegExp + +func builtinRegExp(call FunctionCall) Value { + pattern := call.Argument(0) + flags := call.Argument(1) + if object := pattern._object(); object != nil { + if object.class == "RegExp" && flags.IsUndefined() { + return pattern + } + } + return toValue_object(call.runtime.newRegExp(pattern, flags)) +} + +func builtinNewRegExp(self *_object, argumentList []Value) Value { + return toValue_object(self.runtime.newRegExp( + valueOfArrayIndex(argumentList, 0), + valueOfArrayIndex(argumentList, 1), + )) +} + +func builtinRegExp_toString(call FunctionCall) Value { + thisObject := call.thisObject() + source := thisObject.get("source").string() + flags := []byte{} + if thisObject.get("global").bool() { + flags = append(flags, 'g') + } + if thisObject.get("ignoreCase").bool() { + flags = append(flags, 'i') + } + if thisObject.get("multiline").bool() { + flags = append(flags, 'm') + } + return toValue_string(fmt.Sprintf("/%s/%s", source, flags)) +} + +func builtinRegExp_exec(call FunctionCall) Value { + thisObject := call.thisObject() + target := call.Argument(0).string() + match, result := execRegExp(thisObject, target) + if !match { + return nullValue + } + return toValue_object(execResultToArray(call.runtime, target, result)) +} + +func builtinRegExp_test(call FunctionCall) Value { + thisObject := call.thisObject() + target := call.Argument(0).string() + match, _ := execRegExp(thisObject, target) + return toValue_bool(match) +} + +func builtinRegExp_compile(call FunctionCall) Value { + // This (useless) function is deprecated, but is here to provide some + // semblance of compatibility. + // Caveat emptor: it may not be around for long. 
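+	// Unlike the real (long-deprecated) RegExp.prototype.compile, this stub
+	// leaves the receiver untouched and simply returns undefined.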
+ return Value{} +} diff --git a/vendor/github.com/robertkrimen/otto/builtin_string.go b/vendor/github.com/robertkrimen/otto/builtin_string.go new file mode 100644 index 000000000..6a1718458 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/builtin_string.go @@ -0,0 +1,500 @@ +package otto + +import ( + "bytes" + "regexp" + "strconv" + "strings" + "unicode/utf8" +) + +// String + +func stringValueFromStringArgumentList(argumentList []Value) Value { + if len(argumentList) > 0 { + return toValue_string(argumentList[0].string()) + } + return toValue_string("") +} + +func builtinString(call FunctionCall) Value { + return stringValueFromStringArgumentList(call.ArgumentList) +} + +func builtinNewString(self *_object, argumentList []Value) Value { + return toValue_object(self.runtime.newString(stringValueFromStringArgumentList(argumentList))) +} + +func builtinString_toString(call FunctionCall) Value { + return call.thisClassObject("String").primitiveValue() +} +func builtinString_valueOf(call FunctionCall) Value { + return call.thisClassObject("String").primitiveValue() +} + +func builtinString_fromCharCode(call FunctionCall) Value { + chrList := make([]uint16, len(call.ArgumentList)) + for index, value := range call.ArgumentList { + chrList[index] = toUint16(value) + } + return toValue_string16(chrList) +} + +func builtinString_charAt(call FunctionCall) Value { + checkObjectCoercible(call.runtime, call.This) + idx := int(call.Argument(0).number().int64) + chr := stringAt(call.This._object().stringValue(), idx) + if chr == utf8.RuneError { + return toValue_string("") + } + return toValue_string(string(chr)) +} + +func builtinString_charCodeAt(call FunctionCall) Value { + checkObjectCoercible(call.runtime, call.This) + idx := int(call.Argument(0).number().int64) + chr := stringAt(call.This._object().stringValue(), idx) + if chr == utf8.RuneError { + return NaNValue() + } + return toValue_uint16(uint16(chr)) +} + +func builtinString_concat(call FunctionCall) Value { + checkObjectCoercible(call.runtime, call.This) + var value bytes.Buffer + value.WriteString(call.This.string()) + for _, item := range call.ArgumentList { + value.WriteString(item.string()) + } + return toValue_string(value.String()) +} + +func builtinString_indexOf(call FunctionCall) Value { + checkObjectCoercible(call.runtime, call.This) + value := call.This.string() + target := call.Argument(0).string() + if 2 > len(call.ArgumentList) { + return toValue_int(strings.Index(value, target)) + } + start := toIntegerFloat(call.Argument(1)) + if 0 > start { + start = 0 + } else if start >= float64(len(value)) { + if target == "" { + return toValue_int(len(value)) + } + return toValue_int(-1) + } + index := strings.Index(value[int(start):], target) + if index >= 0 { + index += int(start) + } + return toValue_int(index) +} + +func builtinString_lastIndexOf(call FunctionCall) Value { + checkObjectCoercible(call.runtime, call.This) + value := call.This.string() + target := call.Argument(0).string() + if 2 > len(call.ArgumentList) || call.ArgumentList[1].IsUndefined() { + return toValue_int(strings.LastIndex(value, target)) + } + length := len(value) + if length == 0 { + return toValue_int(strings.LastIndex(value, target)) + } + start := call.ArgumentList[1].number() + if start.kind == numberInfinity { // FIXME + // startNumber is infinity, so start is the end of string (start = length) + return toValue_int(strings.LastIndex(value, target)) + } + if 0 > start.int64 { + start.int64 = 0 + } + end := int(start.int64) + len(target) + if end 
> length { + end = length + } + return toValue_int(strings.LastIndex(value[:end], target)) +} + +func builtinString_match(call FunctionCall) Value { + checkObjectCoercible(call.runtime, call.This) + target := call.This.string() + matcherValue := call.Argument(0) + matcher := matcherValue._object() + if !matcherValue.IsObject() || matcher.class != "RegExp" { + matcher = call.runtime.newRegExp(matcherValue, Value{}) + } + global := matcher.get("global").bool() + if !global { + match, result := execRegExp(matcher, target) + if !match { + return nullValue + } + return toValue_object(execResultToArray(call.runtime, target, result)) + } + + { + result := matcher.regExpValue().regularExpression.FindAllStringIndex(target, -1) + matchCount := len(result) + if result == nil { + matcher.put("lastIndex", toValue_int(0), true) + return Value{} // !match + } + matchCount = len(result) + valueArray := make([]Value, matchCount) + for index := 0; index < matchCount; index++ { + valueArray[index] = toValue_string(target[result[index][0]:result[index][1]]) + } + matcher.put("lastIndex", toValue_int(result[matchCount-1][1]), true) + return toValue_object(call.runtime.newArrayOf(valueArray)) + } +} + +var builtinString_replace_Regexp = regexp.MustCompile("\\$(?:[\\$\\&\\'\\`1-9]|0[1-9]|[1-9][0-9])") + +func builtinString_findAndReplaceString(input []byte, lastIndex int, match []int, target []byte, replaceValue []byte) (output []byte) { + matchCount := len(match) / 2 + output = input + if match[0] != lastIndex { + output = append(output, target[lastIndex:match[0]]...) + } + replacement := builtinString_replace_Regexp.ReplaceAllFunc(replaceValue, func(part []byte) []byte { + // TODO Check if match[0] or match[1] can be -1 in this scenario + switch part[1] { + case '$': + return []byte{'$'} + case '&': + return target[match[0]:match[1]] + case '`': + return target[:match[0]] + case '\'': + return target[match[1]:len(target)] + } + matchNumberParse, error := strconv.ParseInt(string(part[1:]), 10, 64) + matchNumber := int(matchNumberParse) + if error != nil || matchNumber >= matchCount { + return []byte{} + } + offset := 2 * matchNumber + if match[offset] != -1 { + return target[match[offset]:match[offset+1]] + } + return []byte{} // The empty string + }) + output = append(output, replacement...) + return output +} + +func builtinString_replace(call FunctionCall) Value { + checkObjectCoercible(call.runtime, call.This) + target := []byte(call.This.string()) + searchValue := call.Argument(0) + searchObject := searchValue._object() + + // TODO If a capture is -1? + var search *regexp.Regexp + global := false + find := 1 + if searchValue.IsObject() && searchObject.class == "RegExp" { + regExp := searchObject.regExpValue() + search = regExp.regularExpression + if regExp.global { + find = -1 + } + } else { + search = regexp.MustCompile(regexp.QuoteMeta(searchValue.string())) + } + + found := search.FindAllSubmatchIndex(target, find) + if found == nil { + return toValue_string(string(target)) // !match + } + + { + lastIndex := 0 + result := []byte{} + + replaceValue := call.Argument(1) + if replaceValue.isCallable() { + target := string(target) + replace := replaceValue._object() + for _, match := range found { + if match[0] != lastIndex { + result = append(result, target[lastIndex:match[0]]...) 
+ } + matchCount := len(match) / 2 + argumentList := make([]Value, matchCount+2) + for index := 0; index < matchCount; index++ { + offset := 2 * index + if match[offset] != -1 { + argumentList[index] = toValue_string(target[match[offset]:match[offset+1]]) + } else { + argumentList[index] = Value{} + } + } + argumentList[matchCount+0] = toValue_int(match[0]) + argumentList[matchCount+1] = toValue_string(target) + replacement := replace.call(Value{}, argumentList, false, nativeFrame).string() + result = append(result, []byte(replacement)...) + lastIndex = match[1] + } + + } else { + replace := []byte(replaceValue.string()) + for _, match := range found { + result = builtinString_findAndReplaceString(result, lastIndex, match, target, replace) + lastIndex = match[1] + } + } + + if lastIndex != len(target) { + result = append(result, target[lastIndex:]...) + } + + if global && searchObject != nil { + searchObject.put("lastIndex", toValue_int(lastIndex), true) + } + + return toValue_string(string(result)) + } +} + +func builtinString_search(call FunctionCall) Value { + checkObjectCoercible(call.runtime, call.This) + target := call.This.string() + searchValue := call.Argument(0) + search := searchValue._object() + if !searchValue.IsObject() || search.class != "RegExp" { + search = call.runtime.newRegExp(searchValue, Value{}) + } + result := search.regExpValue().regularExpression.FindStringIndex(target) + if result == nil { + return toValue_int(-1) + } + return toValue_int(result[0]) +} + +func stringSplitMatch(target string, targetLength int64, index uint, search string, searchLength int64) (bool, uint) { + if int64(index)+searchLength > searchLength { + return false, 0 + } + found := strings.Index(target[index:], search) + if 0 > found { + return false, 0 + } + return true, uint(found) +} + +func builtinString_split(call FunctionCall) Value { + checkObjectCoercible(call.runtime, call.This) + target := call.This.string() + + separatorValue := call.Argument(0) + limitValue := call.Argument(1) + limit := -1 + if limitValue.IsDefined() { + limit = int(toUint32(limitValue)) + } + + if limit == 0 { + return toValue_object(call.runtime.newArray(0)) + } + + if separatorValue.IsUndefined() { + return toValue_object(call.runtime.newArrayOf([]Value{toValue_string(target)})) + } + + if separatorValue.isRegExp() { + targetLength := len(target) + search := separatorValue._object().regExpValue().regularExpression + valueArray := []Value{} + result := search.FindAllStringSubmatchIndex(target, -1) + lastIndex := 0 + found := 0 + + for _, match := range result { + if match[0] == match[1] { + // FIXME Ugh, this is a hack + if match[0] == 0 || match[0] == targetLength { + continue + } + } + + if lastIndex != match[0] { + valueArray = append(valueArray, toValue_string(target[lastIndex:match[0]])) + found++ + } else if lastIndex == match[0] { + if lastIndex != -1 { + valueArray = append(valueArray, toValue_string("")) + found++ + } + } + + lastIndex = match[1] + if found == limit { + goto RETURN + } + + captureCount := len(match) / 2 + for index := 1; index < captureCount; index++ { + offset := index * 2 + value := Value{} + if match[offset] != -1 { + value = toValue_string(target[match[offset]:match[offset+1]]) + } + valueArray = append(valueArray, value) + found++ + if found == limit { + goto RETURN + } + } + } + + if found != limit { + if lastIndex != targetLength { + valueArray = append(valueArray, toValue_string(target[lastIndex:targetLength])) + } else { + valueArray = append(valueArray, toValue_string("")) + } 
+ } + + RETURN: + return toValue_object(call.runtime.newArrayOf(valueArray)) + + } else { + separator := separatorValue.string() + + splitLimit := limit + excess := false + if limit > 0 { + splitLimit = limit + 1 + excess = true + } + + split := strings.SplitN(target, separator, splitLimit) + + if excess && len(split) > limit { + split = split[:limit] + } + + valueArray := make([]Value, len(split)) + for index, value := range split { + valueArray[index] = toValue_string(value) + } + + return toValue_object(call.runtime.newArrayOf(valueArray)) + } +} + +func builtinString_slice(call FunctionCall) Value { + checkObjectCoercible(call.runtime, call.This) + target := call.This.string() + + length := int64(len(target)) + start, end := rangeStartEnd(call.ArgumentList, length, false) + if end-start <= 0 { + return toValue_string("") + } + return toValue_string(target[start:end]) +} + +func builtinString_substring(call FunctionCall) Value { + checkObjectCoercible(call.runtime, call.This) + target := call.This.string() + + length := int64(len(target)) + start, end := rangeStartEnd(call.ArgumentList, length, true) + if start > end { + start, end = end, start + } + return toValue_string(target[start:end]) +} + +func builtinString_substr(call FunctionCall) Value { + target := call.This.string() + + size := int64(len(target)) + start, length := rangeStartLength(call.ArgumentList, size) + + if start >= size { + return toValue_string("") + } + + if length <= 0 { + return toValue_string("") + } + + if start+length >= size { + // Cap length to be to the end of the string + // start = 3, length = 5, size = 4 [0, 1, 2, 3] + // 4 - 3 = 1 + // target[3:4] + length = size - start + } + + return toValue_string(target[start : start+length]) +} + +func builtinString_toLowerCase(call FunctionCall) Value { + checkObjectCoercible(call.runtime, call.This) + return toValue_string(strings.ToLower(call.This.string())) +} + +func builtinString_toUpperCase(call FunctionCall) Value { + checkObjectCoercible(call.runtime, call.This) + return toValue_string(strings.ToUpper(call.This.string())) +} + +// 7.2 Table 2 — Whitespace Characters & 7.3 Table 3 - Line Terminator Characters +const builtinString_trim_whitespace = "\u0009\u000A\u000B\u000C\u000D\u0020\u00A0\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u2028\u2029\u202F\u205F\u3000\uFEFF" + +func builtinString_trim(call FunctionCall) Value { + checkObjectCoercible(call.runtime, call.This) + return toValue(strings.Trim(call.This.string(), + builtinString_trim_whitespace)) +} + +// Mozilla extension, not ECMAScript 5 +func builtinString_trimLeft(call FunctionCall) Value { + checkObjectCoercible(call.runtime, call.This) + return toValue(strings.TrimLeft(call.This.string(), + builtinString_trim_whitespace)) +} + +// Mozilla extension, not ECMAScript 5 +func builtinString_trimRight(call FunctionCall) Value { + checkObjectCoercible(call.runtime, call.This) + return toValue(strings.TrimRight(call.This.string(), + builtinString_trim_whitespace)) +} + +func builtinString_localeCompare(call FunctionCall) Value { + checkObjectCoercible(call.runtime, call.This) + this := call.This.string() + that := call.Argument(0).string() + if this < that { + return toValue_int(-1) + } else if this == that { + return toValue_int(0) + } + return toValue_int(1) +} + +/* +An alternate version of String.trim +func builtinString_trim(call FunctionCall) Value { + checkObjectCoercible(call.This) + return toValue_string(strings.TrimFunc(call.string(.This), 
isWhiteSpaceOrLineTerminator)) +} +*/ + +func builtinString_toLocaleLowerCase(call FunctionCall) Value { + return builtinString_toLowerCase(call) +} + +func builtinString_toLocaleUpperCase(call FunctionCall) Value { + return builtinString_toUpperCase(call) +} diff --git a/vendor/github.com/robertkrimen/otto/clone.go b/vendor/github.com/robertkrimen/otto/clone.go new file mode 100644 index 000000000..23b59f8ac --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/clone.go @@ -0,0 +1,173 @@ +package otto + +import ( + "fmt" +) + +type _clone struct { + runtime *_runtime + _object map[*_object]*_object + _objectStash map[*_objectStash]*_objectStash + _dclStash map[*_dclStash]*_dclStash + _fnStash map[*_fnStash]*_fnStash +} + +func (in *_runtime) clone() *_runtime { + + in.lck.Lock() + defer in.lck.Unlock() + + out := &_runtime{ + debugger: in.debugger, + random: in.random, + stackLimit: in.stackLimit, + traceLimit: in.traceLimit, + } + + clone := _clone{ + runtime: out, + _object: make(map[*_object]*_object), + _objectStash: make(map[*_objectStash]*_objectStash), + _dclStash: make(map[*_dclStash]*_dclStash), + _fnStash: make(map[*_fnStash]*_fnStash), + } + + globalObject := clone.object(in.globalObject) + out.globalStash = out.newObjectStash(globalObject, nil) + out.globalObject = globalObject + out.global = _global{ + clone.object(in.global.Object), + clone.object(in.global.Function), + clone.object(in.global.Array), + clone.object(in.global.String), + clone.object(in.global.Boolean), + clone.object(in.global.Number), + clone.object(in.global.Math), + clone.object(in.global.Date), + clone.object(in.global.RegExp), + clone.object(in.global.Error), + clone.object(in.global.EvalError), + clone.object(in.global.TypeError), + clone.object(in.global.RangeError), + clone.object(in.global.ReferenceError), + clone.object(in.global.SyntaxError), + clone.object(in.global.URIError), + clone.object(in.global.JSON), + + clone.object(in.global.ObjectPrototype), + clone.object(in.global.FunctionPrototype), + clone.object(in.global.ArrayPrototype), + clone.object(in.global.StringPrototype), + clone.object(in.global.BooleanPrototype), + clone.object(in.global.NumberPrototype), + clone.object(in.global.DatePrototype), + clone.object(in.global.RegExpPrototype), + clone.object(in.global.ErrorPrototype), + clone.object(in.global.EvalErrorPrototype), + clone.object(in.global.TypeErrorPrototype), + clone.object(in.global.RangeErrorPrototype), + clone.object(in.global.ReferenceErrorPrototype), + clone.object(in.global.SyntaxErrorPrototype), + clone.object(in.global.URIErrorPrototype), + } + + out.eval = out.globalObject.property["eval"].value.(Value).value.(*_object) + out.globalObject.prototype = out.global.ObjectPrototype + + // Not sure if this is necessary, but give some help to the GC + clone.runtime = nil + clone._object = nil + clone._objectStash = nil + clone._dclStash = nil + clone._fnStash = nil + + return out +} + +func (clone *_clone) object(in *_object) *_object { + if out, exists := clone._object[in]; exists { + return out + } + out := &_object{} + clone._object[in] = out + return in.objectClass.clone(in, out, clone) +} + +func (clone *_clone) dclStash(in *_dclStash) (*_dclStash, bool) { + if out, exists := clone._dclStash[in]; exists { + return out, true + } + out := &_dclStash{} + clone._dclStash[in] = out + return out, false +} + +func (clone *_clone) objectStash(in *_objectStash) (*_objectStash, bool) { + if out, exists := clone._objectStash[in]; exists { + return out, true + } + out := 
&_objectStash{} + clone._objectStash[in] = out + return out, false +} + +func (clone *_clone) fnStash(in *_fnStash) (*_fnStash, bool) { + if out, exists := clone._fnStash[in]; exists { + return out, true + } + out := &_fnStash{} + clone._fnStash[in] = out + return out, false +} + +func (clone *_clone) value(in Value) Value { + out := in + switch value := in.value.(type) { + case *_object: + out.value = clone.object(value) + } + return out +} + +func (clone *_clone) valueArray(in []Value) []Value { + out := make([]Value, len(in)) + for index, value := range in { + out[index] = clone.value(value) + } + return out +} + +func (clone *_clone) stash(in _stash) _stash { + if in == nil { + return nil + } + return in.clone(clone) +} + +func (clone *_clone) property(in _property) _property { + out := in + + switch value := in.value.(type) { + case Value: + out.value = clone.value(value) + case _propertyGetSet: + p := _propertyGetSet{} + if value[0] != nil { + p[0] = clone.object(value[0]) + } + if value[1] != nil { + p[1] = clone.object(value[1]) + } + out.value = p + default: + panic(fmt.Errorf("in.value.(Value) != true; in.value is %T", in.value)) + } + + return out +} + +func (clone *_clone) dclProperty(in _dclProperty) _dclProperty { + out := in + out.value = clone.value(in.value) + return out +} diff --git a/vendor/github.com/robertkrimen/otto/cmpl.go b/vendor/github.com/robertkrimen/otto/cmpl.go new file mode 100644 index 000000000..c191b4527 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/cmpl.go @@ -0,0 +1,24 @@ +package otto + +import ( + "github.com/robertkrimen/otto/ast" + "github.com/robertkrimen/otto/file" +) + +type _file struct { + name string + src string + base int // This will always be 1 or greater +} + +type _compiler struct { + file *file.File + program *ast.Program +} + +func (cmpl *_compiler) parse() *_nodeProgram { + if cmpl.program != nil { + cmpl.file = cmpl.program.File + } + return cmpl._parse(cmpl.program) +} diff --git a/vendor/github.com/robertkrimen/otto/cmpl_evaluate.go b/vendor/github.com/robertkrimen/otto/cmpl_evaluate.go new file mode 100644 index 000000000..6741bf394 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/cmpl_evaluate.go @@ -0,0 +1,96 @@ +package otto + +import ( + "strconv" +) + +func (self *_runtime) cmpl_evaluate_nodeProgram(node *_nodeProgram, eval bool) Value { + if !eval { + self.enterGlobalScope() + defer func() { + self.leaveScope() + }() + } + self.cmpl_functionDeclaration(node.functionList) + self.cmpl_variableDeclaration(node.varList) + self.scope.frame.file = node.file + return self.cmpl_evaluate_nodeStatementList(node.body) +} + +func (self *_runtime) cmpl_call_nodeFunction(function *_object, stash *_fnStash, node *_nodeFunctionLiteral, this Value, argumentList []Value) Value { + + indexOfParameterName := make([]string, len(argumentList)) + // function(abc, def, ghi) + // indexOfParameterName[0] = "abc" + // indexOfParameterName[1] = "def" + // indexOfParameterName[2] = "ghi" + // ... 
+ + argumentsFound := false + for index, name := range node.parameterList { + if name == "arguments" { + argumentsFound = true + } + value := Value{} + if index < len(argumentList) { + value = argumentList[index] + indexOfParameterName[index] = name + } + // strict = false + self.scope.lexical.setValue(name, value, false) + } + + if !argumentsFound { + arguments := self.newArgumentsObject(indexOfParameterName, stash, len(argumentList)) + arguments.defineProperty("callee", toValue_object(function), 0101, false) + stash.arguments = arguments + // strict = false + self.scope.lexical.setValue("arguments", toValue_object(arguments), false) + for index, _ := range argumentList { + if index < len(node.parameterList) { + continue + } + indexAsString := strconv.FormatInt(int64(index), 10) + arguments.defineProperty(indexAsString, argumentList[index], 0111, false) + } + } + + self.cmpl_functionDeclaration(node.functionList) + self.cmpl_variableDeclaration(node.varList) + + result := self.cmpl_evaluate_nodeStatement(node.body) + if result.kind == valueResult { + return result + } + + return Value{} +} + +func (self *_runtime) cmpl_functionDeclaration(list []*_nodeFunctionLiteral) { + executionContext := self.scope + eval := executionContext.eval + stash := executionContext.variable + + for _, function := range list { + name := function.name + value := self.cmpl_evaluate_nodeExpression(function) + if !stash.hasBinding(name) { + stash.createBinding(name, eval == true, value) + } else { + // TODO 10.5.5.e + stash.setBinding(name, value, false) // TODO strict + } + } +} + +func (self *_runtime) cmpl_variableDeclaration(list []string) { + executionContext := self.scope + eval := executionContext.eval + stash := executionContext.variable + + for _, name := range list { + if !stash.hasBinding(name) { + stash.createBinding(name, eval == true, Value{}) // TODO strict? 
+ } + } +} diff --git a/vendor/github.com/robertkrimen/otto/cmpl_evaluate_expression.go b/vendor/github.com/robertkrimen/otto/cmpl_evaluate_expression.go new file mode 100644 index 000000000..8586a484f --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/cmpl_evaluate_expression.go @@ -0,0 +1,460 @@ +package otto + +import ( + "fmt" + "math" + "runtime" + + "github.com/robertkrimen/otto/token" +) + +func (self *_runtime) cmpl_evaluate_nodeExpression(node _nodeExpression) Value { + // Allow interpreter interruption + // If the Interrupt channel is nil, then + // we avoid runtime.Gosched() overhead (if any) + // FIXME: Test this + if self.otto.Interrupt != nil { + runtime.Gosched() + select { + case value := <-self.otto.Interrupt: + value() + default: + } + } + + switch node := node.(type) { + + case *_nodeArrayLiteral: + return self.cmpl_evaluate_nodeArrayLiteral(node) + + case *_nodeAssignExpression: + return self.cmpl_evaluate_nodeAssignExpression(node) + + case *_nodeBinaryExpression: + if node.comparison { + return self.cmpl_evaluate_nodeBinaryExpression_comparison(node) + } else { + return self.cmpl_evaluate_nodeBinaryExpression(node) + } + + case *_nodeBracketExpression: + return self.cmpl_evaluate_nodeBracketExpression(node) + + case *_nodeCallExpression: + return self.cmpl_evaluate_nodeCallExpression(node, nil) + + case *_nodeConditionalExpression: + return self.cmpl_evaluate_nodeConditionalExpression(node) + + case *_nodeDotExpression: + return self.cmpl_evaluate_nodeDotExpression(node) + + case *_nodeFunctionLiteral: + var local = self.scope.lexical + if node.name != "" { + local = self.newDeclarationStash(local) + } + + value := toValue_object(self.newNodeFunction(node, local)) + if node.name != "" { + local.createBinding(node.name, false, value) + } + return value + + case *_nodeIdentifier: + name := node.name + // TODO Should be true or false (strictness) depending on context + // getIdentifierReference should not return nil, but we check anyway and panic + // so as not to propagate the nil into something else + reference := getIdentifierReference(self, self.scope.lexical, name, false, _at(node.idx)) + if reference == nil { + // Should never get here! 
+ panic(hereBeDragons("referenceError == nil: " + name)) + } + return toValue(reference) + + case *_nodeLiteral: + return node.value + + case *_nodeNewExpression: + return self.cmpl_evaluate_nodeNewExpression(node) + + case *_nodeObjectLiteral: + return self.cmpl_evaluate_nodeObjectLiteral(node) + + case *_nodeRegExpLiteral: + return toValue_object(self._newRegExp(node.pattern, node.flags)) + + case *_nodeSequenceExpression: + return self.cmpl_evaluate_nodeSequenceExpression(node) + + case *_nodeThisExpression: + return toValue_object(self.scope.this) + + case *_nodeUnaryExpression: + return self.cmpl_evaluate_nodeUnaryExpression(node) + + case *_nodeVariableExpression: + return self.cmpl_evaluate_nodeVariableExpression(node) + } + + panic(fmt.Errorf("Here be dragons: evaluate_nodeExpression(%T)", node)) +} + +func (self *_runtime) cmpl_evaluate_nodeArrayLiteral(node *_nodeArrayLiteral) Value { + + valueArray := []Value{} + + for _, node := range node.value { + if node == nil { + valueArray = append(valueArray, emptyValue) + } else { + valueArray = append(valueArray, self.cmpl_evaluate_nodeExpression(node).resolve()) + } + } + + result := self.newArrayOf(valueArray) + + return toValue_object(result) +} + +func (self *_runtime) cmpl_evaluate_nodeAssignExpression(node *_nodeAssignExpression) Value { + + left := self.cmpl_evaluate_nodeExpression(node.left) + right := self.cmpl_evaluate_nodeExpression(node.right) + rightValue := right.resolve() + + result := rightValue + if node.operator != token.ASSIGN { + result = self.calculateBinaryExpression(node.operator, left, rightValue) + } + + self.putValue(left.reference(), result) + + return result +} + +func (self *_runtime) cmpl_evaluate_nodeBinaryExpression(node *_nodeBinaryExpression) Value { + + left := self.cmpl_evaluate_nodeExpression(node.left) + leftValue := left.resolve() + + switch node.operator { + // Logical + case token.LOGICAL_AND: + if !leftValue.bool() { + return leftValue + } + right := self.cmpl_evaluate_nodeExpression(node.right) + return right.resolve() + case token.LOGICAL_OR: + if leftValue.bool() { + return leftValue + } + right := self.cmpl_evaluate_nodeExpression(node.right) + return right.resolve() + } + + return self.calculateBinaryExpression(node.operator, leftValue, self.cmpl_evaluate_nodeExpression(node.right)) +} + +func (self *_runtime) cmpl_evaluate_nodeBinaryExpression_comparison(node *_nodeBinaryExpression) Value { + + left := self.cmpl_evaluate_nodeExpression(node.left).resolve() + right := self.cmpl_evaluate_nodeExpression(node.right).resolve() + + return toValue_bool(self.calculateComparison(node.operator, left, right)) +} + +func (self *_runtime) cmpl_evaluate_nodeBracketExpression(node *_nodeBracketExpression) Value { + target := self.cmpl_evaluate_nodeExpression(node.left) + targetValue := target.resolve() + member := self.cmpl_evaluate_nodeExpression(node.member) + memberValue := member.resolve() + + // TODO Pass in base value as-is, and defer toObject till later? 
+ object, err := self.objectCoerce(targetValue) + if err != nil { + panic(self.panicTypeError("Cannot access member '%s' of %s", memberValue.string(), err.Error(), _at(node.idx))) + } + return toValue(newPropertyReference(self, object, memberValue.string(), false, _at(node.idx))) +} + +func (self *_runtime) cmpl_evaluate_nodeCallExpression(node *_nodeCallExpression, withArgumentList []interface{}) Value { + rt := self + this := Value{} + callee := self.cmpl_evaluate_nodeExpression(node.callee) + + argumentList := []Value{} + if withArgumentList != nil { + argumentList = self.toValueArray(withArgumentList...) + } else { + for _, argumentNode := range node.argumentList { + argumentList = append(argumentList, self.cmpl_evaluate_nodeExpression(argumentNode).resolve()) + } + } + + rf := callee.reference() + vl := callee.resolve() + + eval := false // Whether this call is a (candidate for) direct call to eval + name := "" + if rf != nil { + switch rf := rf.(type) { + case *_propertyReference: + name = rf.name + object := rf.base + this = toValue_object(object) + eval = rf.name == "eval" // Possible direct eval + case *_stashReference: + // TODO ImplicitThisValue + name = rf.name + eval = rf.name == "eval" // Possible direct eval + default: + // FIXME? + panic(rt.panicTypeError("Here be dragons")) + } + } + + at := _at(-1) + switch callee := node.callee.(type) { + case *_nodeIdentifier: + at = _at(callee.idx) + case *_nodeDotExpression: + at = _at(callee.idx) + case *_nodeBracketExpression: + at = _at(callee.idx) + } + + frame := _frame{ + callee: name, + file: self.scope.frame.file, + } + + if !vl.IsFunction() { + if name == "" { + // FIXME Maybe typeof? + panic(rt.panicTypeError("%v is not a function", vl, at)) + } + panic(rt.panicTypeError("'%s' is not a function", name, at)) + } + + self.scope.frame.offset = int(at) + + return vl._object().call(this, argumentList, eval, frame) +} + +func (self *_runtime) cmpl_evaluate_nodeConditionalExpression(node *_nodeConditionalExpression) Value { + test := self.cmpl_evaluate_nodeExpression(node.test) + testValue := test.resolve() + if testValue.bool() { + return self.cmpl_evaluate_nodeExpression(node.consequent) + } + return self.cmpl_evaluate_nodeExpression(node.alternate) +} + +func (self *_runtime) cmpl_evaluate_nodeDotExpression(node *_nodeDotExpression) Value { + target := self.cmpl_evaluate_nodeExpression(node.left) + targetValue := target.resolve() + // TODO Pass in base value as-is, and defer toObject till later? 
+ object, err := self.objectCoerce(targetValue) + if err != nil { + panic(self.panicTypeError("Cannot access member '%s' of %s", node.identifier, err.Error(), _at(node.idx))) + } + return toValue(newPropertyReference(self, object, node.identifier, false, _at(node.idx))) +} + +func (self *_runtime) cmpl_evaluate_nodeNewExpression(node *_nodeNewExpression) Value { + rt := self + callee := self.cmpl_evaluate_nodeExpression(node.callee) + + argumentList := []Value{} + for _, argumentNode := range node.argumentList { + argumentList = append(argumentList, self.cmpl_evaluate_nodeExpression(argumentNode).resolve()) + } + + rf := callee.reference() + vl := callee.resolve() + + name := "" + if rf != nil { + switch rf := rf.(type) { + case *_propertyReference: + name = rf.name + case *_stashReference: + name = rf.name + default: + panic(rt.panicTypeError("Here be dragons")) + } + } + + at := _at(-1) + switch callee := node.callee.(type) { + case *_nodeIdentifier: + at = _at(callee.idx) + case *_nodeDotExpression: + at = _at(callee.idx) + case *_nodeBracketExpression: + at = _at(callee.idx) + } + + if !vl.IsFunction() { + if name == "" { + // FIXME Maybe typeof? + panic(rt.panicTypeError("%v is not a function", vl, at)) + } + panic(rt.panicTypeError("'%s' is not a function", name, at)) + } + + self.scope.frame.offset = int(at) + + return vl._object().construct(argumentList) +} + +func (self *_runtime) cmpl_evaluate_nodeObjectLiteral(node *_nodeObjectLiteral) Value { + + result := self.newObject() + + for _, property := range node.value { + switch property.kind { + case "value": + result.defineProperty(property.key, self.cmpl_evaluate_nodeExpression(property.value).resolve(), 0111, false) + case "get": + getter := self.newNodeFunction(property.value.(*_nodeFunctionLiteral), self.scope.lexical) + descriptor := _property{} + descriptor.mode = 0211 + descriptor.value = _propertyGetSet{getter, nil} + result.defineOwnProperty(property.key, descriptor, false) + case "set": + setter := self.newNodeFunction(property.value.(*_nodeFunctionLiteral), self.scope.lexical) + descriptor := _property{} + descriptor.mode = 0211 + descriptor.value = _propertyGetSet{nil, setter} + result.defineOwnProperty(property.key, descriptor, false) + default: + panic(fmt.Errorf("Here be dragons: evaluate_nodeObjectLiteral: invalid property.Kind: %v", property.kind)) + } + } + + return toValue_object(result) +} + +func (self *_runtime) cmpl_evaluate_nodeSequenceExpression(node *_nodeSequenceExpression) Value { + var result Value + for _, node := range node.sequence { + result = self.cmpl_evaluate_nodeExpression(node) + result = result.resolve() + } + return result +} + +func (self *_runtime) cmpl_evaluate_nodeUnaryExpression(node *_nodeUnaryExpression) Value { + + target := self.cmpl_evaluate_nodeExpression(node.operand) + switch node.operator { + case token.TYPEOF, token.DELETE: + if target.kind == valueReference && target.reference().invalid() { + if node.operator == token.TYPEOF { + return toValue_string("undefined") + } + return trueValue + } + } + + switch node.operator { + case token.NOT: + targetValue := target.resolve() + if targetValue.bool() { + return falseValue + } + return trueValue + case token.BITWISE_NOT: + targetValue := target.resolve() + integerValue := toInt32(targetValue) + return toValue_int32(^integerValue) + case token.PLUS: + targetValue := target.resolve() + return toValue_float64(targetValue.float64()) + case token.MINUS: + targetValue := target.resolve() + value := targetValue.float64() + // TODO Test this 
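+		// Unary minus: negate by flipping the sign bit with Copysign, which
+		// also turns +0 into -0 (and vice versa) and leaves NaN as NaN.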
+ sign := float64(-1) + if math.Signbit(value) { + sign = 1 + } + return toValue_float64(math.Copysign(value, sign)) + case token.INCREMENT: + targetValue := target.resolve() + if node.postfix { + // Postfix++ + oldValue := targetValue.float64() + newValue := toValue_float64(+1 + oldValue) + self.putValue(target.reference(), newValue) + return toValue_float64(oldValue) + } else { + // ++Prefix + newValue := toValue_float64(+1 + targetValue.float64()) + self.putValue(target.reference(), newValue) + return newValue + } + case token.DECREMENT: + targetValue := target.resolve() + if node.postfix { + // Postfix-- + oldValue := targetValue.float64() + newValue := toValue_float64(-1 + oldValue) + self.putValue(target.reference(), newValue) + return toValue_float64(oldValue) + } else { + // --Prefix + newValue := toValue_float64(-1 + targetValue.float64()) + self.putValue(target.reference(), newValue) + return newValue + } + case token.VOID: + target.resolve() // FIXME Side effect? + return Value{} + case token.DELETE: + reference := target.reference() + if reference == nil { + return trueValue + } + return toValue_bool(target.reference().delete()) + case token.TYPEOF: + targetValue := target.resolve() + switch targetValue.kind { + case valueUndefined: + return toValue_string("undefined") + case valueNull: + return toValue_string("object") + case valueBoolean: + return toValue_string("boolean") + case valueNumber: + return toValue_string("number") + case valueString: + return toValue_string("string") + case valueObject: + if targetValue._object().isCall() { + return toValue_string("function") + } + return toValue_string("object") + default: + // FIXME ? + } + } + + panic(hereBeDragons()) +} + +func (self *_runtime) cmpl_evaluate_nodeVariableExpression(node *_nodeVariableExpression) Value { + if node.initializer != nil { + // FIXME If reference is nil + left := getIdentifierReference(self, self.scope.lexical, node.name, false, _at(node.idx)) + right := self.cmpl_evaluate_nodeExpression(node.initializer) + rightValue := right.resolve() + + self.putValue(left, rightValue) + } + return toValue_string(node.name) +} diff --git a/vendor/github.com/robertkrimen/otto/cmpl_evaluate_statement.go b/vendor/github.com/robertkrimen/otto/cmpl_evaluate_statement.go new file mode 100644 index 000000000..e16c6ac6c --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/cmpl_evaluate_statement.go @@ -0,0 +1,424 @@ +package otto + +import ( + "fmt" + "runtime" + + "github.com/robertkrimen/otto/token" +) + +func (self *_runtime) cmpl_evaluate_nodeStatement(node _nodeStatement) Value { + // Allow interpreter interruption + // If the Interrupt channel is nil, then + // we avoid runtime.Gosched() overhead (if any) + // FIXME: Test this + if self.otto.Interrupt != nil { + runtime.Gosched() + select { + case value := <-self.otto.Interrupt: + value() + default: + } + } + + switch node := node.(type) { + + case *_nodeBlockStatement: + labels := self.labels + self.labels = nil + + value := self.cmpl_evaluate_nodeStatementList(node.list) + switch value.kind { + case valueResult: + switch value.evaluateBreak(labels) { + case resultBreak: + return emptyValue + } + } + return value + + case *_nodeBranchStatement: + target := node.label + switch node.branch { // FIXME Maybe node.kind? node.operator? 
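+		// break/continue are not thrown; they bubble up as result values
+		// carrying the optional label, and the enclosing loop or labelled
+		// statement evaluator decides whether to honour or propagate them.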
+ case token.BREAK: + return toValue(newBreakResult(target)) + case token.CONTINUE: + return toValue(newContinueResult(target)) + } + + case *_nodeDebuggerStatement: + if self.debugger != nil { + self.debugger(self.otto) + } + return emptyValue // Nothing happens. + + case *_nodeDoWhileStatement: + return self.cmpl_evaluate_nodeDoWhileStatement(node) + + case *_nodeEmptyStatement: + return emptyValue + + case *_nodeExpressionStatement: + return self.cmpl_evaluate_nodeExpression(node.expression) + + case *_nodeForInStatement: + return self.cmpl_evaluate_nodeForInStatement(node) + + case *_nodeForStatement: + return self.cmpl_evaluate_nodeForStatement(node) + + case *_nodeIfStatement: + return self.cmpl_evaluate_nodeIfStatement(node) + + case *_nodeLabelledStatement: + self.labels = append(self.labels, node.label) + defer func() { + if len(self.labels) > 0 { + self.labels = self.labels[:len(self.labels)-1] // Pop the label + } else { + self.labels = nil + } + }() + return self.cmpl_evaluate_nodeStatement(node.statement) + + case *_nodeReturnStatement: + if node.argument != nil { + return toValue(newReturnResult(self.cmpl_evaluate_nodeExpression(node.argument).resolve())) + } + return toValue(newReturnResult(Value{})) + + case *_nodeSwitchStatement: + return self.cmpl_evaluate_nodeSwitchStatement(node) + + case *_nodeThrowStatement: + value := self.cmpl_evaluate_nodeExpression(node.argument).resolve() + panic(newException(value)) + + case *_nodeTryStatement: + return self.cmpl_evaluate_nodeTryStatement(node) + + case *_nodeVariableStatement: + // Variables are already defined, this is initialization only + for _, variable := range node.list { + self.cmpl_evaluate_nodeVariableExpression(variable.(*_nodeVariableExpression)) + } + return emptyValue + + case *_nodeWhileStatement: + return self.cmpl_evaluate_nodeWhileStatement(node) + + case *_nodeWithStatement: + return self.cmpl_evaluate_nodeWithStatement(node) + + } + + panic(fmt.Errorf("Here be dragons: evaluate_nodeStatement(%T)", node)) +} + +func (self *_runtime) cmpl_evaluate_nodeStatementList(list []_nodeStatement) Value { + var result Value + for _, node := range list { + value := self.cmpl_evaluate_nodeStatement(node) + switch value.kind { + case valueResult: + return value + case valueEmpty: + default: + // We have getValue here to (for example) trigger a + // ReferenceError (of the not defined variety) + // Not sure if this is the best way to error out early + // for such errors or if there is a better way + // TODO Do we still need this? + result = value.resolve() + } + } + return result +} + +func (self *_runtime) cmpl_evaluate_nodeDoWhileStatement(node *_nodeDoWhileStatement) Value { + + labels := append(self.labels, "") + self.labels = nil + + test := node.test + + result := emptyValue +resultBreak: + for { + for _, node := range node.body { + value := self.cmpl_evaluate_nodeStatement(node) + switch value.kind { + case valueResult: + switch value.evaluateBreakContinue(labels) { + case resultReturn: + return value + case resultBreak: + break resultBreak + case resultContinue: + goto resultContinue + } + case valueEmpty: + default: + result = value + } + } + resultContinue: + if !self.cmpl_evaluate_nodeExpression(test).resolve().bool() { + // Stahp: do ... 
while (false) + break + } + } + return result +} + +func (self *_runtime) cmpl_evaluate_nodeForInStatement(node *_nodeForInStatement) Value { + + labels := append(self.labels, "") + self.labels = nil + + source := self.cmpl_evaluate_nodeExpression(node.source) + sourceValue := source.resolve() + + switch sourceValue.kind { + case valueUndefined, valueNull: + return emptyValue + } + + sourceObject := self.toObject(sourceValue) + + into := node.into + body := node.body + + result := emptyValue + object := sourceObject + for object != nil { + enumerateValue := emptyValue + object.enumerate(false, func(name string) bool { + into := self.cmpl_evaluate_nodeExpression(into) + // In the case of: for (var abc in def) ... + if into.reference() == nil { + identifier := into.string() + // TODO Should be true or false (strictness) depending on context + into = toValue(getIdentifierReference(self, self.scope.lexical, identifier, false, -1)) + } + self.putValue(into.reference(), toValue_string(name)) + for _, node := range body { + value := self.cmpl_evaluate_nodeStatement(node) + switch value.kind { + case valueResult: + switch value.evaluateBreakContinue(labels) { + case resultReturn: + enumerateValue = value + return false + case resultBreak: + object = nil + return false + case resultContinue: + return true + } + case valueEmpty: + default: + enumerateValue = value + } + } + return true + }) + if object == nil { + break + } + object = object.prototype + if !enumerateValue.isEmpty() { + result = enumerateValue + } + } + return result +} + +func (self *_runtime) cmpl_evaluate_nodeForStatement(node *_nodeForStatement) Value { + + labels := append(self.labels, "") + self.labels = nil + + initializer := node.initializer + test := node.test + update := node.update + body := node.body + + if initializer != nil { + initialResult := self.cmpl_evaluate_nodeExpression(initializer) + initialResult.resolve() // Side-effect trigger + } + + result := emptyValue +resultBreak: + for { + if test != nil { + testResult := self.cmpl_evaluate_nodeExpression(test) + testResultValue := testResult.resolve() + if testResultValue.bool() == false { + break + } + } + for _, node := range body { + value := self.cmpl_evaluate_nodeStatement(node) + switch value.kind { + case valueResult: + switch value.evaluateBreakContinue(labels) { + case resultReturn: + return value + case resultBreak: + break resultBreak + case resultContinue: + goto resultContinue + } + case valueEmpty: + default: + result = value + } + } + resultContinue: + if update != nil { + updateResult := self.cmpl_evaluate_nodeExpression(update) + updateResult.resolve() // Side-effect trigger + } + } + return result +} + +func (self *_runtime) cmpl_evaluate_nodeIfStatement(node *_nodeIfStatement) Value { + test := self.cmpl_evaluate_nodeExpression(node.test) + testValue := test.resolve() + if testValue.bool() { + return self.cmpl_evaluate_nodeStatement(node.consequent) + } else if node.alternate != nil { + return self.cmpl_evaluate_nodeStatement(node.alternate) + } + + return emptyValue +} + +func (self *_runtime) cmpl_evaluate_nodeSwitchStatement(node *_nodeSwitchStatement) Value { + + labels := append(self.labels, "") + self.labels = nil + + discriminantResult := self.cmpl_evaluate_nodeExpression(node.discriminant) + target := node.default_ + + for index, clause := range node.body { + test := clause.test + if test != nil { + if self.calculateComparison(token.STRICT_EQUAL, discriminantResult, self.cmpl_evaluate_nodeExpression(test)) { + target = index + break + } + } + 
} + + result := emptyValue + if target != -1 { + for _, clause := range node.body[target:] { + for _, statement := range clause.consequent { + value := self.cmpl_evaluate_nodeStatement(statement) + switch value.kind { + case valueResult: + switch value.evaluateBreak(labels) { + case resultReturn: + return value + case resultBreak: + return emptyValue + } + case valueEmpty: + default: + result = value + } + } + } + } + + return result +} + +func (self *_runtime) cmpl_evaluate_nodeTryStatement(node *_nodeTryStatement) Value { + tryCatchValue, exception := self.tryCatchEvaluate(func() Value { + return self.cmpl_evaluate_nodeStatement(node.body) + }) + + if exception && node.catch != nil { + outer := self.scope.lexical + self.scope.lexical = self.newDeclarationStash(outer) + defer func() { + self.scope.lexical = outer + }() + // TODO If necessary, convert TypeError => TypeError + // That, is, such errors can be thrown despite not being JavaScript "native" + // strict = false + self.scope.lexical.setValue(node.catch.parameter, tryCatchValue, false) + + // FIXME node.CatchParameter + // FIXME node.Catch + tryCatchValue, exception = self.tryCatchEvaluate(func() Value { + return self.cmpl_evaluate_nodeStatement(node.catch.body) + }) + } + + if node.finally != nil { + finallyValue := self.cmpl_evaluate_nodeStatement(node.finally) + if finallyValue.kind == valueResult { + return finallyValue + } + } + + if exception { + panic(newException(tryCatchValue)) + } + + return tryCatchValue +} + +func (self *_runtime) cmpl_evaluate_nodeWhileStatement(node *_nodeWhileStatement) Value { + + test := node.test + body := node.body + labels := append(self.labels, "") + self.labels = nil + + result := emptyValue +resultBreakContinue: + for { + if !self.cmpl_evaluate_nodeExpression(test).resolve().bool() { + // Stahp: while (false) ... 
+ break + } + for _, node := range body { + value := self.cmpl_evaluate_nodeStatement(node) + switch value.kind { + case valueResult: + switch value.evaluateBreakContinue(labels) { + case resultReturn: + return value + case resultBreak: + break resultBreakContinue + case resultContinue: + continue resultBreakContinue + } + case valueEmpty: + default: + result = value + } + } + } + return result +} + +func (self *_runtime) cmpl_evaluate_nodeWithStatement(node *_nodeWithStatement) Value { + object := self.cmpl_evaluate_nodeExpression(node.object) + outer := self.scope.lexical + lexical := self.newObjectStash(self.toObject(object.resolve()), outer) + self.scope.lexical = lexical + defer func() { + self.scope.lexical = outer + }() + + return self.cmpl_evaluate_nodeStatement(node.body) +} diff --git a/vendor/github.com/robertkrimen/otto/cmpl_parse.go b/vendor/github.com/robertkrimen/otto/cmpl_parse.go new file mode 100644 index 000000000..dc5baa12a --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/cmpl_parse.go @@ -0,0 +1,656 @@ +package otto + +import ( + "fmt" + "regexp" + + "github.com/robertkrimen/otto/ast" + "github.com/robertkrimen/otto/file" + "github.com/robertkrimen/otto/token" +) + +var trueLiteral = &_nodeLiteral{value: toValue_bool(true)} +var falseLiteral = &_nodeLiteral{value: toValue_bool(false)} +var nullLiteral = &_nodeLiteral{value: nullValue} +var emptyStatement = &_nodeEmptyStatement{} + +func (cmpl *_compiler) parseExpression(in ast.Expression) _nodeExpression { + if in == nil { + return nil + } + + switch in := in.(type) { + + case *ast.ArrayLiteral: + out := &_nodeArrayLiteral{ + value: make([]_nodeExpression, len(in.Value)), + } + for i, value := range in.Value { + out.value[i] = cmpl.parseExpression(value) + } + return out + + case *ast.AssignExpression: + return &_nodeAssignExpression{ + operator: in.Operator, + left: cmpl.parseExpression(in.Left), + right: cmpl.parseExpression(in.Right), + } + + case *ast.BinaryExpression: + return &_nodeBinaryExpression{ + operator: in.Operator, + left: cmpl.parseExpression(in.Left), + right: cmpl.parseExpression(in.Right), + comparison: in.Comparison, + } + + case *ast.BooleanLiteral: + if in.Value { + return trueLiteral + } + return falseLiteral + + case *ast.BracketExpression: + return &_nodeBracketExpression{ + idx: in.Left.Idx0(), + left: cmpl.parseExpression(in.Left), + member: cmpl.parseExpression(in.Member), + } + + case *ast.CallExpression: + out := &_nodeCallExpression{ + callee: cmpl.parseExpression(in.Callee), + argumentList: make([]_nodeExpression, len(in.ArgumentList)), + } + for i, value := range in.ArgumentList { + out.argumentList[i] = cmpl.parseExpression(value) + } + return out + + case *ast.ConditionalExpression: + return &_nodeConditionalExpression{ + test: cmpl.parseExpression(in.Test), + consequent: cmpl.parseExpression(in.Consequent), + alternate: cmpl.parseExpression(in.Alternate), + } + + case *ast.DotExpression: + return &_nodeDotExpression{ + idx: in.Left.Idx0(), + left: cmpl.parseExpression(in.Left), + identifier: in.Identifier.Name, + } + + case *ast.EmptyExpression: + return nil + + case *ast.FunctionLiteral: + name := "" + if in.Name != nil { + name = in.Name.Name + } + out := &_nodeFunctionLiteral{ + name: name, + body: cmpl.parseStatement(in.Body), + source: in.Source, + file: cmpl.file, + } + if in.ParameterList != nil { + list := in.ParameterList.List + out.parameterList = make([]string, len(list)) + for i, value := range list { + out.parameterList[i] = value.Name + } + } + for _, value := 
range in.DeclarationList { + switch value := value.(type) { + case *ast.FunctionDeclaration: + out.functionList = append(out.functionList, cmpl.parseExpression(value.Function).(*_nodeFunctionLiteral)) + case *ast.VariableDeclaration: + for _, value := range value.List { + out.varList = append(out.varList, value.Name) + } + default: + panic(fmt.Errorf("Here be dragons: parseProgram.declaration(%T)", value)) + } + } + return out + + case *ast.Identifier: + return &_nodeIdentifier{ + idx: in.Idx, + name: in.Name, + } + + case *ast.NewExpression: + out := &_nodeNewExpression{ + callee: cmpl.parseExpression(in.Callee), + argumentList: make([]_nodeExpression, len(in.ArgumentList)), + } + for i, value := range in.ArgumentList { + out.argumentList[i] = cmpl.parseExpression(value) + } + return out + + case *ast.NullLiteral: + return nullLiteral + + case *ast.NumberLiteral: + return &_nodeLiteral{ + value: toValue(in.Value), + } + + case *ast.ObjectLiteral: + out := &_nodeObjectLiteral{ + value: make([]_nodeProperty, len(in.Value)), + } + for i, value := range in.Value { + out.value[i] = _nodeProperty{ + key: value.Key, + kind: value.Kind, + value: cmpl.parseExpression(value.Value), + } + } + return out + + case *ast.RegExpLiteral: + return &_nodeRegExpLiteral{ + flags: in.Flags, + pattern: in.Pattern, + } + + case *ast.SequenceExpression: + out := &_nodeSequenceExpression{ + sequence: make([]_nodeExpression, len(in.Sequence)), + } + for i, value := range in.Sequence { + out.sequence[i] = cmpl.parseExpression(value) + } + return out + + case *ast.StringLiteral: + return &_nodeLiteral{ + value: toValue_string(in.Value), + } + + case *ast.ThisExpression: + return &_nodeThisExpression{} + + case *ast.UnaryExpression: + return &_nodeUnaryExpression{ + operator: in.Operator, + operand: cmpl.parseExpression(in.Operand), + postfix: in.Postfix, + } + + case *ast.VariableExpression: + return &_nodeVariableExpression{ + idx: in.Idx0(), + name: in.Name, + initializer: cmpl.parseExpression(in.Initializer), + } + + } + + panic(fmt.Errorf("Here be dragons: cmpl.parseExpression(%T)", in)) +} + +func (cmpl *_compiler) parseStatement(in ast.Statement) _nodeStatement { + if in == nil { + return nil + } + + switch in := in.(type) { + + case *ast.BlockStatement: + out := &_nodeBlockStatement{ + list: make([]_nodeStatement, len(in.List)), + } + for i, value := range in.List { + out.list[i] = cmpl.parseStatement(value) + } + return out + + case *ast.BranchStatement: + out := &_nodeBranchStatement{ + branch: in.Token, + } + if in.Label != nil { + out.label = in.Label.Name + } + return out + + case *ast.DebuggerStatement: + return &_nodeDebuggerStatement{} + + case *ast.DoWhileStatement: + out := &_nodeDoWhileStatement{ + test: cmpl.parseExpression(in.Test), + } + body := cmpl.parseStatement(in.Body) + if block, ok := body.(*_nodeBlockStatement); ok { + out.body = block.list + } else { + out.body = append(out.body, body) + } + return out + + case *ast.EmptyStatement: + return emptyStatement + + case *ast.ExpressionStatement: + return &_nodeExpressionStatement{ + expression: cmpl.parseExpression(in.Expression), + } + + case *ast.ForInStatement: + out := &_nodeForInStatement{ + into: cmpl.parseExpression(in.Into), + source: cmpl.parseExpression(in.Source), + } + body := cmpl.parseStatement(in.Body) + if block, ok := body.(*_nodeBlockStatement); ok { + out.body = block.list + } else { + out.body = append(out.body, body) + } + return out + + case *ast.ForStatement: + out := &_nodeForStatement{ + initializer: 
cmpl.parseExpression(in.Initializer), + update: cmpl.parseExpression(in.Update), + test: cmpl.parseExpression(in.Test), + } + body := cmpl.parseStatement(in.Body) + if block, ok := body.(*_nodeBlockStatement); ok { + out.body = block.list + } else { + out.body = append(out.body, body) + } + return out + + case *ast.FunctionStatement: + return emptyStatement + + case *ast.IfStatement: + return &_nodeIfStatement{ + test: cmpl.parseExpression(in.Test), + consequent: cmpl.parseStatement(in.Consequent), + alternate: cmpl.parseStatement(in.Alternate), + } + + case *ast.LabelledStatement: + return &_nodeLabelledStatement{ + label: in.Label.Name, + statement: cmpl.parseStatement(in.Statement), + } + + case *ast.ReturnStatement: + return &_nodeReturnStatement{ + argument: cmpl.parseExpression(in.Argument), + } + + case *ast.SwitchStatement: + out := &_nodeSwitchStatement{ + discriminant: cmpl.parseExpression(in.Discriminant), + default_: in.Default, + body: make([]*_nodeCaseStatement, len(in.Body)), + } + for i, clause := range in.Body { + out.body[i] = &_nodeCaseStatement{ + test: cmpl.parseExpression(clause.Test), + consequent: make([]_nodeStatement, len(clause.Consequent)), + } + for j, value := range clause.Consequent { + out.body[i].consequent[j] = cmpl.parseStatement(value) + } + } + return out + + case *ast.ThrowStatement: + return &_nodeThrowStatement{ + argument: cmpl.parseExpression(in.Argument), + } + + case *ast.TryStatement: + out := &_nodeTryStatement{ + body: cmpl.parseStatement(in.Body), + finally: cmpl.parseStatement(in.Finally), + } + if in.Catch != nil { + out.catch = &_nodeCatchStatement{ + parameter: in.Catch.Parameter.Name, + body: cmpl.parseStatement(in.Catch.Body), + } + } + return out + + case *ast.VariableStatement: + out := &_nodeVariableStatement{ + list: make([]_nodeExpression, len(in.List)), + } + for i, value := range in.List { + out.list[i] = cmpl.parseExpression(value) + } + return out + + case *ast.WhileStatement: + out := &_nodeWhileStatement{ + test: cmpl.parseExpression(in.Test), + } + body := cmpl.parseStatement(in.Body) + if block, ok := body.(*_nodeBlockStatement); ok { + out.body = block.list + } else { + out.body = append(out.body, body) + } + return out + + case *ast.WithStatement: + return &_nodeWithStatement{ + object: cmpl.parseExpression(in.Object), + body: cmpl.parseStatement(in.Body), + } + + } + + panic(fmt.Errorf("Here be dragons: cmpl.parseStatement(%T)", in)) +} + +func cmpl_parse(in *ast.Program) *_nodeProgram { + cmpl := _compiler{ + program: in, + } + return cmpl.parse() +} + +func (cmpl *_compiler) _parse(in *ast.Program) *_nodeProgram { + out := &_nodeProgram{ + body: make([]_nodeStatement, len(in.Body)), + file: in.File, + } + for i, value := range in.Body { + out.body[i] = cmpl.parseStatement(value) + } + for _, value := range in.DeclarationList { + switch value := value.(type) { + case *ast.FunctionDeclaration: + out.functionList = append(out.functionList, cmpl.parseExpression(value.Function).(*_nodeFunctionLiteral)) + case *ast.VariableDeclaration: + for _, value := range value.List { + out.varList = append(out.varList, value.Name) + } + default: + panic(fmt.Errorf("Here be dragons: cmpl.parseProgram.DeclarationList(%T)", value)) + } + } + return out +} + +type _nodeProgram struct { + body []_nodeStatement + + varList []string + functionList []*_nodeFunctionLiteral + + variableList []_nodeDeclaration + + file *file.File +} + +type _nodeDeclaration struct { + name string + definition _node +} + +type _node interface { +} + +type ( + 
_nodeExpression interface { + _node + _expressionNode() + } + + _nodeArrayLiteral struct { + value []_nodeExpression + } + + _nodeAssignExpression struct { + operator token.Token + left _nodeExpression + right _nodeExpression + } + + _nodeBinaryExpression struct { + operator token.Token + left _nodeExpression + right _nodeExpression + comparison bool + } + + _nodeBracketExpression struct { + idx file.Idx + left _nodeExpression + member _nodeExpression + } + + _nodeCallExpression struct { + callee _nodeExpression + argumentList []_nodeExpression + } + + _nodeConditionalExpression struct { + test _nodeExpression + consequent _nodeExpression + alternate _nodeExpression + } + + _nodeDotExpression struct { + idx file.Idx + left _nodeExpression + identifier string + } + + _nodeFunctionLiteral struct { + name string + body _nodeStatement + source string + parameterList []string + varList []string + functionList []*_nodeFunctionLiteral + file *file.File + } + + _nodeIdentifier struct { + idx file.Idx + name string + } + + _nodeLiteral struct { + value Value + } + + _nodeNewExpression struct { + callee _nodeExpression + argumentList []_nodeExpression + } + + _nodeObjectLiteral struct { + value []_nodeProperty + } + + _nodeProperty struct { + key string + kind string + value _nodeExpression + } + + _nodeRegExpLiteral struct { + flags string + pattern string // Value? + regexp *regexp.Regexp + } + + _nodeSequenceExpression struct { + sequence []_nodeExpression + } + + _nodeThisExpression struct { + } + + _nodeUnaryExpression struct { + operator token.Token + operand _nodeExpression + postfix bool + } + + _nodeVariableExpression struct { + idx file.Idx + name string + initializer _nodeExpression + } +) + +type ( + _nodeStatement interface { + _node + _statementNode() + } + + _nodeBlockStatement struct { + list []_nodeStatement + } + + _nodeBranchStatement struct { + branch token.Token + label string + } + + _nodeCaseStatement struct { + test _nodeExpression + consequent []_nodeStatement + } + + _nodeCatchStatement struct { + parameter string + body _nodeStatement + } + + _nodeDebuggerStatement struct { + } + + _nodeDoWhileStatement struct { + test _nodeExpression + body []_nodeStatement + } + + _nodeEmptyStatement struct { + } + + _nodeExpressionStatement struct { + expression _nodeExpression + } + + _nodeForInStatement struct { + into _nodeExpression + source _nodeExpression + body []_nodeStatement + } + + _nodeForStatement struct { + initializer _nodeExpression + update _nodeExpression + test _nodeExpression + body []_nodeStatement + } + + _nodeIfStatement struct { + test _nodeExpression + consequent _nodeStatement + alternate _nodeStatement + } + + _nodeLabelledStatement struct { + label string + statement _nodeStatement + } + + _nodeReturnStatement struct { + argument _nodeExpression + } + + _nodeSwitchStatement struct { + discriminant _nodeExpression + default_ int + body []*_nodeCaseStatement + } + + _nodeThrowStatement struct { + argument _nodeExpression + } + + _nodeTryStatement struct { + body _nodeStatement + catch *_nodeCatchStatement + finally _nodeStatement + } + + _nodeVariableStatement struct { + list []_nodeExpression + } + + _nodeWhileStatement struct { + test _nodeExpression + body []_nodeStatement + } + + _nodeWithStatement struct { + object _nodeExpression + body _nodeStatement + } +) + +// _expressionNode + +func (*_nodeArrayLiteral) _expressionNode() {} +func (*_nodeAssignExpression) _expressionNode() {} +func (*_nodeBinaryExpression) _expressionNode() {} +func 
(*_nodeBracketExpression) _expressionNode() {} +func (*_nodeCallExpression) _expressionNode() {} +func (*_nodeConditionalExpression) _expressionNode() {} +func (*_nodeDotExpression) _expressionNode() {} +func (*_nodeFunctionLiteral) _expressionNode() {} +func (*_nodeIdentifier) _expressionNode() {} +func (*_nodeLiteral) _expressionNode() {} +func (*_nodeNewExpression) _expressionNode() {} +func (*_nodeObjectLiteral) _expressionNode() {} +func (*_nodeRegExpLiteral) _expressionNode() {} +func (*_nodeSequenceExpression) _expressionNode() {} +func (*_nodeThisExpression) _expressionNode() {} +func (*_nodeUnaryExpression) _expressionNode() {} +func (*_nodeVariableExpression) _expressionNode() {} + +// _statementNode + +func (*_nodeBlockStatement) _statementNode() {} +func (*_nodeBranchStatement) _statementNode() {} +func (*_nodeCaseStatement) _statementNode() {} +func (*_nodeCatchStatement) _statementNode() {} +func (*_nodeDebuggerStatement) _statementNode() {} +func (*_nodeDoWhileStatement) _statementNode() {} +func (*_nodeEmptyStatement) _statementNode() {} +func (*_nodeExpressionStatement) _statementNode() {} +func (*_nodeForInStatement) _statementNode() {} +func (*_nodeForStatement) _statementNode() {} +func (*_nodeIfStatement) _statementNode() {} +func (*_nodeLabelledStatement) _statementNode() {} +func (*_nodeReturnStatement) _statementNode() {} +func (*_nodeSwitchStatement) _statementNode() {} +func (*_nodeThrowStatement) _statementNode() {} +func (*_nodeTryStatement) _statementNode() {} +func (*_nodeVariableStatement) _statementNode() {} +func (*_nodeWhileStatement) _statementNode() {} +func (*_nodeWithStatement) _statementNode() {} diff --git a/vendor/github.com/robertkrimen/otto/console.go b/vendor/github.com/robertkrimen/otto/console.go new file mode 100644 index 000000000..948face77 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/console.go @@ -0,0 +1,51 @@ +package otto + +import ( + "fmt" + "os" + "strings" +) + +func formatForConsole(argumentList []Value) string { + output := []string{} + for _, argument := range argumentList { + output = append(output, fmt.Sprintf("%v", argument)) + } + return strings.Join(output, " ") +} + +func builtinConsole_log(call FunctionCall) Value { + fmt.Fprintln(os.Stdout, formatForConsole(call.ArgumentList)) + return Value{} +} + +func builtinConsole_error(call FunctionCall) Value { + fmt.Fprintln(os.Stdout, formatForConsole(call.ArgumentList)) + return Value{} +} + +// Nothing happens. 
+func builtinConsole_dir(call FunctionCall) Value { + return Value{} +} + +func builtinConsole_time(call FunctionCall) Value { + return Value{} +} + +func builtinConsole_timeEnd(call FunctionCall) Value { + return Value{} +} + +func builtinConsole_trace(call FunctionCall) Value { + return Value{} +} + +func builtinConsole_assert(call FunctionCall) Value { + return Value{} +} + +func (runtime *_runtime) newConsole() *_object { + + return newConsoleObject(runtime) +} diff --git a/vendor/github.com/robertkrimen/otto/dbg.go b/vendor/github.com/robertkrimen/otto/dbg.go new file mode 100644 index 000000000..51fbdc206 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/dbg.go @@ -0,0 +1,9 @@ +// This file was AUTOMATICALLY GENERATED by dbg-import (smuggol) for github.com/robertkrimen/dbg + +package otto + +import ( + Dbg "github.com/robertkrimen/otto/dbg" +) + +var dbg, dbgf = Dbg.New() diff --git a/vendor/github.com/robertkrimen/otto/dbg/dbg.go b/vendor/github.com/robertkrimen/otto/dbg/dbg.go new file mode 100644 index 000000000..8c27fa293 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/dbg/dbg.go @@ -0,0 +1,387 @@ +// This file was AUTOMATICALLY GENERATED by dbg-import (smuggol) from github.com/robertkrimen/dbg + +/* +Package dbg is a println/printf/log-debugging utility library. + + import ( + Dbg "github.com/robertkrimen/dbg" + ) + + dbg, dbgf := Dbg.New() + + dbg("Emit some debug stuff", []byte{120, 121, 122, 122, 121}, math.Pi) + # "2013/01/28 16:50:03 Emit some debug stuff [120 121 122 122 121] 3.141592653589793" + + dbgf("With a %s formatting %.2f", "little", math.Pi) + # "2013/01/28 16:51:55 With a little formatting (3.14)" + + dbgf("%/fatal//A fatal debug statement: should not be here") + # "A fatal debug statement: should not be here" + # ...and then, os.Exit(1) + + dbgf("%/panic//Can also panic %s", "this") + # "Can also panic this" + # ...as a panic, equivalent to: panic("Can also panic this") + + dbgf("Any %s arguments without a corresponding %%", "extra", "are treated like arguments to dbg()") + # "2013/01/28 17:14:40 Any extra arguments (without a corresponding %) are treated like arguments to dbg()" + + dbgf("%d %d", 1, 2, 3, 4, 5) + # "2013/01/28 17:16:32 Another example: 1 2 3 4 5" + + dbgf("%@: Include the function name for a little context (via %s)", "%@") + # "2013... github.com/robertkrimen/dbg.TestSynopsis: Include the function name for a little context (via %@)" + +By default, dbg uses log (log.Println, log.Printf, log.Panic, etc.) for output. 
+However, you can also provide your own output destination by invoking dbg.New with +a customization function: + + import ( + "bytes" + Dbg "github.com/robertkrimen/dbg" + "os" + ) + + # dbg to os.Stderr + dbg, dbgf := Dbg.New(func(dbgr *Dbgr) { + dbgr.SetOutput(os.Stderr) + }) + + # A slightly contrived example: + var buffer bytes.Buffer + dbg, dbgf := New(func(dbgr *Dbgr) { + dbgr.SetOutput(&buffer) + }) + +*/ +package dbg + +import ( + "bytes" + "fmt" + "io" + "log" + "os" + "regexp" + "runtime" + "strings" + "unicode" +) + +type _frmt struct { + ctl string + format string + operandCount int + panic bool + fatal bool + check bool +} + +var ( + ctlTest = regexp.MustCompile(`^\s*%/`) + ctlScan = regexp.MustCompile(`%?/(panic|fatal|check)(?:\s|$)`) +) + +func operandCount(format string) int { + count := 0 + end := len(format) + for at := 0; at < end; { + for at < end && format[at] != '%' { + at++ + } + at++ + if at < end { + if format[at] != '%' && format[at] != '@' { + count++ + } + at++ + } + } + return count +} + +func parseFormat(format string) (frmt _frmt) { + if ctlTest.MatchString(format) { + format = strings.TrimLeftFunc(format, unicode.IsSpace) + index := strings.Index(format, "//") + if index != -1 { + frmt.ctl = format[0:index] + format = format[index+2:] // Skip the second slash via +2 (instead of +1) + } else { + frmt.ctl = format + format = "" + } + for _, tmp := range ctlScan.FindAllStringSubmatch(frmt.ctl, -1) { + for _, value := range tmp[1:] { + switch value { + case "panic": + frmt.panic = true + case "fatal": + frmt.fatal = true + case "check": + frmt.check = true + } + } + } + } + frmt.format = format + frmt.operandCount = operandCount(format) + return +} + +type Dbgr struct { + emit _emit +} + +type DbgFunction func(values ...interface{}) + +func NewDbgr() *Dbgr { + self := &Dbgr{} + return self +} + +/* +New will create and return a pair of debugging functions. You can customize where +they output to by passing in an (optional) customization function: + + import ( + Dbg "github.com/robertkrimen/dbg" + "os" + ) + + # dbg to os.Stderr + dbg, dbgf := Dbg.New(func(dbgr *Dbgr) { + dbgr.SetOutput(os.Stderr) + }) + +*/ +func New(options ...interface{}) (dbg DbgFunction, dbgf DbgFunction) { + dbgr := NewDbgr() + if len(options) > 0 { + if fn, ok := options[0].(func(*Dbgr)); ok { + fn(dbgr) + } + } + return dbgr.DbgDbgf() +} + +func (self Dbgr) Dbg(values ...interface{}) { + self.getEmit().emit(_frmt{}, "", values...) +} + +func (self Dbgr) Dbgf(values ...interface{}) { + self.dbgf(values...) +} + +func (self Dbgr) DbgDbgf() (dbg DbgFunction, dbgf DbgFunction) { + dbg = func(vl ...interface{}) { + self.Dbg(vl...) + } + dbgf = func(vl ...interface{}) { + self.dbgf(vl...) + } + return dbg, dbgf // Redundant, but... +} + +func (self Dbgr) dbgf(values ...interface{}) { + + var frmt _frmt + if len(values) > 0 { + tmp := fmt.Sprint(values[0]) + frmt = parseFormat(tmp) + values = values[1:] + } + + buffer_f := bytes.Buffer{} + format := frmt.format + end := len(format) + for at := 0; at < end; { + last := at + for at < end && format[at] != '%' { + at++ + } + if at > last { + buffer_f.WriteString(format[last:at]) + } + if at >= end { + break + } + // format[at] == '%' + at++ + // format[at] == ? + if format[at] == '@' { + depth := 2 + pc, _, _, _ := runtime.Caller(depth) + name := runtime.FuncForPC(pc).Name() + buffer_f.WriteString(name) + } else { + buffer_f.WriteString(format[at-1 : at+1]) + } + at++ + } + + //values_f := append([]interface{}{}, values[0:frmt.operandCount]...) 
+ values_f := values[0:frmt.operandCount] + values_dbg := values[frmt.operandCount:] + if len(values_dbg) > 0 { + // Adjust frmt.format: + // (%v instead of %s because: frmt.check) + { + tmp := format + if len(tmp) > 0 { + if unicode.IsSpace(rune(tmp[len(tmp)-1])) { + buffer_f.WriteString("%v") + } else { + buffer_f.WriteString(" %v") + } + } else if frmt.check { + // Performing a check, so no output + } else { + buffer_f.WriteString("%v") + } + } + + // Adjust values_f: + if !frmt.check { + tmp := []string{} + for _, value := range values_dbg { + tmp = append(tmp, fmt.Sprintf("%v", value)) + } + // First, make a copy of values_f, so we avoid overwriting values_dbg when appending + values_f = append([]interface{}{}, values_f...) + values_f = append(values_f, strings.Join(tmp, " ")) + } + } + + format = buffer_f.String() + if frmt.check { + // We do not actually emit to the log, but panic if + // a non-nil value is detected (e.g. a non-nil error) + for _, value := range values_dbg { + if value != nil { + if format == "" { + panic(value) + } else { + panic(fmt.Sprintf(format, append(values_f, value)...)) + } + } + } + } else { + self.getEmit().emit(frmt, format, values_f...) + } +} + +// Idiot-proof &Dbgr{}, etc. +func (self *Dbgr) getEmit() _emit { + if self.emit == nil { + self.emit = standardEmit() + } + return self.emit +} + +// SetOutput will accept the following as a destination for output: +// +// *log.Logger Print*/Panic*/Fatal* of the logger +// io.Writer - +// nil Reset to the default output (os.Stderr) +// "log" Print*/Panic*/Fatal* via the "log" package +// +func (self *Dbgr) SetOutput(output interface{}) { + if output == nil { + self.emit = standardEmit() + return + } + switch output := output.(type) { + case *log.Logger: + self.emit = _emitLogger{ + logger: output, + } + return + case io.Writer: + self.emit = _emitWriter{ + writer: output, + } + return + case string: + if output == "log" { + self.emit = _emitLog{} + return + } + } + panic(output) +} + +// ======== // +// = emit = // +// ======== // + +func standardEmit() _emit { + return _emitWriter{ + writer: os.Stderr, + } +} + +func ln(tmp string) string { + length := len(tmp) + if length > 0 && tmp[length-1] != '\n' { + return tmp + "\n" + } + return tmp +} + +type _emit interface { + emit(_frmt, string, ...interface{}) +} + +type _emitWriter struct { + writer io.Writer +} + +func (self _emitWriter) emit(frmt _frmt, format string, values ...interface{}) { + if format == "" { + fmt.Fprintln(self.writer, values...) + } else { + if frmt.panic { + panic(fmt.Sprintf(format, values...)) + } + fmt.Fprintf(self.writer, ln(format), values...) + if frmt.fatal { + os.Exit(1) + } + } +} + +type _emitLogger struct { + logger *log.Logger +} + +func (self _emitLogger) emit(frmt _frmt, format string, values ...interface{}) { + if format == "" { + self.logger.Println(values...) + } else { + if frmt.panic { + self.logger.Panicf(format, values...) + } else if frmt.fatal { + self.logger.Fatalf(format, values...) + } else { + self.logger.Printf(format, values...) + } + } +} + +type _emitLog struct { +} + +func (self _emitLog) emit(frmt _frmt, format string, values ...interface{}) { + if format == "" { + log.Println(values...) + } else { + if frmt.panic { + log.Panicf(format, values...) + } else if frmt.fatal { + log.Fatalf(format, values...) + } else { + log.Printf(format, values...) 
+		}
+	}
+}
diff --git a/vendor/github.com/robertkrimen/otto/error.go b/vendor/github.com/robertkrimen/otto/error.go
new file mode 100644
index 000000000..41a5c10e7
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/error.go
@@ -0,0 +1,252 @@
+package otto
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/robertkrimen/otto/file"
+)
+
+type _exception struct {
+	value interface{}
+}
+
+func newException(value interface{}) *_exception {
+	return &_exception{
+		value: value,
+	}
+}
+
+func (self *_exception) eject() interface{} {
+	value := self.value
+	self.value = nil // Prevent Go from holding on to the value, whatever it is
+	return value
+}
+
+type _error struct {
+	name    string
+	message string
+	trace   []_frame
+
+	offset int
+}
+
+func (err _error) format() string {
+	if len(err.name) == 0 {
+		return err.message
+	}
+	if len(err.message) == 0 {
+		return err.name
+	}
+	return fmt.Sprintf("%s: %s", err.name, err.message)
+}
+
+func (err _error) formatWithStack() string {
+	str := err.format() + "\n"
+	for _, frame := range err.trace {
+		str += "    at " + frame.location() + "\n"
+	}
+	return str
+}
+
+type _frame struct {
+	native     bool
+	nativeFile string
+	nativeLine int
+	file       *file.File
+	offset     int
+	callee     string
+}
+
+var (
+	nativeFrame = _frame{}
+)
+
+type _at int
+
+func (fr _frame) location() string {
+	str := "<unknown>"
+
+	switch {
+	case fr.native:
+		str = "<native code>"
+		if fr.nativeFile != "" && fr.nativeLine != 0 {
+			str = fmt.Sprintf("%s:%d", fr.nativeFile, fr.nativeLine)
+		}
+	case fr.file != nil:
+		if p := fr.file.Position(file.Idx(fr.offset)); p != nil {
+			path, line, column := p.Filename, p.Line, p.Column
+
+			if path == "" {
+				path = "<anonymous>"
+			}
+
+			str = fmt.Sprintf("%s:%d:%d", path, line, column)
+		}
+	}
+
+	if fr.callee != "" {
+		str = fmt.Sprintf("%s (%s)", fr.callee, str)
+	}
+
+	return str
+}
+
+// An Error represents a runtime error, e.g. a TypeError, a ReferenceError, etc.
+type Error struct {
+	_error
+}
+
+// Error returns a description of the error
+//
+//    TypeError: 'def' is not a function
+//
+func (err Error) Error() string {
+	return err.format()
+}
+
+// String returns a description of the error and a trace of where the
+// error occurred.
+//
+//    TypeError: 'def' is not a function
+//        at xyz (<anonymous>:3:9)
+//        at <anonymous>:7:1/
+//
+func (err Error) String() string {
+	return err.formatWithStack()
+}
+
+func (err _error) describe(format string, in ...interface{}) string {
+	return fmt.Sprintf(format, in...)
+} + +func (self _error) messageValue() Value { + if self.message == "" { + return Value{} + } + return toValue_string(self.message) +} + +func (rt *_runtime) typeErrorResult(throw bool) bool { + if throw { + panic(rt.panicTypeError()) + } + return false +} + +func newError(rt *_runtime, name string, stackFramesToPop int, in ...interface{}) _error { + err := _error{ + name: name, + offset: -1, + } + description := "" + length := len(in) + + if rt != nil && rt.scope != nil { + scope := rt.scope + + for i := 0; i < stackFramesToPop; i++ { + if scope.outer != nil { + scope = scope.outer + } + } + + frame := scope.frame + + if length > 0 { + if at, ok := in[length-1].(_at); ok { + in = in[0 : length-1] + if scope != nil { + frame.offset = int(at) + } + length-- + } + if length > 0 { + description, in = in[0].(string), in[1:] + } + } + + limit := rt.traceLimit + + err.trace = append(err.trace, frame) + if scope != nil { + for scope = scope.outer; scope != nil; scope = scope.outer { + if limit--; limit == 0 { + break + } + + if scope.frame.offset >= 0 { + err.trace = append(err.trace, scope.frame) + } + } + } + } else { + if length > 0 { + description, in = in[0].(string), in[1:] + } + } + err.message = err.describe(description, in...) + + return err +} + +func (rt *_runtime) panicTypeError(argumentList ...interface{}) *_exception { + return &_exception{ + value: newError(rt, "TypeError", 0, argumentList...), + } +} + +func (rt *_runtime) panicReferenceError(argumentList ...interface{}) *_exception { + return &_exception{ + value: newError(rt, "ReferenceError", 0, argumentList...), + } +} + +func (rt *_runtime) panicURIError(argumentList ...interface{}) *_exception { + return &_exception{ + value: newError(rt, "URIError", 0, argumentList...), + } +} + +func (rt *_runtime) panicSyntaxError(argumentList ...interface{}) *_exception { + return &_exception{ + value: newError(rt, "SyntaxError", 0, argumentList...), + } +} + +func (rt *_runtime) panicRangeError(argumentList ...interface{}) *_exception { + return &_exception{ + value: newError(rt, "RangeError", 0, argumentList...), + } +} + +func catchPanic(function func()) (err error) { + defer func() { + if caught := recover(); caught != nil { + if exception, ok := caught.(*_exception); ok { + caught = exception.eject() + } + switch caught := caught.(type) { + case *Error: + err = caught + return + case _error: + err = &Error{caught} + return + case Value: + if vl := caught._object(); vl != nil { + switch vl := vl.value.(type) { + case _error: + err = &Error{vl} + return + } + } + err = errors.New(caught.string()) + return + } + panic(caught) + } + }() + function() + return nil +} diff --git a/vendor/github.com/robertkrimen/otto/evaluate.go b/vendor/github.com/robertkrimen/otto/evaluate.go new file mode 100644 index 000000000..093054cc3 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/evaluate.go @@ -0,0 +1,318 @@ +package otto + +import ( + "fmt" + "math" + "strings" + + "github.com/robertkrimen/otto/token" +) + +func (self *_runtime) evaluateMultiply(left float64, right float64) Value { + // TODO 11.5.1 + return Value{} +} + +func (self *_runtime) evaluateDivide(left float64, right float64) Value { + if math.IsNaN(left) || math.IsNaN(right) { + return NaNValue() + } + if math.IsInf(left, 0) && math.IsInf(right, 0) { + return NaNValue() + } + if left == 0 && right == 0 { + return NaNValue() + } + if math.IsInf(left, 0) { + if math.Signbit(left) == math.Signbit(right) { + return positiveInfinityValue() + } else { + return 
negativeInfinityValue() + } + } + if math.IsInf(right, 0) { + if math.Signbit(left) == math.Signbit(right) { + return positiveZeroValue() + } else { + return negativeZeroValue() + } + } + if right == 0 { + if math.Signbit(left) == math.Signbit(right) { + return positiveInfinityValue() + } else { + return negativeInfinityValue() + } + } + return toValue_float64(left / right) +} + +func (self *_runtime) evaluateModulo(left float64, right float64) Value { + // TODO 11.5.3 + return Value{} +} + +func (self *_runtime) calculateBinaryExpression(operator token.Token, left Value, right Value) Value { + + leftValue := left.resolve() + + switch operator { + + // Additive + case token.PLUS: + leftValue = toPrimitive(leftValue) + rightValue := right.resolve() + rightValue = toPrimitive(rightValue) + + if leftValue.IsString() || rightValue.IsString() { + return toValue_string(strings.Join([]string{leftValue.string(), rightValue.string()}, "")) + } else { + return toValue_float64(leftValue.float64() + rightValue.float64()) + } + case token.MINUS: + rightValue := right.resolve() + return toValue_float64(leftValue.float64() - rightValue.float64()) + + // Multiplicative + case token.MULTIPLY: + rightValue := right.resolve() + return toValue_float64(leftValue.float64() * rightValue.float64()) + case token.SLASH: + rightValue := right.resolve() + return self.evaluateDivide(leftValue.float64(), rightValue.float64()) + case token.REMAINDER: + rightValue := right.resolve() + return toValue_float64(math.Mod(leftValue.float64(), rightValue.float64())) + + // Logical + case token.LOGICAL_AND: + left := leftValue.bool() + if !left { + return falseValue + } + return toValue_bool(right.resolve().bool()) + case token.LOGICAL_OR: + left := leftValue.bool() + if left { + return trueValue + } + return toValue_bool(right.resolve().bool()) + + // Bitwise + case token.AND: + rightValue := right.resolve() + return toValue_int32(toInt32(leftValue) & toInt32(rightValue)) + case token.OR: + rightValue := right.resolve() + return toValue_int32(toInt32(leftValue) | toInt32(rightValue)) + case token.EXCLUSIVE_OR: + rightValue := right.resolve() + return toValue_int32(toInt32(leftValue) ^ toInt32(rightValue)) + + // Shift + // (Masking of 0x1f is to restrict the shift to a maximum of 31 places) + case token.SHIFT_LEFT: + rightValue := right.resolve() + return toValue_int32(toInt32(leftValue) << (toUint32(rightValue) & 0x1f)) + case token.SHIFT_RIGHT: + rightValue := right.resolve() + return toValue_int32(toInt32(leftValue) >> (toUint32(rightValue) & 0x1f)) + case token.UNSIGNED_SHIFT_RIGHT: + rightValue := right.resolve() + // Shifting an unsigned integer is a logical shift + return toValue_uint32(toUint32(leftValue) >> (toUint32(rightValue) & 0x1f)) + + case token.INSTANCEOF: + rightValue := right.resolve() + if !rightValue.IsObject() { + panic(self.panicTypeError("Expecting a function in instanceof check, but got: %v", rightValue)) + } + return toValue_bool(rightValue._object().hasInstance(leftValue)) + + case token.IN: + rightValue := right.resolve() + if !rightValue.IsObject() { + panic(self.panicTypeError()) + } + return toValue_bool(rightValue._object().hasProperty(leftValue.string())) + } + + panic(hereBeDragons(operator)) +} + +func valueKindDispatchKey(left _valueKind, right _valueKind) int { + return (int(left) << 2) + int(right) +} + +var equalDispatch map[int](func(Value, Value) bool) = makeEqualDispatch() + +func makeEqualDispatch() map[int](func(Value, Value) bool) { + key := valueKindDispatchKey + return 
map[int](func(Value, Value) bool){ + + key(valueNumber, valueObject): func(x Value, y Value) bool { return x.float64() == y.float64() }, + key(valueString, valueObject): func(x Value, y Value) bool { return x.float64() == y.float64() }, + key(valueObject, valueNumber): func(x Value, y Value) bool { return x.float64() == y.float64() }, + key(valueObject, valueString): func(x Value, y Value) bool { return x.float64() == y.float64() }, + } +} + +type _lessThanResult int + +const ( + lessThanFalse _lessThanResult = iota + lessThanTrue + lessThanUndefined +) + +func calculateLessThan(left Value, right Value, leftFirst bool) _lessThanResult { + + x := Value{} + y := x + + if leftFirst { + x = toNumberPrimitive(left) + y = toNumberPrimitive(right) + } else { + y = toNumberPrimitive(right) + x = toNumberPrimitive(left) + } + + result := false + if x.kind != valueString || y.kind != valueString { + x, y := x.float64(), y.float64() + if math.IsNaN(x) || math.IsNaN(y) { + return lessThanUndefined + } + result = x < y + } else { + x, y := x.string(), y.string() + result = x < y + } + + if result { + return lessThanTrue + } + + return lessThanFalse +} + +// FIXME Probably a map is not the most efficient way to do this +var lessThanTable [4](map[_lessThanResult]bool) = [4](map[_lessThanResult]bool){ + // < + map[_lessThanResult]bool{ + lessThanFalse: false, + lessThanTrue: true, + lessThanUndefined: false, + }, + + // > + map[_lessThanResult]bool{ + lessThanFalse: false, + lessThanTrue: true, + lessThanUndefined: false, + }, + + // <= + map[_lessThanResult]bool{ + lessThanFalse: true, + lessThanTrue: false, + lessThanUndefined: false, + }, + + // >= + map[_lessThanResult]bool{ + lessThanFalse: true, + lessThanTrue: false, + lessThanUndefined: false, + }, +} + +func (self *_runtime) calculateComparison(comparator token.Token, left Value, right Value) bool { + + // FIXME Use strictEqualityComparison? + // TODO This might be redundant now (with regards to evaluateComparison) + x := left.resolve() + y := right.resolve() + + kindEqualKind := false + result := true + negate := false + + switch comparator { + case token.LESS: + result = lessThanTable[0][calculateLessThan(x, y, true)] + case token.GREATER: + result = lessThanTable[1][calculateLessThan(y, x, false)] + case token.LESS_OR_EQUAL: + result = lessThanTable[2][calculateLessThan(y, x, false)] + case token.GREATER_OR_EQUAL: + result = lessThanTable[3][calculateLessThan(x, y, true)] + case token.STRICT_NOT_EQUAL: + negate = true + fallthrough + case token.STRICT_EQUAL: + if x.kind != y.kind { + result = false + } else { + kindEqualKind = true + } + case token.NOT_EQUAL: + negate = true + fallthrough + case token.EQUAL: + if x.kind == y.kind { + kindEqualKind = true + } else if x.kind <= valueNull && y.kind <= valueNull { + result = true + } else if x.kind <= valueNull || y.kind <= valueNull { + result = false + } else if x.kind <= valueString && y.kind <= valueString { + result = x.float64() == y.float64() + } else if x.kind == valueBoolean { + result = self.calculateComparison(token.EQUAL, toValue_float64(x.float64()), y) + } else if y.kind == valueBoolean { + result = self.calculateComparison(token.EQUAL, x, toValue_float64(y.float64())) + } else if x.kind == valueObject { + result = self.calculateComparison(token.EQUAL, toPrimitive(x), y) + } else if y.kind == valueObject { + result = self.calculateComparison(token.EQUAL, x, toPrimitive(y)) + } else { + panic(hereBeDragons("Unable to test for equality: %v ==? 
%v", x, y)) + } + default: + panic(fmt.Errorf("Unknown comparator %s", comparator.String())) + } + + if kindEqualKind { + switch x.kind { + case valueUndefined, valueNull: + result = true + case valueNumber: + x := x.float64() + y := y.float64() + if math.IsNaN(x) || math.IsNaN(y) { + result = false + } else { + result = x == y + } + case valueString: + result = x.string() == y.string() + case valueBoolean: + result = x.bool() == y.bool() + case valueObject: + result = x._object() == y._object() + default: + goto ERROR + } + } + + if negate { + result = !result + } + + return result + +ERROR: + panic(hereBeDragons("%v (%v) %s %v (%v)", x, x.kind, comparator, y, y.kind)) +} diff --git a/vendor/github.com/robertkrimen/otto/file/README.markdown b/vendor/github.com/robertkrimen/otto/file/README.markdown new file mode 100644 index 000000000..79757baa8 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/file/README.markdown @@ -0,0 +1,110 @@ +# file +-- + import "github.com/robertkrimen/otto/file" + +Package file encapsulates the file abstractions used by the ast & parser. + +## Usage + +#### type File + +```go +type File struct { +} +``` + + +#### func NewFile + +```go +func NewFile(filename, src string, base int) *File +``` + +#### func (*File) Base + +```go +func (fl *File) Base() int +``` + +#### func (*File) Name + +```go +func (fl *File) Name() string +``` + +#### func (*File) Source + +```go +func (fl *File) Source() string +``` + +#### type FileSet + +```go +type FileSet struct { +} +``` + +A FileSet represents a set of source files. + +#### func (*FileSet) AddFile + +```go +func (self *FileSet) AddFile(filename, src string) int +``` +AddFile adds a new file with the given filename and src. + +This an internal method, but exported for cross-package use. + +#### func (*FileSet) File + +```go +func (self *FileSet) File(idx Idx) *File +``` + +#### func (*FileSet) Position + +```go +func (self *FileSet) Position(idx Idx) *Position +``` +Position converts an Idx in the FileSet into a Position. + +#### type Idx + +```go +type Idx int +``` + +Idx is a compact encoding of a source position within a file set. It can be +converted into a Position for a more convenient, but much larger, +representation. + +#### type Position + +```go +type Position struct { + Filename string // The filename where the error occurred, if any + Offset int // The src offset + Line int // The line number, starting at 1 + Column int // The column number, starting at 1 (The character count) + +} +``` + +Position describes an arbitrary source position including the filename, line, +and column location. + +#### func (*Position) String + +```go +func (self *Position) String() string +``` +String returns a string in one of several forms: + + file:line:column A valid position with filename + line:column A valid position without filename + file An invalid position with filename + - An invalid position without filename + +-- +**godocdown** http://github.com/robertkrimen/godocdown diff --git a/vendor/github.com/robertkrimen/otto/file/file.go b/vendor/github.com/robertkrimen/otto/file/file.go new file mode 100644 index 000000000..9093206e8 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/file/file.go @@ -0,0 +1,164 @@ +// Package file encapsulates the file abstractions used by the ast & parser. +// +package file + +import ( + "fmt" + "strings" + + "gopkg.in/sourcemap.v1" +) + +// Idx is a compact encoding of a source position within a file set. 
+// It can be converted into a Position for a more convenient, but much +// larger, representation. +type Idx int + +// Position describes an arbitrary source position +// including the filename, line, and column location. +type Position struct { + Filename string // The filename where the error occurred, if any + Offset int // The src offset + Line int // The line number, starting at 1 + Column int // The column number, starting at 1 (The character count) + +} + +// A Position is valid if the line number is > 0. + +func (self *Position) isValid() bool { + return self.Line > 0 +} + +// String returns a string in one of several forms: +// +// file:line:column A valid position with filename +// line:column A valid position without filename +// file An invalid position with filename +// - An invalid position without filename +// +func (self *Position) String() string { + str := self.Filename + if self.isValid() { + if str != "" { + str += ":" + } + str += fmt.Sprintf("%d:%d", self.Line, self.Column) + } + if str == "" { + str = "-" + } + return str +} + +// FileSet + +// A FileSet represents a set of source files. +type FileSet struct { + files []*File + last *File +} + +// AddFile adds a new file with the given filename and src. +// +// This an internal method, but exported for cross-package use. +func (self *FileSet) AddFile(filename, src string) int { + base := self.nextBase() + file := &File{ + name: filename, + src: src, + base: base, + } + self.files = append(self.files, file) + self.last = file + return base +} + +func (self *FileSet) nextBase() int { + if self.last == nil { + return 1 + } + return self.last.base + len(self.last.src) + 1 +} + +func (self *FileSet) File(idx Idx) *File { + for _, file := range self.files { + if idx <= Idx(file.base+len(file.src)) { + return file + } + } + return nil +} + +// Position converts an Idx in the FileSet into a Position. 
+func (self *FileSet) Position(idx Idx) *Position { + for _, file := range self.files { + if idx <= Idx(file.base+len(file.src)) { + return file.Position(idx - Idx(file.base)) + } + } + + return nil +} + +type File struct { + name string + src string + base int // This will always be 1 or greater + sm *sourcemap.Consumer +} + +func NewFile(filename, src string, base int) *File { + return &File{ + name: filename, + src: src, + base: base, + } +} + +func (fl *File) WithSourceMap(sm *sourcemap.Consumer) *File { + fl.sm = sm + return fl +} + +func (fl *File) Name() string { + return fl.name +} + +func (fl *File) Source() string { + return fl.src +} + +func (fl *File) Base() int { + return fl.base +} + +func (fl *File) Position(idx Idx) *Position { + position := &Position{} + + offset := int(idx) - fl.base + + if offset >= len(fl.src) || offset < 0 { + return nil + } + + src := fl.src[:offset] + + position.Filename = fl.name + position.Offset = offset + position.Line = strings.Count(src, "\n") + 1 + + if index := strings.LastIndex(src, "\n"); index >= 0 { + position.Column = offset - index + } else { + position.Column = len(src) + 1 + } + + if fl.sm != nil { + if f, _, l, c, ok := fl.sm.Source(position.Line, position.Column); ok { + position.Filename, position.Line, position.Column = f, l, c + } + } + + return position +} diff --git a/vendor/github.com/robertkrimen/otto/global.go b/vendor/github.com/robertkrimen/otto/global.go new file mode 100644 index 000000000..eb2a2a0fb --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/global.go @@ -0,0 +1,221 @@ +package otto + +import ( + "strconv" + "time" +) + +var ( + prototypeValueObject = interface{}(nil) + prototypeValueFunction = _nativeFunctionObject{ + call: func(_ FunctionCall) Value { + return Value{} + }, + } + prototypeValueString = _stringASCII("") + // TODO Make this just false? 
+ prototypeValueBoolean = Value{ + kind: valueBoolean, + value: false, + } + prototypeValueNumber = Value{ + kind: valueNumber, + value: 0, + } + prototypeValueDate = _dateObject{ + epoch: 0, + isNaN: false, + time: time.Unix(0, 0).UTC(), + value: Value{ + kind: valueNumber, + value: 0, + }, + } + prototypeValueRegExp = _regExpObject{ + regularExpression: nil, + global: false, + ignoreCase: false, + multiline: false, + source: "", + flags: "", + } +) + +func newContext() *_runtime { + + self := &_runtime{} + + self.globalStash = self.newObjectStash(nil, nil) + self.globalObject = self.globalStash.object + + _newContext(self) + + self.eval = self.globalObject.property["eval"].value.(Value).value.(*_object) + self.globalObject.prototype = self.global.ObjectPrototype + + return self +} + +func (runtime *_runtime) newBaseObject() *_object { + self := newObject(runtime, "") + return self +} + +func (runtime *_runtime) newClassObject(class string) *_object { + return newObject(runtime, class) +} + +func (runtime *_runtime) newPrimitiveObject(class string, value Value) *_object { + self := runtime.newClassObject(class) + self.value = value + return self +} + +func (self *_object) primitiveValue() Value { + switch value := self.value.(type) { + case Value: + return value + case _stringObject: + return toValue_string(value.String()) + } + return Value{} +} + +func (self *_object) hasPrimitive() bool { + switch self.value.(type) { + case Value, _stringObject: + return true + } + return false +} + +func (runtime *_runtime) newObject() *_object { + self := runtime.newClassObject("Object") + self.prototype = runtime.global.ObjectPrototype + return self +} + +func (runtime *_runtime) newArray(length uint32) *_object { + self := runtime.newArrayObject(length) + self.prototype = runtime.global.ArrayPrototype + return self +} + +func (runtime *_runtime) newArrayOf(valueArray []Value) *_object { + self := runtime.newArray(uint32(len(valueArray))) + for index, value := range valueArray { + if value.isEmpty() { + continue + } + self.defineProperty(strconv.FormatInt(int64(index), 10), value, 0111, false) + } + return self +} + +func (runtime *_runtime) newString(value Value) *_object { + self := runtime.newStringObject(value) + self.prototype = runtime.global.StringPrototype + return self +} + +func (runtime *_runtime) newBoolean(value Value) *_object { + self := runtime.newBooleanObject(value) + self.prototype = runtime.global.BooleanPrototype + return self +} + +func (runtime *_runtime) newNumber(value Value) *_object { + self := runtime.newNumberObject(value) + self.prototype = runtime.global.NumberPrototype + return self +} + +func (runtime *_runtime) newRegExp(patternValue Value, flagsValue Value) *_object { + + pattern := "" + flags := "" + if object := patternValue._object(); object != nil && object.class == "RegExp" { + if flagsValue.IsDefined() { + panic(runtime.panicTypeError("Cannot supply flags when constructing one RegExp from another")) + } + regExp := object.regExpValue() + pattern = regExp.source + flags = regExp.flags + } else { + if patternValue.IsDefined() { + pattern = patternValue.string() + } + if flagsValue.IsDefined() { + flags = flagsValue.string() + } + } + + return runtime._newRegExp(pattern, flags) +} + +func (runtime *_runtime) _newRegExp(pattern string, flags string) *_object { + self := runtime.newRegExpObject(pattern, flags) + self.prototype = runtime.global.RegExpPrototype + return self +} + +// TODO Should (probably) be one argument, right? 
This is redundant +func (runtime *_runtime) newDate(epoch float64) *_object { + self := runtime.newDateObject(epoch) + self.prototype = runtime.global.DatePrototype + return self +} + +func (runtime *_runtime) newError(name string, message Value, stackFramesToPop int) *_object { + var self *_object + switch name { + case "EvalError": + return runtime.newEvalError(message) + case "TypeError": + return runtime.newTypeError(message) + case "RangeError": + return runtime.newRangeError(message) + case "ReferenceError": + return runtime.newReferenceError(message) + case "SyntaxError": + return runtime.newSyntaxError(message) + case "URIError": + return runtime.newURIError(message) + } + + self = runtime.newErrorObject(name, message, stackFramesToPop) + self.prototype = runtime.global.ErrorPrototype + if name != "" { + self.defineProperty("name", toValue_string(name), 0111, false) + } + return self +} + +func (runtime *_runtime) newNativeFunction(name, file string, line int, _nativeFunction _nativeFunction) *_object { + self := runtime.newNativeFunctionObject(name, file, line, _nativeFunction, 0) + self.prototype = runtime.global.FunctionPrototype + prototype := runtime.newObject() + self.defineProperty("prototype", toValue_object(prototype), 0100, false) + prototype.defineProperty("constructor", toValue_object(self), 0100, false) + return self +} + +func (runtime *_runtime) newNodeFunction(node *_nodeFunctionLiteral, scopeEnvironment _stash) *_object { + // TODO Implement 13.2 fully + self := runtime.newNodeFunctionObject(node, scopeEnvironment) + self.prototype = runtime.global.FunctionPrototype + prototype := runtime.newObject() + self.defineProperty("prototype", toValue_object(prototype), 0100, false) + prototype.defineProperty("constructor", toValue_object(self), 0101, false) + return self +} + +// FIXME Only in one place... 
+func (runtime *_runtime) newBoundFunction(target *_object, this Value, argumentList []Value) *_object { + self := runtime.newBoundFunctionObject(target, this, argumentList) + self.prototype = runtime.global.FunctionPrototype + prototype := runtime.newObject() + self.defineProperty("prototype", toValue_object(prototype), 0100, false) + prototype.defineProperty("constructor", toValue_object(self), 0100, false) + return self +} diff --git a/vendor/github.com/robertkrimen/otto/inline.go b/vendor/github.com/robertkrimen/otto/inline.go new file mode 100644 index 000000000..6e5df8393 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/inline.go @@ -0,0 +1,6649 @@ +package otto + +import ( + "math" +) + +func _newContext(runtime *_runtime) { + { + runtime.global.ObjectPrototype = &_object{ + runtime: runtime, + class: "Object", + objectClass: _classObject, + prototype: nil, + extensible: true, + value: prototypeValueObject, + } + } + { + runtime.global.FunctionPrototype = &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.ObjectPrototype, + extensible: true, + value: prototypeValueFunction, + } + } + { + valueOf_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "valueOf", + call: builtinObject_valueOf, + }, + } + toString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toString", + call: builtinObject_toString, + }, + } + toLocaleString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toLocaleString", + call: builtinObject_toLocaleString, + }, + } + hasOwnProperty_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "hasOwnProperty", + call: builtinObject_hasOwnProperty, + }, + } + isPrototypeOf_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "isPrototypeOf", + call: builtinObject_isPrototypeOf, + }, + } + propertyIsEnumerable_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: 
runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "propertyIsEnumerable", + call: builtinObject_propertyIsEnumerable, + }, + } + runtime.global.ObjectPrototype.property = map[string]_property{ + "valueOf": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: valueOf_function, + }, + }, + "toString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toString_function, + }, + }, + "toLocaleString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toLocaleString_function, + }, + }, + "hasOwnProperty": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: hasOwnProperty_function, + }, + }, + "isPrototypeOf": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: isPrototypeOf_function, + }, + }, + "propertyIsEnumerable": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: propertyIsEnumerable_function, + }, + }, + "constructor": _property{ + mode: 0101, + value: Value{}, + }, + } + runtime.global.ObjectPrototype.propertyOrder = []string{ + "valueOf", + "toString", + "toLocaleString", + "hasOwnProperty", + "isPrototypeOf", + "propertyIsEnumerable", + "constructor", + } + } + { + toString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toString", + call: builtinFunction_toString, + }, + } + apply_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "apply", + call: builtinFunction_apply, + }, + } + call_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "call", + call: builtinFunction_call, + }, + } + bind_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "bind", + call: builtinFunction_bind, + }, + } + runtime.global.FunctionPrototype.property = map[string]_property{ + "toString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toString_function, + }, + }, + "apply": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: apply_function, + }, + }, + "call": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: call_function, + }, + }, + 
"bind": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: bind_function, + }, + }, + "constructor": _property{ + mode: 0101, + value: Value{}, + }, + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + } + runtime.global.FunctionPrototype.propertyOrder = []string{ + "toString", + "apply", + "call", + "bind", + "constructor", + "length", + } + } + { + getPrototypeOf_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getPrototypeOf", + call: builtinObject_getPrototypeOf, + }, + } + getOwnPropertyDescriptor_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getOwnPropertyDescriptor", + call: builtinObject_getOwnPropertyDescriptor, + }, + } + defineProperty_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 3, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "defineProperty", + call: builtinObject_defineProperty, + }, + } + defineProperties_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "defineProperties", + call: builtinObject_defineProperties, + }, + } + create_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "create", + call: builtinObject_create, + }, + } + isExtensible_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "isExtensible", + call: builtinObject_isExtensible, + }, + } + preventExtensions_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + 
name: "preventExtensions", + call: builtinObject_preventExtensions, + }, + } + isSealed_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "isSealed", + call: builtinObject_isSealed, + }, + } + seal_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "seal", + call: builtinObject_seal, + }, + } + isFrozen_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "isFrozen", + call: builtinObject_isFrozen, + }, + } + freeze_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "freeze", + call: builtinObject_freeze, + }, + } + keys_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "keys", + call: builtinObject_keys, + }, + } + getOwnPropertyNames_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getOwnPropertyNames", + call: builtinObject_getOwnPropertyNames, + }, + } + runtime.global.Object = &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + value: _nativeFunctionObject{ + name: "Object", + call: builtinObject, + construct: builtinNewObject, + }, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + "prototype": _property{ + mode: 0, + value: Value{ + kind: valueObject, + value: runtime.global.ObjectPrototype, + }, + }, + "getPrototypeOf": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getPrototypeOf_function, + }, + }, + "getOwnPropertyDescriptor": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getOwnPropertyDescriptor_function, + }, + }, + "defineProperty": _property{ + 
mode: 0101, + value: Value{ + kind: valueObject, + value: defineProperty_function, + }, + }, + "defineProperties": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: defineProperties_function, + }, + }, + "create": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: create_function, + }, + }, + "isExtensible": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: isExtensible_function, + }, + }, + "preventExtensions": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: preventExtensions_function, + }, + }, + "isSealed": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: isSealed_function, + }, + }, + "seal": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: seal_function, + }, + }, + "isFrozen": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: isFrozen_function, + }, + }, + "freeze": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: freeze_function, + }, + }, + "keys": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: keys_function, + }, + }, + "getOwnPropertyNames": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getOwnPropertyNames_function, + }, + }, + }, + propertyOrder: []string{ + "length", + "prototype", + "getPrototypeOf", + "getOwnPropertyDescriptor", + "defineProperty", + "defineProperties", + "create", + "isExtensible", + "preventExtensions", + "isSealed", + "seal", + "isFrozen", + "freeze", + "keys", + "getOwnPropertyNames", + }, + } + runtime.global.ObjectPrototype.property["constructor"] = + _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.Object, + }, + } + } + { + Function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + value: _nativeFunctionObject{ + name: "Function", + call: builtinFunction, + construct: builtinNewFunction, + }, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + "prototype": _property{ + mode: 0, + value: Value{ + kind: valueObject, + value: runtime.global.FunctionPrototype, + }, + }, + }, + propertyOrder: []string{ + "length", + "prototype", + }, + } + runtime.global.Function = Function + runtime.global.FunctionPrototype.property["constructor"] = + _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.Function, + }, + } + } + { + toString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toString", + call: builtinArray_toString, + }, + } + toLocaleString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toLocaleString", + call: builtinArray_toLocaleString, + }, + } + concat_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + 
prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "concat", + call: builtinArray_concat, + }, + } + join_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "join", + call: builtinArray_join, + }, + } + splice_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "splice", + call: builtinArray_splice, + }, + } + shift_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "shift", + call: builtinArray_shift, + }, + } + pop_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "pop", + call: builtinArray_pop, + }, + } + push_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "push", + call: builtinArray_push, + }, + } + slice_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "slice", + call: builtinArray_slice, + }, + } + unshift_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "unshift", + call: builtinArray_unshift, + }, + } + reverse_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ 
+ mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "reverse", + call: builtinArray_reverse, + }, + } + sort_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "sort", + call: builtinArray_sort, + }, + } + indexOf_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "indexOf", + call: builtinArray_indexOf, + }, + } + lastIndexOf_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "lastIndexOf", + call: builtinArray_lastIndexOf, + }, + } + every_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "every", + call: builtinArray_every, + }, + } + some_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "some", + call: builtinArray_some, + }, + } + forEach_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "forEach", + call: builtinArray_forEach, + }, + } + map_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "map", + call: builtinArray_map, + }, + } + filter_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: 
[]string{ + "length", + }, + value: _nativeFunctionObject{ + name: "filter", + call: builtinArray_filter, + }, + } + reduce_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "reduce", + call: builtinArray_reduce, + }, + } + reduceRight_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "reduceRight", + call: builtinArray_reduceRight, + }, + } + isArray_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "isArray", + call: builtinArray_isArray, + }, + } + runtime.global.ArrayPrototype = &_object{ + runtime: runtime, + class: "Array", + objectClass: _classArray, + prototype: runtime.global.ObjectPrototype, + extensible: true, + value: nil, + property: map[string]_property{ + "length": _property{ + mode: 0100, + value: Value{ + kind: valueNumber, + value: uint32(0), + }, + }, + "toString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toString_function, + }, + }, + "toLocaleString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toLocaleString_function, + }, + }, + "concat": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: concat_function, + }, + }, + "join": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: join_function, + }, + }, + "splice": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: splice_function, + }, + }, + "shift": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: shift_function, + }, + }, + "pop": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: pop_function, + }, + }, + "push": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: push_function, + }, + }, + "slice": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: slice_function, + }, + }, + "unshift": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: unshift_function, + }, + }, + "reverse": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: reverse_function, + }, + }, + "sort": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: sort_function, + }, + }, + "indexOf": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: indexOf_function, + }, + }, + "lastIndexOf": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: lastIndexOf_function, + }, + }, + "every": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: every_function, + }, + }, + "some": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: some_function, + }, + }, + "forEach": _property{ + 
mode: 0101, + value: Value{ + kind: valueObject, + value: forEach_function, + }, + }, + "map": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: map_function, + }, + }, + "filter": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: filter_function, + }, + }, + "reduce": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: reduce_function, + }, + }, + "reduceRight": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: reduceRight_function, + }, + }, + }, + propertyOrder: []string{ + "length", + "toString", + "toLocaleString", + "concat", + "join", + "splice", + "shift", + "pop", + "push", + "slice", + "unshift", + "reverse", + "sort", + "indexOf", + "lastIndexOf", + "every", + "some", + "forEach", + "map", + "filter", + "reduce", + "reduceRight", + }, + } + runtime.global.Array = &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + value: _nativeFunctionObject{ + name: "Array", + call: builtinArray, + construct: builtinNewArray, + }, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + "prototype": _property{ + mode: 0, + value: Value{ + kind: valueObject, + value: runtime.global.ArrayPrototype, + }, + }, + "isArray": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: isArray_function, + }, + }, + }, + propertyOrder: []string{ + "length", + "prototype", + "isArray", + }, + } + runtime.global.ArrayPrototype.property["constructor"] = + _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.Array, + }, + } + } + { + toString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toString", + call: builtinString_toString, + }, + } + valueOf_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "valueOf", + call: builtinString_valueOf, + }, + } + charAt_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "charAt", + call: builtinString_charAt, + }, + } + charCodeAt_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "charCodeAt", + call: builtinString_charCodeAt, + }, + } + concat_function := &_object{ + runtime: runtime, + class: 
"Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "concat", + call: builtinString_concat, + }, + } + indexOf_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "indexOf", + call: builtinString_indexOf, + }, + } + lastIndexOf_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "lastIndexOf", + call: builtinString_lastIndexOf, + }, + } + match_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "match", + call: builtinString_match, + }, + } + replace_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "replace", + call: builtinString_replace, + }, + } + search_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "search", + call: builtinString_search, + }, + } + split_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "split", + call: builtinString_split, + }, + } + slice_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "slice", + call: builtinString_slice, + }, + } + substring_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: 
runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "substring", + call: builtinString_substring, + }, + } + toLowerCase_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toLowerCase", + call: builtinString_toLowerCase, + }, + } + toUpperCase_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toUpperCase", + call: builtinString_toUpperCase, + }, + } + substr_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "substr", + call: builtinString_substr, + }, + } + trim_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "trim", + call: builtinString_trim, + }, + } + trimLeft_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "trimLeft", + call: builtinString_trimLeft, + }, + } + trimRight_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "trimRight", + call: builtinString_trimRight, + }, + } + localeCompare_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "localeCompare", + call: builtinString_localeCompare, + }, + } + toLocaleLowerCase_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: 
runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toLocaleLowerCase", + call: builtinString_toLocaleLowerCase, + }, + } + toLocaleUpperCase_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toLocaleUpperCase", + call: builtinString_toLocaleUpperCase, + }, + } + fromCharCode_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "fromCharCode", + call: builtinString_fromCharCode, + }, + } + runtime.global.StringPrototype = &_object{ + runtime: runtime, + class: "String", + objectClass: _classString, + prototype: runtime.global.ObjectPrototype, + extensible: true, + value: prototypeValueString, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: int(0), + }, + }, + "toString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toString_function, + }, + }, + "valueOf": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: valueOf_function, + }, + }, + "charAt": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: charAt_function, + }, + }, + "charCodeAt": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: charCodeAt_function, + }, + }, + "concat": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: concat_function, + }, + }, + "indexOf": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: indexOf_function, + }, + }, + "lastIndexOf": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: lastIndexOf_function, + }, + }, + "match": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: match_function, + }, + }, + "replace": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: replace_function, + }, + }, + "search": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: search_function, + }, + }, + "split": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: split_function, + }, + }, + "slice": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: slice_function, + }, + }, + "substring": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: substring_function, + }, + }, + "toLowerCase": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toLowerCase_function, + }, + }, + "toUpperCase": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toUpperCase_function, + }, + }, + "substr": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: substr_function, + }, + }, + "trim": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: trim_function, + }, + }, + "trimLeft": _property{ + mode: 0101, + 
value: Value{ + kind: valueObject, + value: trimLeft_function, + }, + }, + "trimRight": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: trimRight_function, + }, + }, + "localeCompare": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: localeCompare_function, + }, + }, + "toLocaleLowerCase": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toLocaleLowerCase_function, + }, + }, + "toLocaleUpperCase": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toLocaleUpperCase_function, + }, + }, + }, + propertyOrder: []string{ + "length", + "toString", + "valueOf", + "charAt", + "charCodeAt", + "concat", + "indexOf", + "lastIndexOf", + "match", + "replace", + "search", + "split", + "slice", + "substring", + "toLowerCase", + "toUpperCase", + "substr", + "trim", + "trimLeft", + "trimRight", + "localeCompare", + "toLocaleLowerCase", + "toLocaleUpperCase", + }, + } + runtime.global.String = &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + value: _nativeFunctionObject{ + name: "String", + call: builtinString, + construct: builtinNewString, + }, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + "prototype": _property{ + mode: 0, + value: Value{ + kind: valueObject, + value: runtime.global.StringPrototype, + }, + }, + "fromCharCode": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: fromCharCode_function, + }, + }, + }, + propertyOrder: []string{ + "length", + "prototype", + "fromCharCode", + }, + } + runtime.global.StringPrototype.property["constructor"] = + _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.String, + }, + } + } + { + toString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toString", + call: builtinBoolean_toString, + }, + } + valueOf_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "valueOf", + call: builtinBoolean_valueOf, + }, + } + runtime.global.BooleanPrototype = &_object{ + runtime: runtime, + class: "Boolean", + objectClass: _classObject, + prototype: runtime.global.ObjectPrototype, + extensible: true, + value: prototypeValueBoolean, + property: map[string]_property{ + "toString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toString_function, + }, + }, + "valueOf": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: valueOf_function, + }, + }, + }, + propertyOrder: []string{ + "toString", + "valueOf", + }, + } + runtime.global.Boolean = &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + value: _nativeFunctionObject{ + name: "Boolean", + call: builtinBoolean, + construct: 
builtinNewBoolean, + }, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + "prototype": _property{ + mode: 0, + value: Value{ + kind: valueObject, + value: runtime.global.BooleanPrototype, + }, + }, + }, + propertyOrder: []string{ + "length", + "prototype", + }, + } + runtime.global.BooleanPrototype.property["constructor"] = + _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.Boolean, + }, + } + } + { + toString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toString", + call: builtinNumber_toString, + }, + } + valueOf_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "valueOf", + call: builtinNumber_valueOf, + }, + } + toFixed_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toFixed", + call: builtinNumber_toFixed, + }, + } + toExponential_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toExponential", + call: builtinNumber_toExponential, + }, + } + toPrecision_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toPrecision", + call: builtinNumber_toPrecision, + }, + } + toLocaleString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toLocaleString", + call: builtinNumber_toLocaleString, + }, + } + runtime.global.NumberPrototype = &_object{ + runtime: runtime, + class: "Number", + objectClass: _classObject, + prototype: runtime.global.ObjectPrototype, + extensible: true, + value: prototypeValueNumber, + property: map[string]_property{ + "toString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toString_function, + }, + }, + 
"valueOf": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: valueOf_function, + }, + }, + "toFixed": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toFixed_function, + }, + }, + "toExponential": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toExponential_function, + }, + }, + "toPrecision": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toPrecision_function, + }, + }, + "toLocaleString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toLocaleString_function, + }, + }, + }, + propertyOrder: []string{ + "toString", + "valueOf", + "toFixed", + "toExponential", + "toPrecision", + "toLocaleString", + }, + } + runtime.global.Number = &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + value: _nativeFunctionObject{ + name: "Number", + call: builtinNumber, + construct: builtinNewNumber, + }, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + "prototype": _property{ + mode: 0, + value: Value{ + kind: valueObject, + value: runtime.global.NumberPrototype, + }, + }, + "MAX_VALUE": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: math.MaxFloat64, + }, + }, + "MIN_VALUE": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: math.SmallestNonzeroFloat64, + }, + }, + "NaN": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: math.NaN(), + }, + }, + "NEGATIVE_INFINITY": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: math.Inf(-1), + }, + }, + "POSITIVE_INFINITY": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: math.Inf(+1), + }, + }, + }, + propertyOrder: []string{ + "length", + "prototype", + "MAX_VALUE", + "MIN_VALUE", + "NaN", + "NEGATIVE_INFINITY", + "POSITIVE_INFINITY", + }, + } + runtime.global.NumberPrototype.property["constructor"] = + _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.Number, + }, + } + } + { + abs_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "abs", + call: builtinMath_abs, + }, + } + acos_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "acos", + call: builtinMath_acos, + }, + } + asin_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "asin", + call: builtinMath_asin, + }, + } + atan_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: 
runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "atan", + call: builtinMath_atan, + }, + } + atan2_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "atan2", + call: builtinMath_atan2, + }, + } + ceil_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "ceil", + call: builtinMath_ceil, + }, + } + cos_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "cos", + call: builtinMath_cos, + }, + } + exp_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "exp", + call: builtinMath_exp, + }, + } + floor_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "floor", + call: builtinMath_floor, + }, + } + log_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "log", + call: builtinMath_log, + }, + } + max_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "max", + call: builtinMath_max, + }, + } + min_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + 
value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "min", + call: builtinMath_min, + }, + } + pow_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "pow", + call: builtinMath_pow, + }, + } + random_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "random", + call: builtinMath_random, + }, + } + round_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "round", + call: builtinMath_round, + }, + } + sin_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "sin", + call: builtinMath_sin, + }, + } + sqrt_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "sqrt", + call: builtinMath_sqrt, + }, + } + tan_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "tan", + call: builtinMath_tan, + }, + } + runtime.global.Math = &_object{ + runtime: runtime, + class: "Math", + objectClass: _classObject, + prototype: runtime.global.ObjectPrototype, + extensible: true, + property: map[string]_property{ + "abs": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: abs_function, + }, + }, + "acos": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: acos_function, + }, + }, + "asin": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: asin_function, + }, + }, + "atan": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: atan_function, + }, + }, + "atan2": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: atan2_function, + }, + }, + "ceil": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: ceil_function, + }, + }, + "cos": _property{ 
+ mode: 0101, + value: Value{ + kind: valueObject, + value: cos_function, + }, + }, + "exp": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: exp_function, + }, + }, + "floor": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: floor_function, + }, + }, + "log": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: log_function, + }, + }, + "max": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: max_function, + }, + }, + "min": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: min_function, + }, + }, + "pow": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: pow_function, + }, + }, + "random": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: random_function, + }, + }, + "round": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: round_function, + }, + }, + "sin": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: sin_function, + }, + }, + "sqrt": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: sqrt_function, + }, + }, + "tan": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: tan_function, + }, + }, + "E": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: math.E, + }, + }, + "LN10": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: math.Ln10, + }, + }, + "LN2": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: math.Ln2, + }, + }, + "LOG2E": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: math.Log2E, + }, + }, + "LOG10E": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: math.Log10E, + }, + }, + "PI": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: math.Pi, + }, + }, + "SQRT1_2": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: sqrt1_2, + }, + }, + "SQRT2": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: math.Sqrt2, + }, + }, + }, + propertyOrder: []string{ + "abs", + "acos", + "asin", + "atan", + "atan2", + "ceil", + "cos", + "exp", + "floor", + "log", + "max", + "min", + "pow", + "random", + "round", + "sin", + "sqrt", + "tan", + "E", + "LN10", + "LN2", + "LOG2E", + "LOG10E", + "PI", + "SQRT1_2", + "SQRT2", + }, + } + } + { + toString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toString", + call: builtinDate_toString, + }, + } + toDateString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toDateString", + call: builtinDate_toDateString, + }, + } + toTimeString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: 
[]string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toTimeString", + call: builtinDate_toTimeString, + }, + } + toUTCString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toUTCString", + call: builtinDate_toUTCString, + }, + } + toISOString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toISOString", + call: builtinDate_toISOString, + }, + } + toJSON_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toJSON", + call: builtinDate_toJSON, + }, + } + toGMTString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toGMTString", + call: builtinDate_toGMTString, + }, + } + toLocaleString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toLocaleString", + call: builtinDate_toLocaleString, + }, + } + toLocaleDateString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toLocaleDateString", + call: builtinDate_toLocaleDateString, + }, + } + toLocaleTimeString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toLocaleTimeString", + call: builtinDate_toLocaleTimeString, + }, + } + valueOf_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: 
valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "valueOf", + call: builtinDate_valueOf, + }, + } + getTime_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getTime", + call: builtinDate_getTime, + }, + } + getYear_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getYear", + call: builtinDate_getYear, + }, + } + getFullYear_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getFullYear", + call: builtinDate_getFullYear, + }, + } + getUTCFullYear_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getUTCFullYear", + call: builtinDate_getUTCFullYear, + }, + } + getMonth_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getMonth", + call: builtinDate_getMonth, + }, + } + getUTCMonth_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getUTCMonth", + call: builtinDate_getUTCMonth, + }, + } + getDate_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getDate", + call: builtinDate_getDate, + }, + } + getUTCDate_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + 
}, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getUTCDate", + call: builtinDate_getUTCDate, + }, + } + getDay_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getDay", + call: builtinDate_getDay, + }, + } + getUTCDay_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getUTCDay", + call: builtinDate_getUTCDay, + }, + } + getHours_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getHours", + call: builtinDate_getHours, + }, + } + getUTCHours_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getUTCHours", + call: builtinDate_getUTCHours, + }, + } + getMinutes_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getMinutes", + call: builtinDate_getMinutes, + }, + } + getUTCMinutes_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getUTCMinutes", + call: builtinDate_getUTCMinutes, + }, + } + getSeconds_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getSeconds", + call: builtinDate_getSeconds, + }, + } + getUTCSeconds_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + 
propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getUTCSeconds", + call: builtinDate_getUTCSeconds, + }, + } + getMilliseconds_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getMilliseconds", + call: builtinDate_getMilliseconds, + }, + } + getUTCMilliseconds_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getUTCMilliseconds", + call: builtinDate_getUTCMilliseconds, + }, + } + getTimezoneOffset_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "getTimezoneOffset", + call: builtinDate_getTimezoneOffset, + }, + } + setTime_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "setTime", + call: builtinDate_setTime, + }, + } + setMilliseconds_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "setMilliseconds", + call: builtinDate_setMilliseconds, + }, + } + setUTCMilliseconds_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "setUTCMilliseconds", + call: builtinDate_setUTCMilliseconds, + }, + } + setSeconds_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "setSeconds", + call: builtinDate_setSeconds, + }, + } + setUTCSeconds_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": 
_property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "setUTCSeconds", + call: builtinDate_setUTCSeconds, + }, + } + setMinutes_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 3, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "setMinutes", + call: builtinDate_setMinutes, + }, + } + setUTCMinutes_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 3, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "setUTCMinutes", + call: builtinDate_setUTCMinutes, + }, + } + setHours_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 4, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "setHours", + call: builtinDate_setHours, + }, + } + setUTCHours_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 4, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "setUTCHours", + call: builtinDate_setUTCHours, + }, + } + setDate_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "setDate", + call: builtinDate_setDate, + }, + } + setUTCDate_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "setUTCDate", + call: builtinDate_setUTCDate, + }, + } + setMonth_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "setMonth", + call: builtinDate_setMonth, + }, + } + setUTCMonth_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + 
mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "setUTCMonth", + call: builtinDate_setUTCMonth, + }, + } + setYear_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "setYear", + call: builtinDate_setYear, + }, + } + setFullYear_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 3, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "setFullYear", + call: builtinDate_setFullYear, + }, + } + setUTCFullYear_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 3, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "setUTCFullYear", + call: builtinDate_setUTCFullYear, + }, + } + parse_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "parse", + call: builtinDate_parse, + }, + } + UTC_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 7, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "UTC", + call: builtinDate_UTC, + }, + } + now_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "now", + call: builtinDate_now, + }, + } + runtime.global.DatePrototype = &_object{ + runtime: runtime, + class: "Date", + objectClass: _classObject, + prototype: runtime.global.ObjectPrototype, + extensible: true, + value: prototypeValueDate, + property: map[string]_property{ + "toString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toString_function, + }, + }, + "toDateString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toDateString_function, + }, + }, + "toTimeString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toTimeString_function, + }, + }, + "toUTCString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toUTCString_function, + }, + }, + "toISOString": 
_property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toISOString_function, + }, + }, + "toJSON": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toJSON_function, + }, + }, + "toGMTString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toGMTString_function, + }, + }, + "toLocaleString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toLocaleString_function, + }, + }, + "toLocaleDateString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toLocaleDateString_function, + }, + }, + "toLocaleTimeString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toLocaleTimeString_function, + }, + }, + "valueOf": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: valueOf_function, + }, + }, + "getTime": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getTime_function, + }, + }, + "getYear": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getYear_function, + }, + }, + "getFullYear": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getFullYear_function, + }, + }, + "getUTCFullYear": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getUTCFullYear_function, + }, + }, + "getMonth": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getMonth_function, + }, + }, + "getUTCMonth": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getUTCMonth_function, + }, + }, + "getDate": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getDate_function, + }, + }, + "getUTCDate": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getUTCDate_function, + }, + }, + "getDay": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getDay_function, + }, + }, + "getUTCDay": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getUTCDay_function, + }, + }, + "getHours": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getHours_function, + }, + }, + "getUTCHours": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getUTCHours_function, + }, + }, + "getMinutes": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getMinutes_function, + }, + }, + "getUTCMinutes": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getUTCMinutes_function, + }, + }, + "getSeconds": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getSeconds_function, + }, + }, + "getUTCSeconds": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getUTCSeconds_function, + }, + }, + "getMilliseconds": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getMilliseconds_function, + }, + }, + "getUTCMilliseconds": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getUTCMilliseconds_function, + }, + }, + "getTimezoneOffset": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: getTimezoneOffset_function, + }, + }, + "setTime": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: setTime_function, + }, + }, + "setMilliseconds": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: setMilliseconds_function, + }, + }, + "setUTCMilliseconds": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: setUTCMilliseconds_function, + }, + }, + "setSeconds": _property{ + mode: 0101, + value: Value{ + kind: 
valueObject, + value: setSeconds_function, + }, + }, + "setUTCSeconds": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: setUTCSeconds_function, + }, + }, + "setMinutes": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: setMinutes_function, + }, + }, + "setUTCMinutes": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: setUTCMinutes_function, + }, + }, + "setHours": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: setHours_function, + }, + }, + "setUTCHours": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: setUTCHours_function, + }, + }, + "setDate": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: setDate_function, + }, + }, + "setUTCDate": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: setUTCDate_function, + }, + }, + "setMonth": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: setMonth_function, + }, + }, + "setUTCMonth": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: setUTCMonth_function, + }, + }, + "setYear": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: setYear_function, + }, + }, + "setFullYear": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: setFullYear_function, + }, + }, + "setUTCFullYear": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: setUTCFullYear_function, + }, + }, + }, + propertyOrder: []string{ + "toString", + "toDateString", + "toTimeString", + "toUTCString", + "toISOString", + "toJSON", + "toGMTString", + "toLocaleString", + "toLocaleDateString", + "toLocaleTimeString", + "valueOf", + "getTime", + "getYear", + "getFullYear", + "getUTCFullYear", + "getMonth", + "getUTCMonth", + "getDate", + "getUTCDate", + "getDay", + "getUTCDay", + "getHours", + "getUTCHours", + "getMinutes", + "getUTCMinutes", + "getSeconds", + "getUTCSeconds", + "getMilliseconds", + "getUTCMilliseconds", + "getTimezoneOffset", + "setTime", + "setMilliseconds", + "setUTCMilliseconds", + "setSeconds", + "setUTCSeconds", + "setMinutes", + "setUTCMinutes", + "setHours", + "setUTCHours", + "setDate", + "setUTCDate", + "setMonth", + "setUTCMonth", + "setYear", + "setFullYear", + "setUTCFullYear", + }, + } + runtime.global.Date = &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + value: _nativeFunctionObject{ + name: "Date", + call: builtinDate, + construct: builtinNewDate, + }, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 7, + }, + }, + "prototype": _property{ + mode: 0, + value: Value{ + kind: valueObject, + value: runtime.global.DatePrototype, + }, + }, + "parse": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: parse_function, + }, + }, + "UTC": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: UTC_function, + }, + }, + "now": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: now_function, + }, + }, + }, + propertyOrder: []string{ + "length", + "prototype", + "parse", + "UTC", + "now", + }, + } + runtime.global.DatePrototype.property["constructor"] = + _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.Date, + }, + } + } + { + toString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + 
extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toString", + call: builtinRegExp_toString, + }, + } + exec_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "exec", + call: builtinRegExp_exec, + }, + } + test_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "test", + call: builtinRegExp_test, + }, + } + compile_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "compile", + call: builtinRegExp_compile, + }, + } + runtime.global.RegExpPrototype = &_object{ + runtime: runtime, + class: "RegExp", + objectClass: _classObject, + prototype: runtime.global.ObjectPrototype, + extensible: true, + value: prototypeValueRegExp, + property: map[string]_property{ + "toString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toString_function, + }, + }, + "exec": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: exec_function, + }, + }, + "test": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: test_function, + }, + }, + "compile": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: compile_function, + }, + }, + }, + propertyOrder: []string{ + "toString", + "exec", + "test", + "compile", + }, + } + runtime.global.RegExp = &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + value: _nativeFunctionObject{ + name: "RegExp", + call: builtinRegExp, + construct: builtinNewRegExp, + }, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + "prototype": _property{ + mode: 0, + value: Value{ + kind: valueObject, + value: runtime.global.RegExpPrototype, + }, + }, + }, + propertyOrder: []string{ + "length", + "prototype", + }, + } + runtime.global.RegExpPrototype.property["constructor"] = + _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.RegExp, + }, + } + } + { + toString_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "toString", + call: builtinError_toString, + }, 
+ } + runtime.global.ErrorPrototype = &_object{ + runtime: runtime, + class: "Error", + objectClass: _classObject, + prototype: runtime.global.ObjectPrototype, + extensible: true, + value: nil, + property: map[string]_property{ + "toString": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: toString_function, + }, + }, + "name": _property{ + mode: 0101, + value: Value{ + kind: valueString, + value: "Error", + }, + }, + "message": _property{ + mode: 0101, + value: Value{ + kind: valueString, + value: "", + }, + }, + }, + propertyOrder: []string{ + "toString", + "name", + "message", + }, + } + runtime.global.Error = &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + value: _nativeFunctionObject{ + name: "Error", + call: builtinError, + construct: builtinNewError, + }, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + "prototype": _property{ + mode: 0, + value: Value{ + kind: valueObject, + value: runtime.global.ErrorPrototype, + }, + }, + }, + propertyOrder: []string{ + "length", + "prototype", + }, + } + runtime.global.ErrorPrototype.property["constructor"] = + _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.Error, + }, + } + } + { + runtime.global.EvalErrorPrototype = &_object{ + runtime: runtime, + class: "EvalError", + objectClass: _classObject, + prototype: runtime.global.ErrorPrototype, + extensible: true, + value: nil, + property: map[string]_property{ + "name": _property{ + mode: 0101, + value: Value{ + kind: valueString, + value: "EvalError", + }, + }, + }, + propertyOrder: []string{ + "name", + }, + } + runtime.global.EvalError = &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + value: _nativeFunctionObject{ + name: "EvalError", + call: builtinEvalError, + construct: builtinNewEvalError, + }, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + "prototype": _property{ + mode: 0, + value: Value{ + kind: valueObject, + value: runtime.global.EvalErrorPrototype, + }, + }, + }, + propertyOrder: []string{ + "length", + "prototype", + }, + } + runtime.global.EvalErrorPrototype.property["constructor"] = + _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.EvalError, + }, + } + } + { + runtime.global.TypeErrorPrototype = &_object{ + runtime: runtime, + class: "TypeError", + objectClass: _classObject, + prototype: runtime.global.ErrorPrototype, + extensible: true, + value: nil, + property: map[string]_property{ + "name": _property{ + mode: 0101, + value: Value{ + kind: valueString, + value: "TypeError", + }, + }, + }, + propertyOrder: []string{ + "name", + }, + } + runtime.global.TypeError = &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + value: _nativeFunctionObject{ + name: "TypeError", + call: builtinTypeError, + construct: builtinNewTypeError, + }, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + "prototype": _property{ + mode: 0, + value: Value{ + kind: valueObject, + value: runtime.global.TypeErrorPrototype, + }, + }, + }, + propertyOrder: []string{ + "length", + 
"prototype", + }, + } + runtime.global.TypeErrorPrototype.property["constructor"] = + _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.TypeError, + }, + } + } + { + runtime.global.RangeErrorPrototype = &_object{ + runtime: runtime, + class: "RangeError", + objectClass: _classObject, + prototype: runtime.global.ErrorPrototype, + extensible: true, + value: nil, + property: map[string]_property{ + "name": _property{ + mode: 0101, + value: Value{ + kind: valueString, + value: "RangeError", + }, + }, + }, + propertyOrder: []string{ + "name", + }, + } + runtime.global.RangeError = &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + value: _nativeFunctionObject{ + name: "RangeError", + call: builtinRangeError, + construct: builtinNewRangeError, + }, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + "prototype": _property{ + mode: 0, + value: Value{ + kind: valueObject, + value: runtime.global.RangeErrorPrototype, + }, + }, + }, + propertyOrder: []string{ + "length", + "prototype", + }, + } + runtime.global.RangeErrorPrototype.property["constructor"] = + _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.RangeError, + }, + } + } + { + runtime.global.ReferenceErrorPrototype = &_object{ + runtime: runtime, + class: "ReferenceError", + objectClass: _classObject, + prototype: runtime.global.ErrorPrototype, + extensible: true, + value: nil, + property: map[string]_property{ + "name": _property{ + mode: 0101, + value: Value{ + kind: valueString, + value: "ReferenceError", + }, + }, + }, + propertyOrder: []string{ + "name", + }, + } + runtime.global.ReferenceError = &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + value: _nativeFunctionObject{ + name: "ReferenceError", + call: builtinReferenceError, + construct: builtinNewReferenceError, + }, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + "prototype": _property{ + mode: 0, + value: Value{ + kind: valueObject, + value: runtime.global.ReferenceErrorPrototype, + }, + }, + }, + propertyOrder: []string{ + "length", + "prototype", + }, + } + runtime.global.ReferenceErrorPrototype.property["constructor"] = + _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.ReferenceError, + }, + } + } + { + runtime.global.SyntaxErrorPrototype = &_object{ + runtime: runtime, + class: "SyntaxError", + objectClass: _classObject, + prototype: runtime.global.ErrorPrototype, + extensible: true, + value: nil, + property: map[string]_property{ + "name": _property{ + mode: 0101, + value: Value{ + kind: valueString, + value: "SyntaxError", + }, + }, + }, + propertyOrder: []string{ + "name", + }, + } + runtime.global.SyntaxError = &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + value: _nativeFunctionObject{ + name: "SyntaxError", + call: builtinSyntaxError, + construct: builtinNewSyntaxError, + }, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + "prototype": _property{ + mode: 0, + value: Value{ + kind: valueObject, + value: 
runtime.global.SyntaxErrorPrototype, + }, + }, + }, + propertyOrder: []string{ + "length", + "prototype", + }, + } + runtime.global.SyntaxErrorPrototype.property["constructor"] = + _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.SyntaxError, + }, + } + } + { + runtime.global.URIErrorPrototype = &_object{ + runtime: runtime, + class: "URIError", + objectClass: _classObject, + prototype: runtime.global.ErrorPrototype, + extensible: true, + value: nil, + property: map[string]_property{ + "name": _property{ + mode: 0101, + value: Value{ + kind: valueString, + value: "URIError", + }, + }, + }, + propertyOrder: []string{ + "name", + }, + } + runtime.global.URIError = &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + value: _nativeFunctionObject{ + name: "URIError", + call: builtinURIError, + construct: builtinNewURIError, + }, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + "prototype": _property{ + mode: 0, + value: Value{ + kind: valueObject, + value: runtime.global.URIErrorPrototype, + }, + }, + }, + propertyOrder: []string{ + "length", + "prototype", + }, + } + runtime.global.URIErrorPrototype.property["constructor"] = + _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.URIError, + }, + } + } + { + parse_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "parse", + call: builtinJSON_parse, + }, + } + stringify_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 3, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "stringify", + call: builtinJSON_stringify, + }, + } + runtime.global.JSON = &_object{ + runtime: runtime, + class: "JSON", + objectClass: _classObject, + prototype: runtime.global.ObjectPrototype, + extensible: true, + property: map[string]_property{ + "parse": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: parse_function, + }, + }, + "stringify": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: stringify_function, + }, + }, + }, + propertyOrder: []string{ + "parse", + "stringify", + }, + } + } + { + eval_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "eval", + call: builtinGlobal_eval, + }, + } + parseInt_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 2, + }, + 
}, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "parseInt", + call: builtinGlobal_parseInt, + }, + } + parseFloat_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "parseFloat", + call: builtinGlobal_parseFloat, + }, + } + isNaN_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "isNaN", + call: builtinGlobal_isNaN, + }, + } + isFinite_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "isFinite", + call: builtinGlobal_isFinite, + }, + } + decodeURI_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "decodeURI", + call: builtinGlobal_decodeURI, + }, + } + decodeURIComponent_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "decodeURIComponent", + call: builtinGlobal_decodeURIComponent, + }, + } + encodeURI_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "encodeURI", + call: builtinGlobal_encodeURI, + }, + } + encodeURIComponent_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "encodeURIComponent", + call: builtinGlobal_encodeURIComponent, + }, + } + escape_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, 
+ value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "escape", + call: builtinGlobal_escape, + }, + } + unescape_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 1, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "unescape", + call: builtinGlobal_unescape, + }, + } + runtime.globalObject.property = map[string]_property{ + "eval": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: eval_function, + }, + }, + "parseInt": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: parseInt_function, + }, + }, + "parseFloat": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: parseFloat_function, + }, + }, + "isNaN": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: isNaN_function, + }, + }, + "isFinite": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: isFinite_function, + }, + }, + "decodeURI": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: decodeURI_function, + }, + }, + "decodeURIComponent": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: decodeURIComponent_function, + }, + }, + "encodeURI": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: encodeURI_function, + }, + }, + "encodeURIComponent": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: encodeURIComponent_function, + }, + }, + "escape": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: escape_function, + }, + }, + "unescape": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: unescape_function, + }, + }, + "Object": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.Object, + }, + }, + "Function": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.Function, + }, + }, + "Array": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.Array, + }, + }, + "String": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.String, + }, + }, + "Boolean": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.Boolean, + }, + }, + "Number": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.Number, + }, + }, + "Math": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.Math, + }, + }, + "Date": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.Date, + }, + }, + "RegExp": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.RegExp, + }, + }, + "Error": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.Error, + }, + }, + "EvalError": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.EvalError, + }, + }, + "TypeError": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.TypeError, + }, + }, + "RangeError": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.RangeError, + }, + }, + "ReferenceError": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: 
runtime.global.ReferenceError, + }, + }, + "SyntaxError": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.SyntaxError, + }, + }, + "URIError": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.URIError, + }, + }, + "JSON": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: runtime.global.JSON, + }, + }, + "undefined": _property{ + mode: 0, + value: Value{ + kind: valueUndefined, + }, + }, + "NaN": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: math.NaN(), + }, + }, + "Infinity": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: math.Inf(+1), + }, + }, + } + runtime.globalObject.propertyOrder = []string{ + "eval", + "parseInt", + "parseFloat", + "isNaN", + "isFinite", + "decodeURI", + "decodeURIComponent", + "encodeURI", + "encodeURIComponent", + "escape", + "unescape", + "Object", + "Function", + "Array", + "String", + "Boolean", + "Number", + "Math", + "Date", + "RegExp", + "Error", + "EvalError", + "TypeError", + "RangeError", + "ReferenceError", + "SyntaxError", + "URIError", + "JSON", + "undefined", + "NaN", + "Infinity", + } + } +} + +func newConsoleObject(runtime *_runtime) *_object { + { + log_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "log", + call: builtinConsole_log, + }, + } + debug_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "debug", + call: builtinConsole_log, + }, + } + info_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "info", + call: builtinConsole_log, + }, + } + error_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "error", + call: builtinConsole_error, + }, + } + warn_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "warn", + call: builtinConsole_error, + }, + } + dir_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + 
property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "dir", + call: builtinConsole_dir, + }, + } + time_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "time", + call: builtinConsole_time, + }, + } + timeEnd_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "timeEnd", + call: builtinConsole_timeEnd, + }, + } + trace_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "trace", + call: builtinConsole_trace, + }, + } + assert_function := &_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: map[string]_property{ + "length": _property{ + mode: 0, + value: Value{ + kind: valueNumber, + value: 0, + }, + }, + }, + propertyOrder: []string{ + "length", + }, + value: _nativeFunctionObject{ + name: "assert", + call: builtinConsole_assert, + }, + } + return &_object{ + runtime: runtime, + class: "Object", + objectClass: _classObject, + prototype: runtime.global.ObjectPrototype, + extensible: true, + property: map[string]_property{ + "log": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: log_function, + }, + }, + "debug": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: debug_function, + }, + }, + "info": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: info_function, + }, + }, + "error": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: error_function, + }, + }, + "warn": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: warn_function, + }, + }, + "dir": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: dir_function, + }, + }, + "time": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: time_function, + }, + }, + "timeEnd": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: timeEnd_function, + }, + }, + "trace": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: trace_function, + }, + }, + "assert": _property{ + mode: 0101, + value: Value{ + kind: valueObject, + value: assert_function, + }, + }, + }, + propertyOrder: []string{ + "log", + "debug", + "info", + "error", + "warn", + "dir", + "time", + "timeEnd", + "trace", + "assert", + }, + } + } +} + +func toValue_int(value int) Value { + return Value{ + kind: valueNumber, + value: value, + } +} + +func toValue_int8(value int8) Value { + return 
Value{ + kind: valueNumber, + value: value, + } +} + +func toValue_int16(value int16) Value { + return Value{ + kind: valueNumber, + value: value, + } +} + +func toValue_int32(value int32) Value { + return Value{ + kind: valueNumber, + value: value, + } +} + +func toValue_int64(value int64) Value { + return Value{ + kind: valueNumber, + value: value, + } +} + +func toValue_uint(value uint) Value { + return Value{ + kind: valueNumber, + value: value, + } +} + +func toValue_uint8(value uint8) Value { + return Value{ + kind: valueNumber, + value: value, + } +} + +func toValue_uint16(value uint16) Value { + return Value{ + kind: valueNumber, + value: value, + } +} + +func toValue_uint32(value uint32) Value { + return Value{ + kind: valueNumber, + value: value, + } +} + +func toValue_uint64(value uint64) Value { + return Value{ + kind: valueNumber, + value: value, + } +} + +func toValue_float32(value float32) Value { + return Value{ + kind: valueNumber, + value: value, + } +} + +func toValue_float64(value float64) Value { + return Value{ + kind: valueNumber, + value: value, + } +} + +func toValue_string(value string) Value { + return Value{ + kind: valueString, + value: value, + } +} + +func toValue_string16(value []uint16) Value { + return Value{ + kind: valueString, + value: value, + } +} + +func toValue_bool(value bool) Value { + return Value{ + kind: valueBoolean, + value: value, + } +} + +func toValue_object(value *_object) Value { + return Value{ + kind: valueObject, + value: value, + } +} diff --git a/vendor/github.com/robertkrimen/otto/inline.pl b/vendor/github.com/robertkrimen/otto/inline.pl new file mode 100755 index 000000000..c3620b4a2 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/inline.pl @@ -0,0 +1,1086 @@ +#!/usr/bin/env perl + +my $_fmt; +$_fmt = "gofmt"; +$_fmt = "cat -n" if "cat" eq ($ARGV[0] || ""); + +use strict; +use warnings; +use IO::File; + +my $self = __PACKAGE__; + +sub functionLabel ($) { + return "$_[0]_function"; +} + +sub trim ($) { + local $_ = shift; + s/^\s*//, s/\s*$// for $_; + return $_; +} + +open my $fmt, "|-", "$_fmt" or die $!; + +$fmt->print(<<_END_); +package otto + +import ( + "math" +) + +func _newContext(runtime *_runtime) { +@{[ join "\n", $self->newContext() ]} +} + +func newConsoleObject(runtime *_runtime) *_object { +@{[ join "\n", $self->newConsoleObject() ]} +} +_END_ + +for (qw/int int8 int16 int32 int64 uint uint8 uint16 uint32 uint64 float32 float64/) { + $fmt->print(<<_END_); + +func toValue_$_(value $_) Value { + return Value{ + kind: valueNumber, + value: value, + } +} +_END_ +} + +$fmt->print(<<_END_); + +func toValue_string(value string) Value { + return Value{ + kind: valueString, + value: value, + } +} + +func toValue_string16(value []uint16) Value { + return Value{ + kind: valueString, + value: value, + } +} + +func toValue_bool(value bool) Value { + return Value{ + kind: valueBoolean, + value: value, + } +} + +func toValue_object(value *_object) Value { + return Value{ + kind: valueObject, + value: value, + } +} +_END_ + +close $fmt; + +sub newConsoleObject { + my $self = shift; + + return + $self->block(sub { + my $class = "Console"; + my @got = $self->functionDeclare( + $class, + "log", 0, + "debug:log", 0, + "info:log", 0, + "error", 0, + "warn:error", 0, + "dir", 0, + "time", 0, + "timeEnd", 0, + "trace", 0, + "assert", 0, + ); + return + "return @{[ $self->newObject(@got) ]}" + }), + ; +} + +sub newContext { + my $self = shift; + return + # ObjectPrototype + $self->block(sub { + my $class = "Object"; + return + 
".${class}Prototype =", + $self->globalPrototype( + $class, + "_classObject", + undef, + "prototypeValueObject", + ), + }), + + # FunctionPrototype + $self->block(sub { + my $class = "Function"; + return + ".${class}Prototype =", + $self->globalPrototype( + $class, + "_classObject", + ".ObjectPrototype", + "prototypeValueFunction", + ), + }), + + # ObjectPrototype + $self->block(sub { + my $class = "Object"; + my @got = $self->functionDeclare( + $class, + "valueOf", 0, + "toString", 0, + "toLocaleString", 0, + "hasOwnProperty", 1, + "isPrototypeOf", 1, + "propertyIsEnumerable", 1, + ); + my @propertyMap = $self->propertyMap( + @got, + $self->property("constructor", undef), + ); + my $propertyOrder = $self->propertyOrder(@propertyMap); + $propertyOrder =~ s/^propertyOrder: //; + return + ".${class}Prototype.property =", @propertyMap, + ".${class}Prototype.propertyOrder =", $propertyOrder, + }), + + # FunctionPrototype + $self->block(sub { + my $class = "Function"; + my @got = $self->functionDeclare( + $class, + "toString", 0, + "apply", 2, + "call", 1, + "bind", 1, + ); + my @propertyMap = $self->propertyMap( + @got, + $self->property("constructor", undef), + $self->property("length", $self->numberValue(0), "0"), + ); + my $propertyOrder = $self->propertyOrder(@propertyMap); + $propertyOrder =~ s/^propertyOrder: //; + return + ".${class}Prototype.property =", @propertyMap, + ".${class}Prototype.propertyOrder =", $propertyOrder, + }), + + # Object + $self->block(sub { + my $class = "Object"; + return + ".$class =", + $self->globalFunction( + $class, + 1, + $self->functionDeclare( + $class, + "getPrototypeOf", 1, + "getOwnPropertyDescriptor", 2, + "defineProperty", 3, + "defineProperties", 2, + "create", 2, + "isExtensible", 1, + "preventExtensions", 1, + "isSealed", 1, + "seal", 1, + "isFrozen", 1, + "freeze", 1, + "keys", 1, + "getOwnPropertyNames", 1, + ), + ), + }), + + # Function + $self->block(sub { + my $class = "Function"; + return + "Function :=", + $self->globalFunction( + $class, + 1, + ), + ".$class = Function", + }), + + # Array + $self->block(sub { + my $class = "Array"; + my @got = $self->functionDeclare( + $class, + "toString", 0, + "toLocaleString", 0, + "concat", 1, + "join", 1, + "splice", 2, + "shift", 0, + "pop", 0, + "push", 1, + "slice", 2, + "unshift", 1, + "reverse", 0, + "sort", 1, + "indexOf", 1, + "lastIndexOf", 1, + "every", 1, + "some", 1, + "forEach", 1, + "map", 1, + "filter", 1, + "reduce", 1, + "reduceRight", 1, + ); + return + ".${class}Prototype =", + $self->globalPrototype( + $class, + "_classArray", + ".ObjectPrototype", + undef, + $self->property("length", $self->numberValue("uint32(0)"), "0100"), + @got, + ), + ".$class =", + $self->globalFunction( + $class, + 1, + $self->functionDeclare( + $class, + "isArray", 1, + ), + ), + }), + + # String + $self->block(sub { + my $class = "String"; + my @got = $self->functionDeclare( + $class, + "toString", 0, + "valueOf", 0, + "charAt", 1, + "charCodeAt", 1, + "concat", 1, + "indexOf", 1, + "lastIndexOf", 1, + "match", 1, + "replace", 2, + "search", 1, + "split", 2, + "slice", 2, + "substring", 2, + "toLowerCase", 0, + "toUpperCase", 0, + "substr", 2, + "trim", 0, + "trimLeft", 0, + "trimRight", 0, + "localeCompare", 1, + "toLocaleLowerCase", 0, + "toLocaleUpperCase", 0, + ); + return + ".${class}Prototype =", + $self->globalPrototype( + $class, + "_classString", + ".ObjectPrototype", + "prototypeValueString", + $self->property("length", $self->numberValue("int(0)"), "0"), + @got, + ), + ".$class =", + 
$self->globalFunction( + $class, + 1, + $self->functionDeclare( + $class, + "fromCharCode", 1, + ), + ), + }), + + # Boolean + $self->block(sub { + my $class = "Boolean"; + my @got = $self->functionDeclare( + $class, + "toString", 0, + "valueOf", 0, + ); + return + ".${class}Prototype =", + $self->globalPrototype( + $class, + "_classObject", + ".ObjectPrototype", + "prototypeValueBoolean", + @got, + ), + ".$class =", + $self->globalFunction( + $class, + 1, + $self->functionDeclare( + $class, + ), + ), + }), + + # Number + $self->block(sub { + my $class = "Number"; + my @got = $self->functionDeclare( + $class, + "toString", 0, + "valueOf", 0, + "toFixed", 1, + "toExponential", 1, + "toPrecision", 1, + "toLocaleString", 1, + ); + return + ".${class}Prototype =", + $self->globalPrototype( + $class, + "_classObject", + ".ObjectPrototype", + "prototypeValueNumber", + @got, + ), + ".$class =", + $self->globalFunction( + $class, + 1, + $self->functionDeclare( + $class, + ), + $self->numberConstantDeclare( + "MAX_VALUE", "math.MaxFloat64", + "MIN_VALUE", "math.SmallestNonzeroFloat64", + "NaN", "math.NaN()", + "NEGATIVE_INFINITY", "math.Inf(-1)", + "POSITIVE_INFINITY", "math.Inf(+1)", + ), + ), + }), + + # Math + $self->block(sub { + my $class = "Math"; + return + ".$class =", + $self->globalObject( + $class, + $self->functionDeclare( + $class, + "abs", 1, + "acos", 1, + "asin", 1, + "atan", 1, + "atan2", 1, + "ceil", 1, + "cos", 1, + "exp", 1, + "floor", 1, + "log", 1, + "max", 2, + "min", 2, + "pow", 2, + "random", 0, + "round", 1, + "sin", 1, + "sqrt", 1, + "tan", 1, + ), + $self->numberConstantDeclare( + "E", "math.E", + "LN10", "math.Ln10", + "LN2", "math.Ln2", + "LOG2E", "math.Log2E", + "LOG10E", "math.Log10E", + "PI", "math.Pi", + "SQRT1_2", "sqrt1_2", + "SQRT2", "math.Sqrt2", + ) + ), + }), + + # Date + $self->block(sub { + my $class = "Date"; + my @got = $self->functionDeclare( + $class, + "toString", 0, + "toDateString", 0, + "toTimeString", 0, + "toUTCString", 0, + "toISOString", 0, + "toJSON", 1, + "toGMTString", 0, + "toLocaleString", 0, + "toLocaleDateString", 0, + "toLocaleTimeString", 0, + "valueOf", 0, + "getTime", 0, + "getYear", 0, + "getFullYear", 0, + "getUTCFullYear", 0, + "getMonth", 0, + "getUTCMonth", 0, + "getDate", 0, + "getUTCDate", 0, + "getDay", 0, + "getUTCDay", 0, + "getHours", 0, + "getUTCHours", 0, + "getMinutes", 0, + "getUTCMinutes", 0, + "getSeconds", 0, + "getUTCSeconds", 0, + "getMilliseconds", 0, + "getUTCMilliseconds", 0, + "getTimezoneOffset", 0, + "setTime", 1, + "setMilliseconds", 1, + "setUTCMilliseconds", 1, + "setSeconds", 2, + "setUTCSeconds", 2, + "setMinutes", 3, + "setUTCMinutes", 3, + "setHours", 4, + "setUTCHours", 4, + "setDate", 1, + "setUTCDate", 1, + "setMonth", 2, + "setUTCMonth", 2, + "setYear", 1, + "setFullYear", 3, + "setUTCFullYear", 3, + ); + return + ".${class}Prototype =", + $self->globalPrototype( + $class, + "_classObject", + ".ObjectPrototype", + "prototypeValueDate", + @got, + ), + ".$class =", + $self->globalFunction( + $class, + 7, + $self->functionDeclare( + $class, + "parse", 1, + "UTC", 7, + "now", 0, + ), + ), + }), + + # RegExp + $self->block(sub { + my $class = "RegExp"; + my @got = $self->functionDeclare( + $class, + "toString", 0, + "exec", 1, + "test", 1, + "compile", 1, + ); + return + ".${class}Prototype =", + $self->globalPrototype( + $class, + "_classObject", + ".ObjectPrototype", + "prototypeValueRegExp", + @got, + ), + ".$class =", + $self->globalFunction( + $class, + 2, + $self->functionDeclare( + $class, + ), + 
), + }), + + # Error + $self->block(sub { + my $class = "Error"; + my @got = $self->functionDeclare( + $class, + "toString", 0, + ); + return + ".${class}Prototype =", + $self->globalPrototype( + $class, + "_classObject", + ".ObjectPrototype", + undef, + @got, + $self->property("name", $self->stringValue("Error")), + $self->property("message", $self->stringValue("")), + ), + ".$class =", + $self->globalFunction( + $class, + 1, + $self->functionDeclare( + $class, + ), + ), + }), + + (map { + my $class = "${_}Error"; + $self->block(sub { + my @got = $self->functionDeclare( + $class, + ); + return + ".${class}Prototype =", + $self->globalPrototype( + $class, + "_classObject", + ".ErrorPrototype", + undef, + @got, + $self->property("name", $self->stringValue($class)), + ), + ".$class =", + $self->globalFunction( + $class, + 1, + $self->functionDeclare( + $class, + ), + ), + }); + } qw/Eval Type Range Reference Syntax URI/), + + # JSON + $self->block(sub { + my $class = "JSON"; + return + ".$class =", + $self->globalObject( + $class, + $self->functionDeclare( + $class, + "parse", 2, + "stringify", 3, + ), + ), + }), + + # Global + $self->block(sub { + my $class = "Global"; + my @got = $self->functionDeclare( + $class, + "eval", 1, + "parseInt", 2, + "parseFloat", 1, + "isNaN", 1, + "isFinite", 1, + "decodeURI", 1, + "decodeURIComponent", 1, + "encodeURI", 1, + "encodeURIComponent", 1, + "escape", 1, + "unescape", 1, + ); + my @propertyMap = $self->propertyMap( + @got, + $self->globalDeclare( + "Object", + "Function", + "Array", + "String", + "Boolean", + "Number", + "Math", + "Date", + "RegExp", + "Error", + "EvalError", + "TypeError", + "RangeError", + "ReferenceError", + "SyntaxError", + "URIError", + "JSON", + ), + $self->property("undefined", $self->undefinedValue(), "0"), + $self->property("NaN", $self->numberValue("math.NaN()"), "0"), + $self->property("Infinity", $self->numberValue("math.Inf(+1)"), "0"), + ); + my $propertyOrder = $self->propertyOrder(@propertyMap); + $propertyOrder =~ s/^propertyOrder: //; + return + "runtime.globalObject.property =", + @propertyMap, + "runtime.globalObject.propertyOrder =", + $propertyOrder, + ; + }), + ; +} + +sub propertyMap { + my $self = shift; + return "map[string]_property{", (join ",\n", @_, ""), "}", +} + +our (@preblock, @postblock); +sub block { + my $self = shift; + local @preblock = (); + local @postblock = (); + my @input = $_[0]->(); + my @output; + while (@input) { + local $_ = shift @input; + if (m/^\./) { + $_ = "runtime.global$_"; + } + if (m/ :?=$/) { + $_ .= shift @input; + } + push @output, $_; + } + return + "{", + @preblock, + @output, + @postblock, + "}", + ; +} + +sub numberConstantDeclare { + my $self = shift; + my @got; + while (@_) { + my $name = shift; + my $value = shift; + push @got, $self->property($name, $self->numberValue($value), "0"), + } + return @got; +} + +sub functionDeclare { + my $self = shift; + my $class = shift; + my $builtin = "builtin${class}"; + my @got; + while (@_) { + my $name = shift; + my $length = shift; + $name = $self->newFunction($name, "${builtin}_", $length); + push @got, $self->functionProperty($name), + } + return @got; +} + +sub globalDeclare { + my $self = shift; + my @got; + while (@_) { + my $name = shift; + push @got, $self->property($name, $self->objectValue("runtime.global.$name"), "0101"), + } + return @got; +} + +sub propertyOrder { + my $self = shift; + my $propertyMap = join "", @_; + + my (@keys) = $propertyMap =~ m/("\w+"):/g; + my $propertyOrder = + join "\n", "propertyOrder: 
[]string{", (join ",\n", @keys, ""), "}"; + return $propertyOrder; +} + +sub globalObject { + my $self = shift; + my $name = shift; + + my $propertyMap = ""; + if (@_) { + $propertyMap = join "\n", $self->propertyMap(@_); + my $propertyOrder = $self->propertyOrder($propertyMap); + $propertyMap = "property: $propertyMap,\n$propertyOrder,"; + } + + return trim <<_END_; +&_object{ + runtime: runtime, + class: "$name", + objectClass: _classObject, + prototype: runtime.global.ObjectPrototype, + extensible: true, + $propertyMap +} +_END_ +} + +sub globalFunction { + my $self = shift; + my $name = shift; + my $length = shift; + + my $builtin = "builtin${name}"; + my $builtinNew = "builtinNew${name}"; + my $prototype = "runtime.global.${name}Prototype"; + my $propertyMap = ""; + unshift @_, + $self->property("length", $self->numberValue($length), "0"), + $self->property("prototype", $self->objectValue($prototype), "0"), + ; + + if (@_) { + $propertyMap = join "\n", $self->propertyMap(@_); + my $propertyOrder = $self->propertyOrder($propertyMap); + $propertyMap = "property: $propertyMap,\n$propertyOrder,"; + } + + push @postblock, $self->statement( + "$prototype.property[\"constructor\"] =", + $self->property(undef, $self->objectValue("runtime.global.${name}"), "0101"), + ); + + return trim <<_END_; +&_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + value: @{[ $self->nativeFunctionOf($name, $builtin, $builtinNew) ]}, + $propertyMap +} +_END_ +} + +sub nativeCallFunction { + my $self = shift; + my $name = shift; + my $func = shift; + return trim <<_END_; +_nativeCallFunction{ "$name", $func } +_END_ +} + +sub globalPrototype { + my $self = shift; + my $class = shift; + my $classObject = shift; + my $prototype = shift; + my $value = shift; + + if (!defined $prototype) { + $prototype = "nil"; + } + + if (!defined $value) { + $value = "nil"; + } + + if ($prototype =~ m/^\./) { + $prototype = "runtime.global$prototype"; + } + + my $propertyMap = ""; + if (@_) { + $propertyMap = join "\n", $self->propertyMap(@_); + my $propertyOrder = $self->propertyOrder($propertyMap); + $propertyMap = "property: $propertyMap,\n$propertyOrder,"; + } + + return trim <<_END_; +&_object{ + runtime: runtime, + class: "$class", + objectClass: $classObject, + prototype: $prototype, + extensible: true, + value: $value, + $propertyMap +} +_END_ +} + +sub newFunction { + my $self = shift; + my $name = shift; + my $func = shift; + my $length = shift; + + my @name = ($name, $name); + if ($name =~ m/^(\w+):(\w+)$/) { + @name = ($1, $2); + $name = $name[0]; + } + + if ($func =~ m/^builtin\w+_$/) { + $func = "$func$name[1]"; + } + + my $propertyOrder = ""; + my @propertyMap = ( + $self->property("length", $self->numberValue($length), "0"), + ); + + if (@propertyMap) { + $propertyOrder = $self->propertyOrder(@propertyMap); + $propertyOrder = "$propertyOrder,"; + } + + my $label = functionLabel($name); + push @preblock, $self->statement( + "$label := @{[ trim <<_END_ ]}", +&_object{ + runtime: runtime, + class: "Function", + objectClass: _classObject, + prototype: runtime.global.FunctionPrototype, + extensible: true, + property: @{[ join "\n", $self->propertyMap(@propertyMap) ]}, + $propertyOrder + value: @{[ $self->nativeFunctionOf($name, $func) ]}, +} +_END_ + ); + + return $name; +} + +sub newObject { + my $self = shift; + + my $propertyMap = join "\n", $self->propertyMap(@_); + my $propertyOrder = $self->propertyOrder($propertyMap); + 
+ return trim <<_END_; +&_object{ + runtime: runtime, + class: "Object", + objectClass: _classObject, + prototype: runtime.global.ObjectPrototype, + extensible: true, + property: $propertyMap, + $propertyOrder, +} +_END_ +} + +sub newPrototypeObject { + my $self = shift; + my $class = shift; + my $objectClass = shift; + my $value = shift; + if (defined $value) { + $value = "value: $value,"; + } + + my $propertyMap = join "\n", $self->propertyMap(@_); + my $propertyOrder = $self->propertyOrder($propertyMap); + + return trim <<_END_; +&_object{ + runtime: runtime, + class: "$class", + objectClass: $objectClass, + prototype: runtime.global.ObjectPrototype, + extensible: true, + property: $propertyMap, + $propertyOrder, + $value +} +_END_ +} + +sub functionProperty { + my $self = shift; + my $name = shift; + + return $self->property( + $name, + $self->objectValue(functionLabel($name)) + ); +} + +sub statement { + my $self = shift; + return join "\n", @_; +} + +sub functionOf { + my $self = shift; + my $call = shift; + my $construct = shift; + if ($construct) { + $construct = "construct: $construct,"; + } else { + $construct = ""; + } + + return trim <<_END_ +_functionObject{ + call: $call, + $construct +} +_END_ +} + +sub nativeFunctionOf { + my $self = shift; + my $name = shift; + my $call = shift; + my $construct = shift; + if ($construct) { + $construct = "construct: $construct,"; + } else { + $construct = ""; + } + + return trim <<_END_ +_nativeFunctionObject{ + name: "$name", + call: $call, + $construct +} +_END_ +} + +sub nameProperty { + my $self = shift; + my $name = shift; + my $value = shift; + + return trim <<_END_; +"$name": _property{ + mode: 0101, + value: $value, +} +_END_ +} + +sub numberValue { + my $self = shift; + my $value = shift; + return trim <<_END_; +Value{ + kind: valueNumber, + value: $value, +} +_END_ +} + +sub property { + my $self = shift; + my $name = shift; + my $value = shift; + my $mode = shift; + $mode = "0101" unless defined $mode; + if (! 
defined $value) { + $value = "Value{}"; + } + if (defined $name) { + return trim <<_END_; +"$name": _property{ + mode: $mode, + value: $value, +} +_END_ + } else { + return trim <<_END_; +_property{ + mode: $mode, + value: $value, +} +_END_ + } + +} + +sub objectProperty { + my $self = shift; + my $name = shift; + my $value = shift; + + return trim <<_END_; +"$name": _property{ + mode: 0101, + value: @{[ $self->objectValue($value)]}, +} +_END_ +} + +sub objectValue { + my $self = shift; + my $value = shift; + return trim <<_END_ +Value{ + kind: valueObject, + value: $value, +} +_END_ +} + +sub stringValue { + my $self = shift; + my $value = shift; + return trim <<_END_ +Value{ + kind: valueString, + value: "$value", +} +_END_ +} + +sub booleanValue { + my $self = shift; + my $value = shift; + return trim <<_END_ +Value{ + kind: valueBoolean, + value: $value, +} +_END_ +} + +sub undefinedValue { + my $self = shift; + return trim <<_END_ +Value{ + kind: valueUndefined, +} +_END_ +} diff --git a/vendor/github.com/robertkrimen/otto/object.go b/vendor/github.com/robertkrimen/otto/object.go new file mode 100644 index 000000000..849812c91 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/object.go @@ -0,0 +1,156 @@ +package otto + +type _object struct { + runtime *_runtime + + class string + objectClass *_objectClass + value interface{} + + prototype *_object + extensible bool + + property map[string]_property + propertyOrder []string +} + +func newObject(runtime *_runtime, class string) *_object { + self := &_object{ + runtime: runtime, + class: class, + objectClass: _classObject, + property: make(map[string]_property), + extensible: true, + } + return self +} + +// 8.12 + +// 8.12.1 +func (self *_object) getOwnProperty(name string) *_property { + return self.objectClass.getOwnProperty(self, name) +} + +// 8.12.2 +func (self *_object) getProperty(name string) *_property { + return self.objectClass.getProperty(self, name) +} + +// 8.12.3 +func (self *_object) get(name string) Value { + return self.objectClass.get(self, name) +} + +// 8.12.4 +func (self *_object) canPut(name string) bool { + return self.objectClass.canPut(self, name) +} + +// 8.12.5 +func (self *_object) put(name string, value Value, throw bool) { + self.objectClass.put(self, name, value, throw) +} + +// 8.12.6 +func (self *_object) hasProperty(name string) bool { + return self.objectClass.hasProperty(self, name) +} + +func (self *_object) hasOwnProperty(name string) bool { + return self.objectClass.hasOwnProperty(self, name) +} + +type _defaultValueHint int + +const ( + defaultValueNoHint _defaultValueHint = iota + defaultValueHintString + defaultValueHintNumber +) + +// 8.12.8 +func (self *_object) DefaultValue(hint _defaultValueHint) Value { + if hint == defaultValueNoHint { + if self.class == "Date" { + // Date exception + hint = defaultValueHintString + } else { + hint = defaultValueHintNumber + } + } + methodSequence := []string{"valueOf", "toString"} + if hint == defaultValueHintString { + methodSequence = []string{"toString", "valueOf"} + } + for _, methodName := range methodSequence { + method := self.get(methodName) + // FIXME This is redundant... 
+ if method.isCallable() { + result := method._object().call(toValue_object(self), nil, false, nativeFrame) + if result.IsPrimitive() { + return result + } + } + } + + panic(self.runtime.panicTypeError()) +} + +func (self *_object) String() string { + return self.DefaultValue(defaultValueHintString).string() +} + +func (self *_object) defineProperty(name string, value Value, mode _propertyMode, throw bool) bool { + return self.defineOwnProperty(name, _property{value, mode}, throw) +} + +// 8.12.9 +func (self *_object) defineOwnProperty(name string, descriptor _property, throw bool) bool { + return self.objectClass.defineOwnProperty(self, name, descriptor, throw) +} + +func (self *_object) delete(name string, throw bool) bool { + return self.objectClass.delete(self, name, throw) +} + +func (self *_object) enumerate(all bool, each func(string) bool) { + self.objectClass.enumerate(self, all, each) +} + +func (self *_object) _exists(name string) bool { + _, exists := self.property[name] + return exists +} + +func (self *_object) _read(name string) (_property, bool) { + property, exists := self.property[name] + return property, exists +} + +func (self *_object) _write(name string, value interface{}, mode _propertyMode) { + if value == nil { + value = Value{} + } + _, exists := self.property[name] + self.property[name] = _property{value, mode} + if !exists { + self.propertyOrder = append(self.propertyOrder, name) + } +} + +func (self *_object) _delete(name string) { + _, exists := self.property[name] + delete(self.property, name) + if exists { + for index, property := range self.propertyOrder { + if name == property { + if index == len(self.propertyOrder)-1 { + self.propertyOrder = self.propertyOrder[:index] + } else { + self.propertyOrder = append(self.propertyOrder[:index], self.propertyOrder[index+1:]...) 
+ } + } + } + } +} diff --git a/vendor/github.com/robertkrimen/otto/object_class.go b/vendor/github.com/robertkrimen/otto/object_class.go new file mode 100644 index 000000000..d18b9cede --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/object_class.go @@ -0,0 +1,493 @@ +package otto + +import ( + "encoding/json" +) + +type _objectClass struct { + getOwnProperty func(*_object, string) *_property + getProperty func(*_object, string) *_property + get func(*_object, string) Value + canPut func(*_object, string) bool + put func(*_object, string, Value, bool) + hasProperty func(*_object, string) bool + hasOwnProperty func(*_object, string) bool + defineOwnProperty func(*_object, string, _property, bool) bool + delete func(*_object, string, bool) bool + enumerate func(*_object, bool, func(string) bool) + clone func(*_object, *_object, *_clone) *_object + marshalJSON func(*_object) json.Marshaler +} + +func objectEnumerate(self *_object, all bool, each func(string) bool) { + for _, name := range self.propertyOrder { + if all || self.property[name].enumerable() { + if !each(name) { + return + } + } + } +} + +var ( + _classObject, + _classArray, + _classString, + _classArguments, + _classGoStruct, + _classGoMap, + _classGoArray, + _classGoSlice, + _ *_objectClass +) + +func init() { + _classObject = &_objectClass{ + objectGetOwnProperty, + objectGetProperty, + objectGet, + objectCanPut, + objectPut, + objectHasProperty, + objectHasOwnProperty, + objectDefineOwnProperty, + objectDelete, + objectEnumerate, + objectClone, + nil, + } + + _classArray = &_objectClass{ + objectGetOwnProperty, + objectGetProperty, + objectGet, + objectCanPut, + objectPut, + objectHasProperty, + objectHasOwnProperty, + arrayDefineOwnProperty, + objectDelete, + objectEnumerate, + objectClone, + nil, + } + + _classString = &_objectClass{ + stringGetOwnProperty, + objectGetProperty, + objectGet, + objectCanPut, + objectPut, + objectHasProperty, + objectHasOwnProperty, + objectDefineOwnProperty, + objectDelete, + stringEnumerate, + objectClone, + nil, + } + + _classArguments = &_objectClass{ + argumentsGetOwnProperty, + objectGetProperty, + argumentsGet, + objectCanPut, + objectPut, + objectHasProperty, + objectHasOwnProperty, + argumentsDefineOwnProperty, + argumentsDelete, + objectEnumerate, + objectClone, + nil, + } + + _classGoStruct = &_objectClass{ + goStructGetOwnProperty, + objectGetProperty, + objectGet, + goStructCanPut, + goStructPut, + objectHasProperty, + objectHasOwnProperty, + objectDefineOwnProperty, + objectDelete, + goStructEnumerate, + objectClone, + goStructMarshalJSON, + } + + _classGoMap = &_objectClass{ + goMapGetOwnProperty, + objectGetProperty, + objectGet, + objectCanPut, + objectPut, + objectHasProperty, + objectHasOwnProperty, + goMapDefineOwnProperty, + goMapDelete, + goMapEnumerate, + objectClone, + nil, + } + + _classGoArray = &_objectClass{ + goArrayGetOwnProperty, + objectGetProperty, + objectGet, + objectCanPut, + objectPut, + objectHasProperty, + objectHasOwnProperty, + goArrayDefineOwnProperty, + goArrayDelete, + goArrayEnumerate, + objectClone, + nil, + } + + _classGoSlice = &_objectClass{ + goSliceGetOwnProperty, + objectGetProperty, + objectGet, + objectCanPut, + objectPut, + objectHasProperty, + objectHasOwnProperty, + goSliceDefineOwnProperty, + goSliceDelete, + goSliceEnumerate, + objectClone, + nil, + } +} + +// Allons-y + +// 8.12.1 +func objectGetOwnProperty(self *_object, name string) *_property { + // Return a _copy_ of the property + property, exists := self._read(name) + if 
!exists { + return nil + } + return &property +} + +// 8.12.2 +func objectGetProperty(self *_object, name string) *_property { + property := self.getOwnProperty(name) + if property != nil { + return property + } + if self.prototype != nil { + return self.prototype.getProperty(name) + } + return nil +} + +// 8.12.3 +func objectGet(self *_object, name string) Value { + property := self.getProperty(name) + if property != nil { + return property.get(self) + } + return Value{} +} + +// 8.12.4 +func objectCanPut(self *_object, name string) bool { + canPut, _, _ := _objectCanPut(self, name) + return canPut +} + +func _objectCanPut(self *_object, name string) (canPut bool, property *_property, setter *_object) { + property = self.getOwnProperty(name) + if property != nil { + switch propertyValue := property.value.(type) { + case Value: + canPut = property.writable() + return + case _propertyGetSet: + setter = propertyValue[1] + canPut = setter != nil + return + default: + panic(self.runtime.panicTypeError()) + } + } + + if self.prototype == nil { + return self.extensible, nil, nil + } + + property = self.prototype.getProperty(name) + if property == nil { + return self.extensible, nil, nil + } + + switch propertyValue := property.value.(type) { + case Value: + if !self.extensible { + return false, nil, nil + } + return property.writable(), nil, nil + case _propertyGetSet: + setter = propertyValue[1] + canPut = setter != nil + return + default: + panic(self.runtime.panicTypeError()) + } +} + +// 8.12.5 +func objectPut(self *_object, name string, value Value, throw bool) { + + if true { + // Shortcut... + // + // So, right now, every class is using objectCanPut and every class + // is using objectPut. + // + // If that were to no longer be the case, we would have to have + // something to detect that here, so that we do not use an + // incompatible canPut routine + canPut, property, setter := _objectCanPut(self, name) + if !canPut { + self.runtime.typeErrorResult(throw) + } else if setter != nil { + setter.call(toValue(self), []Value{value}, false, nativeFrame) + } else if property != nil { + property.value = value + self.defineOwnProperty(name, *property, throw) + } else { + self.defineProperty(name, value, 0111, throw) + } + return + } + + // The long way... 
+ // + // Right now, code should never get here, see above + if !self.canPut(name) { + self.runtime.typeErrorResult(throw) + return + } + + property := self.getOwnProperty(name) + if property == nil { + property = self.getProperty(name) + if property != nil { + if getSet, isAccessor := property.value.(_propertyGetSet); isAccessor { + getSet[1].call(toValue(self), []Value{value}, false, nativeFrame) + return + } + } + self.defineProperty(name, value, 0111, throw) + } else { + switch propertyValue := property.value.(type) { + case Value: + property.value = value + self.defineOwnProperty(name, *property, throw) + case _propertyGetSet: + if propertyValue[1] != nil { + propertyValue[1].call(toValue(self), []Value{value}, false, nativeFrame) + return + } + if throw { + panic(self.runtime.panicTypeError()) + } + default: + panic(self.runtime.panicTypeError()) + } + } +} + +// 8.12.6 +func objectHasProperty(self *_object, name string) bool { + return self.getProperty(name) != nil +} + +func objectHasOwnProperty(self *_object, name string) bool { + return self.getOwnProperty(name) != nil +} + +// 8.12.9 +func objectDefineOwnProperty(self *_object, name string, descriptor _property, throw bool) bool { + property, exists := self._read(name) + { + if !exists { + if !self.extensible { + goto Reject + } + if newGetSet, isAccessor := descriptor.value.(_propertyGetSet); isAccessor { + if newGetSet[0] == &_nilGetSetObject { + newGetSet[0] = nil + } + if newGetSet[1] == &_nilGetSetObject { + newGetSet[1] = nil + } + descriptor.value = newGetSet + } + self._write(name, descriptor.value, descriptor.mode) + return true + } + if descriptor.isEmpty() { + return true + } + + // TODO Per 8.12.9.6 - We should shortcut here (returning true) if + // the current and new (define) properties are the same + + configurable := property.configurable() + if !configurable { + if descriptor.configurable() { + goto Reject + } + // Test that, if enumerable is set on the property descriptor, then it should + // be the same as the existing property + if descriptor.enumerateSet() && descriptor.enumerable() != property.enumerable() { + goto Reject + } + } + value, isDataDescriptor := property.value.(Value) + getSet, _ := property.value.(_propertyGetSet) + if descriptor.isGenericDescriptor() { + // GenericDescriptor + } else if isDataDescriptor != descriptor.isDataDescriptor() { + // DataDescriptor <=> AccessorDescriptor + if !configurable { + goto Reject + } + } else if isDataDescriptor && descriptor.isDataDescriptor() { + // DataDescriptor <=> DataDescriptor + if !configurable { + if !property.writable() && descriptor.writable() { + goto Reject + } + if !property.writable() { + if descriptor.value != nil && !sameValue(value, descriptor.value.(Value)) { + goto Reject + } + } + } + } else { + // AccessorDescriptor <=> AccessorDescriptor + newGetSet, _ := descriptor.value.(_propertyGetSet) + presentGet, presentSet := true, true + if newGetSet[0] == &_nilGetSetObject { + // Present, but nil + newGetSet[0] = nil + } else if newGetSet[0] == nil { + // Missing, not even nil + newGetSet[0] = getSet[0] + presentGet = false + } + if newGetSet[1] == &_nilGetSetObject { + // Present, but nil + newGetSet[1] = nil + } else if newGetSet[1] == nil { + // Missing, not even nil + newGetSet[1] = getSet[1] + presentSet = false + } + if !configurable { + if (presentGet && (getSet[0] != newGetSet[0])) || (presentSet && (getSet[1] != newGetSet[1])) { + goto Reject + } + } + descriptor.value = newGetSet + } + { + // This section will preserve attributes 
of + // the original property, if necessary + value1 := descriptor.value + if value1 == nil { + value1 = property.value + } else if newGetSet, isAccessor := descriptor.value.(_propertyGetSet); isAccessor { + if newGetSet[0] == &_nilGetSetObject { + newGetSet[0] = nil + } + if newGetSet[1] == &_nilGetSetObject { + newGetSet[1] = nil + } + value1 = newGetSet + } + mode1 := descriptor.mode + if mode1&0222 != 0 { + // TODO Factor this out into somewhere testable + // (Maybe put into switch ...) + mode0 := property.mode + if mode1&0200 != 0 { + if descriptor.isDataDescriptor() { + mode1 &= ^0200 // Turn off "writable" missing + mode1 |= (mode0 & 0100) + } + } + if mode1&020 != 0 { + mode1 |= (mode0 & 010) + } + if mode1&02 != 0 { + mode1 |= (mode0 & 01) + } + mode1 &= 0311 // 0311 to preserve the non-setting on "writable" + } + self._write(name, value1, mode1) + } + return true + } +Reject: + if throw { + panic(self.runtime.panicTypeError()) + } + return false +} + +func objectDelete(self *_object, name string, throw bool) bool { + property_ := self.getOwnProperty(name) + if property_ == nil { + return true + } + if property_.configurable() { + self._delete(name) + return true + } + return self.runtime.typeErrorResult(throw) +} + +func objectClone(in *_object, out *_object, clone *_clone) *_object { + *out = *in + + out.runtime = clone.runtime + if out.prototype != nil { + out.prototype = clone.object(in.prototype) + } + out.property = make(map[string]_property, len(in.property)) + out.propertyOrder = make([]string, len(in.propertyOrder)) + copy(out.propertyOrder, in.propertyOrder) + for index, property := range in.property { + out.property[index] = clone.property(property) + } + + switch value := in.value.(type) { + case _nativeFunctionObject: + out.value = value + case _bindFunctionObject: + out.value = _bindFunctionObject{ + target: clone.object(value.target), + this: clone.value(value.this), + argumentList: clone.valueArray(value.argumentList), + } + case _nodeFunctionObject: + out.value = _nodeFunctionObject{ + node: value.node, + stash: clone.stash(value.stash), + } + case _argumentsObject: + out.value = value.clone(clone) + } + + return out +} diff --git a/vendor/github.com/robertkrimen/otto/otto.go b/vendor/github.com/robertkrimen/otto/otto.go new file mode 100644 index 000000000..7d9065851 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/otto.go @@ -0,0 +1,769 @@ +/* +Package otto is a JavaScript parser and interpreter written natively in Go. + +http://godoc.org/github.com/robertkrimen/otto + + import ( + "github.com/robertkrimen/otto" + ) + +Run something in the VM + + vm := otto.New() + vm.Run(` + abc = 2 + 2; + console.log("The value of abc is " + abc); // 4 + `) + +Get a value out of the VM + + value, err := vm.Get("abc") + value, _ := value.ToInteger() + } + +Set a number + + vm.Set("def", 11) + vm.Run(` + console.log("The value of def is " + def); + // The value of def is 11 + `) + +Set a string + + vm.Set("xyzzy", "Nothing happens.") + vm.Run(` + console.log(xyzzy.length); // 16 + `) + +Get the value of an expression + + value, _ = vm.Run("xyzzy.length") + { + // value is an int64 with a value of 16 + value, _ := value.ToInteger() + } + +An error happens + + value, err = vm.Run("abcdefghijlmnopqrstuvwxyz.length") + if err != nil { + // err = ReferenceError: abcdefghijlmnopqrstuvwxyz is not defined + // If there is an error, then value.IsUndefined() is true + ... 
+ } + +Set a Go function + + vm.Set("sayHello", func(call otto.FunctionCall) otto.Value { + fmt.Printf("Hello, %s.\n", call.Argument(0).String()) + return otto.Value{} + }) + +Set a Go function that returns something useful + + vm.Set("twoPlus", func(call otto.FunctionCall) otto.Value { + right, _ := call.Argument(0).ToInteger() + result, _ := vm.ToValue(2 + right) + return result + }) + +Use the functions in JavaScript + + result, _ = vm.Run(` + sayHello("Xyzzy"); // Hello, Xyzzy. + sayHello(); // Hello, undefined + + result = twoPlus(2.0); // 4 + `) + +Parser + +A separate parser is available in the parser package if you're just interested in building an AST. + +http://godoc.org/github.com/robertkrimen/otto/parser + +Parse and return an AST + + filename := "" // A filename is optional + src := ` + // Sample xyzzy example + (function(){ + if (3.14159 > 0) { + console.log("Hello, World."); + return; + } + + var xyzzy = NaN; + console.log("Nothing happens."); + return xyzzy; + })(); + ` + + // Parse some JavaScript, yielding a *ast.Program and/or an ErrorList + program, err := parser.ParseFile(nil, filename, src, 0) + +otto + +You can run (Go) JavaScript from the commandline with: http://github.com/robertkrimen/otto/tree/master/otto + + $ go get -v github.com/robertkrimen/otto/otto + +Run JavaScript by entering some source on stdin or by giving otto a filename: + + $ otto example.js + +underscore + +Optionally include the JavaScript utility-belt library, underscore, with this import: + + import ( + "github.com/robertkrimen/otto" + _ "github.com/robertkrimen/otto/underscore" + ) + + // Now every otto runtime will come loaded with underscore + +For more information: http://github.com/robertkrimen/otto/tree/master/underscore + +Caveat Emptor + +The following are some limitations with otto: + + * "use strict" will parse, but does nothing. + * The regular expression engine (re2/regexp) is not fully compatible with the ECMA5 specification. + +Regular Expression Incompatibility + +Go translates JavaScript-style regular expressions into something that is "regexp" compatible via `parser.TransformRegExp`. +Unfortunately, RegExp requires backtracking for some patterns, and backtracking is not supported by the standard Go engine: https://code.google.com/p/re2/wiki/Syntax + +Therefore, the following syntax is incompatible: + + (?=) // Lookahead (positive), currently a parsing error + (?!) // Lookahead (backhead), currently a parsing error + \1 // Backreference (\1, \2, \3, ...), currently a parsing error + +A brief discussion of these limitations: "Regexp (?!re)" https://groups.google.com/forum/?fromgroups=#%21topic/golang-nuts/7qgSDWPIh_E + +More information about re2: https://code.google.com/p/re2/ + +In addition to the above, re2 (Go) has a different definition for \s: [\t\n\f\r ]. +The JavaScript definition, on the other hand, also includes \v, Unicode "Separator, Space", etc. + +Halting Problem + +If you want to stop long running executions (like third-party code), you can use the interrupt channel to do this: + + package main + + import ( + "errors" + "fmt" + "os" + "time" + + "github.com/robertkrimen/otto" + ) + + var halt = errors.New("Stahp") + + func main() { + runUnsafe(`var abc = [];`) + runUnsafe(` + while (true) { + // Loop forever + }`) + } + + func runUnsafe(unsafe string) { + start := time.Now() + defer func() { + duration := time.Since(start) + if caught := recover(); caught != nil { + if caught == halt { + fmt.Fprintf(os.Stderr, "Some code took to long! 
Stopping after: %v\n", duration) + return + } + panic(caught) // Something else happened, repanic! + } + fmt.Fprintf(os.Stderr, "Ran code successfully: %v\n", duration) + }() + + vm := otto.New() + vm.Interrupt = make(chan func(), 1) // The buffer prevents blocking + + go func() { + time.Sleep(2 * time.Second) // Stop after two seconds + vm.Interrupt <- func() { + panic(halt) + } + }() + + vm.Run(unsafe) // Here be dragons (risky code) + } + +Where is setTimeout/setInterval? + +These timing functions are not actually part of the ECMA-262 specification. Typically, they belong to the `windows` object (in the browser). +It would not be difficult to provide something like these via Go, but you probably want to wrap otto in an event loop in that case. + +For an example of how this could be done in Go with otto, see natto: + +http://github.com/robertkrimen/natto + +Here is some more discussion of the issue: + +* http://book.mixu.net/node/ch2.html + +* http://en.wikipedia.org/wiki/Reentrancy_%28computing%29 + +* http://aaroncrane.co.uk/2009/02/perl_safe_signals/ + +*/ +package otto + +import ( + "fmt" + "strings" + + "github.com/robertkrimen/otto/file" + "github.com/robertkrimen/otto/registry" +) + +// Otto is the representation of the JavaScript runtime. Each instance of Otto has a self-contained namespace. +type Otto struct { + // Interrupt is a channel for interrupting the runtime. You can use this to halt a long running execution, for example. + // See "Halting Problem" for more information. + Interrupt chan func() + runtime *_runtime +} + +// New will allocate a new JavaScript runtime +func New() *Otto { + self := &Otto{ + runtime: newContext(), + } + self.runtime.otto = self + self.runtime.traceLimit = 10 + self.Set("console", self.runtime.newConsole()) + + registry.Apply(func(entry registry.Entry) { + self.Run(entry.Source()) + }) + + return self +} + +func (otto *Otto) clone() *Otto { + self := &Otto{ + runtime: otto.runtime.clone(), + } + self.runtime.otto = self + return self +} + +// Run will allocate a new JavaScript runtime, run the given source +// on the allocated runtime, and return the runtime, resulting value, and +// error (if any). +// +// src may be a string, a byte slice, a bytes.Buffer, or an io.Reader, but it MUST always be in UTF-8. +// +// src may also be a Script. +// +// src may also be a Program, but if the AST has been modified, then runtime behavior is undefined. +// +func Run(src interface{}) (*Otto, Value, error) { + otto := New() + value, err := otto.Run(src) // This already does safety checking + return otto, value, err +} + +// Run will run the given source (parsing it first if necessary), returning the resulting value and error (if any) +// +// src may be a string, a byte slice, a bytes.Buffer, or an io.Reader, but it MUST always be in UTF-8. +// +// If the runtime is unable to parse source, then this function will return undefined and the parse error (nothing +// will be evaluated in this case). +// +// src may also be a Script. +// +// src may also be a Program, but if the AST has been modified, then runtime behavior is undefined. +// +func (self Otto) Run(src interface{}) (Value, error) { + value, err := self.runtime.cmpl_run(src, nil) + if !value.safe() { + value = Value{} + } + return value, err +} + +// Eval will do the same thing as Run, except without leaving the current scope. +// +// By staying in the same scope, the code evaluated has access to everything +// already defined in the current stack frame. 
This is most useful in, for +// example, a debugger call. +func (self Otto) Eval(src interface{}) (Value, error) { + if self.runtime.scope == nil { + self.runtime.enterGlobalScope() + defer self.runtime.leaveScope() + } + + value, err := self.runtime.cmpl_eval(src, nil) + if !value.safe() { + value = Value{} + } + return value, err +} + +// Get the value of the top-level binding of the given name. +// +// If there is an error (like the binding does not exist), then the value +// will be undefined. +func (self Otto) Get(name string) (Value, error) { + value := Value{} + err := catchPanic(func() { + value = self.getValue(name) + }) + if !value.safe() { + value = Value{} + } + return value, err +} + +func (self Otto) getValue(name string) Value { + return self.runtime.globalStash.getBinding(name, false) +} + +// Set the top-level binding of the given name to the given value. +// +// Set will automatically apply ToValue to the given value in order +// to convert it to a JavaScript value (type Value). +// +// If there is an error (like the binding is read-only, or the ToValue conversion +// fails), then an error is returned. +// +// If the top-level binding does not exist, it will be created. +func (self Otto) Set(name string, value interface{}) error { + { + value, err := self.ToValue(value) + if err != nil { + return err + } + err = catchPanic(func() { + self.setValue(name, value) + }) + return err + } +} + +func (self Otto) setValue(name string, value Value) { + self.runtime.globalStash.setValue(name, value, false) +} + +func (self Otto) SetDebuggerHandler(fn func(vm *Otto)) { + self.runtime.debugger = fn +} + +func (self Otto) SetRandomSource(fn func() float64) { + self.runtime.random = fn +} + +// SetStackDepthLimit sets an upper limit to the depth of the JavaScript +// stack. In simpler terms, this limits the number of "nested" function calls +// you can make in a particular interpreter instance. +// +// Note that this doesn't take into account the Go stack depth. If your +// JavaScript makes a call to a Go function, otto won't keep track of what +// happens outside the interpreter. So if your Go function is infinitely +// recursive, you're still in trouble. +func (self Otto) SetStackDepthLimit(limit int) { + self.runtime.stackLimit = limit +} + +// SetStackTraceLimit sets an upper limit to the number of stack frames that +// otto will use when formatting an error's stack trace. By default, the limit +// is 10. This is consistent with V8 and SpiderMonkey. +// +// TODO: expose via `Error.stackTraceLimit` +func (self Otto) SetStackTraceLimit(limit int) { + self.runtime.traceLimit = limit +} + +// MakeCustomError creates a new Error object with the given name and message, +// returning it as a Value. +func (self Otto) MakeCustomError(name, message string) Value { + return self.runtime.toValue(self.runtime.newError(name, self.runtime.toValue(message), 0)) +} + +// MakeRangeError creates a new RangeError object with the given message, +// returning it as a Value. +func (self Otto) MakeRangeError(message string) Value { + return self.runtime.toValue(self.runtime.newRangeError(self.runtime.toValue(message))) +} + +// MakeSyntaxError creates a new SyntaxError object with the given message, +// returning it as a Value. +func (self Otto) MakeSyntaxError(message string) Value { + return self.runtime.toValue(self.runtime.newSyntaxError(self.runtime.toValue(message))) +} + +// MakeTypeError creates a new TypeError object with the given message, +// returning it as a Value. 
+func (self Otto) MakeTypeError(message string) Value { + return self.runtime.toValue(self.runtime.newTypeError(self.runtime.toValue(message))) +} + +// Context is a structure that contains information about the current execution +// context. +type Context struct { + Filename string + Line int + Column int + Callee string + Symbols map[string]Value + This Value + Stacktrace []string +} + +// Context returns the current execution context of the vm, traversing up to +// ten stack frames, and skipping any innermost native function stack frames. +func (self Otto) Context() Context { + return self.ContextSkip(10, true) +} + +// ContextLimit returns the current execution context of the vm, with a +// specific limit on the number of stack frames to traverse, skipping any +// innermost native function stack frames. +func (self Otto) ContextLimit(limit int) Context { + return self.ContextSkip(limit, true) +} + +// ContextSkip returns the current execution context of the vm, with a +// specific limit on the number of stack frames to traverse, optionally +// skipping any innermost native function stack frames. +func (self Otto) ContextSkip(limit int, skipNative bool) (ctx Context) { + // Ensure we are operating in a scope + if self.runtime.scope == nil { + self.runtime.enterGlobalScope() + defer self.runtime.leaveScope() + } + + scope := self.runtime.scope + frame := scope.frame + + for skipNative && frame.native && scope.outer != nil { + scope = scope.outer + frame = scope.frame + } + + // Get location information + ctx.Filename = "" + ctx.Callee = frame.callee + + switch { + case frame.native: + ctx.Filename = frame.nativeFile + ctx.Line = frame.nativeLine + ctx.Column = 0 + case frame.file != nil: + ctx.Filename = "" + + if p := frame.file.Position(file.Idx(frame.offset)); p != nil { + ctx.Line = p.Line + ctx.Column = p.Column + + if p.Filename != "" { + ctx.Filename = p.Filename + } + } + } + + // Get the current scope this Value + ctx.This = toValue_object(scope.this) + + // Build stacktrace (up to 10 levels deep) + ctx.Symbols = make(map[string]Value) + ctx.Stacktrace = append(ctx.Stacktrace, frame.location()) + for limit != 0 { + // Get variables + stash := scope.lexical + for { + for _, name := range getStashProperties(stash) { + if _, ok := ctx.Symbols[name]; !ok { + ctx.Symbols[name] = stash.getBinding(name, true) + } + } + stash = stash.outer() + if stash == nil || stash.outer() == nil { + break + } + } + + scope = scope.outer + if scope == nil { + break + } + if scope.frame.offset >= 0 { + ctx.Stacktrace = append(ctx.Stacktrace, scope.frame.location()) + } + limit-- + } + + return +} + +// Call the given JavaScript with a given this and arguments. +// +// If this is nil, then some special handling takes place to determine the proper +// this value, falling back to a "standard" invocation if necessary (where this is +// undefined). +// +// If source begins with "new " (A lowercase new followed by a space), then +// Call will invoke the function constructor rather than performing a function call. +// In this case, the this argument has no effect. +// +// // value is a String object +// value, _ := vm.Call("Object", nil, "Hello, World.") +// +// // Likewise... 
+// value, _ := vm.Call("new Object", nil, "Hello, World.") +// +// // This will perform a concat on the given array and return the result +// // value is [ 1, 2, 3, undefined, 4, 5, 6, 7, "abc" ] +// value, _ := vm.Call(`[ 1, 2, 3, undefined, 4 ].concat`, nil, 5, 6, 7, "abc") +// +func (self Otto) Call(source string, this interface{}, argumentList ...interface{}) (Value, error) { + + thisValue := Value{} + + construct := false + if strings.HasPrefix(source, "new ") { + source = source[4:] + construct = true + } + + // FIXME enterGlobalScope + self.runtime.enterGlobalScope() + defer func() { + self.runtime.leaveScope() + }() + + if !construct && this == nil { + program, err := self.runtime.cmpl_parse("", source+"()", nil) + if err == nil { + if node, ok := program.body[0].(*_nodeExpressionStatement); ok { + if node, ok := node.expression.(*_nodeCallExpression); ok { + var value Value + err := catchPanic(func() { + value = self.runtime.cmpl_evaluate_nodeCallExpression(node, argumentList) + }) + if err != nil { + return Value{}, err + } + return value, nil + } + } + } + } else { + value, err := self.ToValue(this) + if err != nil { + return Value{}, err + } + thisValue = value + } + + { + this := thisValue + + fn, err := self.Run(source) + if err != nil { + return Value{}, err + } + + if construct { + result, err := fn.constructSafe(self.runtime, this, argumentList...) + if err != nil { + return Value{}, err + } + return result, nil + } + + result, err := fn.Call(this, argumentList...) + if err != nil { + return Value{}, err + } + return result, nil + } +} + +// Object will run the given source and return the result as an object. +// +// For example, accessing an existing object: +// +// object, _ := vm.Object(`Number`) +// +// Or, creating a new object: +// +// object, _ := vm.Object(`({ xyzzy: "Nothing happens." })`) +// +// Or, creating and assigning an object: +// +// object, _ := vm.Object(`xyzzy = {}`) +// object.Set("volume", 11) +// +// If there is an error (like the source does not result in an object), then +// nil and an error is returned. +func (self Otto) Object(source string) (*Object, error) { + value, err := self.runtime.cmpl_run(source, nil) + if err != nil { + return nil, err + } + if value.IsObject() { + return value.Object(), nil + } + return nil, fmt.Errorf("value is not an object") +} + +// ToValue will convert an interface{} value to a value digestible by otto/JavaScript. +func (self Otto) ToValue(value interface{}) (Value, error) { + return self.runtime.safeToValue(value) +} + +// Copy will create a copy/clone of the runtime. +// +// Copy is useful for saving some time when creating many similar runtimes. +// +// This method works by walking the original runtime and cloning each object, scope, stash, +// etc. into a new runtime. +// +// Be on the lookout for memory leaks or inadvertent sharing of resources. +func (in *Otto) Copy() *Otto { + out := &Otto{ + runtime: in.runtime.clone(), + } + out.runtime.otto = out + return out +} + +// Object{} + +// Object is the representation of a JavaScript object. +type Object struct { + object *_object + value Value +} + +func _newObject(object *_object, value Value) *Object { + // value MUST contain object! + return &Object{ + object: object, + value: value, + } +} + +// Call a method on the object. +// +// It is essentially equivalent to: +// +// var method, _ := object.Get(name) +// method.Call(object, argumentList...) +// +// An undefined value and an error will result if: +// +// 1. 
There is an error during conversion of the argument list +// 2. The property is not actually a function +// 3. An (uncaught) exception is thrown +// +func (self Object) Call(name string, argumentList ...interface{}) (Value, error) { + // TODO: Insert an example using JavaScript below... + // e.g., Object("JSON").Call("stringify", ...) + + function, err := self.Get(name) + if err != nil { + return Value{}, err + } + return function.Call(self.Value(), argumentList...) +} + +// Value will return self as a value. +func (self Object) Value() Value { + return self.value +} + +// Get the value of the property with the given name. +func (self Object) Get(name string) (Value, error) { + value := Value{} + err := catchPanic(func() { + value = self.object.get(name) + }) + if !value.safe() { + value = Value{} + } + return value, err +} + +// Set the property of the given name to the given value. +// +// An error will result if the setting the property triggers an exception (i.e. read-only), +// or there is an error during conversion of the given value. +func (self Object) Set(name string, value interface{}) error { + { + value, err := self.object.runtime.safeToValue(value) + if err != nil { + return err + } + err = catchPanic(func() { + self.object.put(name, value, true) + }) + return err + } +} + +// Keys gets the keys for the given object. +// +// Equivalent to calling Object.keys on the object. +func (self Object) Keys() []string { + var keys []string + self.object.enumerate(false, func(name string) bool { + keys = append(keys, name) + return true + }) + return keys +} + +// KeysByParent gets the keys (and those of the parents) for the given object, +// in order of "closest" to "furthest". +func (self Object) KeysByParent() [][]string { + var a [][]string + + for o := self.object; o != nil; o = o.prototype { + var l []string + + o.enumerate(false, func(name string) bool { + l = append(l, name) + return true + }) + + a = append(a, l) + } + + return a +} + +// Class will return the class string of the object. 
+// +// The return value will (generally) be one of: +// +// Object +// Function +// Array +// String +// Number +// Boolean +// Date +// RegExp +// +func (self Object) Class() string { + return self.object.class +} diff --git a/vendor/github.com/robertkrimen/otto/otto_.go b/vendor/github.com/robertkrimen/otto/otto_.go new file mode 100644 index 000000000..304a83150 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/otto_.go @@ -0,0 +1,178 @@ +package otto + +import ( + "fmt" + "regexp" + runtime_ "runtime" + "strconv" + "strings" +) + +var isIdentifier_Regexp *regexp.Regexp = regexp.MustCompile(`^[a-zA-Z\$][a-zA-Z0-9\$]*$`) + +func isIdentifier(string_ string) bool { + return isIdentifier_Regexp.MatchString(string_) +} + +func (self *_runtime) toValueArray(arguments ...interface{}) []Value { + length := len(arguments) + if length == 1 { + if valueArray, ok := arguments[0].([]Value); ok { + return valueArray + } + return []Value{self.toValue(arguments[0])} + } + + valueArray := make([]Value, length) + for index, value := range arguments { + valueArray[index] = self.toValue(value) + } + + return valueArray +} + +func stringToArrayIndex(name string) int64 { + index, err := strconv.ParseInt(name, 10, 64) + if err != nil { + return -1 + } + if index < 0 { + return -1 + } + if index >= maxUint32 { + // The value 2^32 (or above) is not a valid index because + // you cannot store a uint32 length for an index of uint32 + return -1 + } + return index +} + +func isUint32(value int64) bool { + return value >= 0 && value <= maxUint32 +} + +func arrayIndexToString(index int64) string { + return strconv.FormatInt(index, 10) +} + +func valueOfArrayIndex(array []Value, index int) Value { + value, _ := getValueOfArrayIndex(array, index) + return value +} + +func getValueOfArrayIndex(array []Value, index int) (Value, bool) { + if index >= 0 && index < len(array) { + value := array[index] + if !value.isEmpty() { + return value, true + } + } + return Value{}, false +} + +// A range index can be anything from 0 up to length. It is NOT safe to use as an index +// to an array, but is useful for slicing and in some ECMA algorithms. +func valueToRangeIndex(indexValue Value, length int64, negativeIsZero bool) int64 { + index := indexValue.number().int64 + if negativeIsZero { + if index < 0 { + index = 0 + } + // minimum(index, length) + if index >= length { + index = length + } + return index + } + + if index < 0 { + index += length + if index < 0 { + index = 0 + } + } else { + if index > length { + index = length + } + } + return index +} + +func rangeStartEnd(array []Value, size int64, negativeIsZero bool) (start, end int64) { + start = valueToRangeIndex(valueOfArrayIndex(array, 0), size, negativeIsZero) + if len(array) == 1 { + // If there is only the start argument, then end = size + end = size + return + } + + // Assuming the argument is undefined... 
+ end = size + endValue := valueOfArrayIndex(array, 1) + if !endValue.IsUndefined() { + // Which it is not, so get the value as an array index + end = valueToRangeIndex(endValue, size, negativeIsZero) + } + return +} + +func rangeStartLength(source []Value, size int64) (start, length int64) { + start = valueToRangeIndex(valueOfArrayIndex(source, 0), size, false) + + // Assume the second argument is missing or undefined + length = int64(size) + if len(source) == 1 { + // If there is only the start argument, then length = size + return + } + + lengthValue := valueOfArrayIndex(source, 1) + if !lengthValue.IsUndefined() { + // Which it is not, so get the value as an array index + length = lengthValue.number().int64 + } + return +} + +func boolFields(input string) (result map[string]bool) { + result = map[string]bool{} + for _, word := range strings.Fields(input) { + result[word] = true + } + return result +} + +func hereBeDragons(arguments ...interface{}) string { + pc, _, _, _ := runtime_.Caller(1) + name := runtime_.FuncForPC(pc).Name() + message := fmt.Sprintf("Here be dragons -- %s", name) + if len(arguments) > 0 { + message += ": " + argument0 := fmt.Sprintf("%s", arguments[0]) + if len(arguments) == 1 { + message += argument0 + } else { + message += fmt.Sprintf(argument0, arguments[1:]...) + } + } else { + message += "." + } + return message +} + +func throwHereBeDragons(arguments ...interface{}) { + panic(hereBeDragons(arguments...)) +} + +func eachPair(list []interface{}, fn func(_0, _1 interface{})) { + for len(list) > 0 { + var _0, _1 interface{} + _0 = list[0] + list = list[1:] // Pop off first + if len(list) > 0 { + _1 = list[0] + list = list[1:] // Pop off second + } + fn(_0, _1) + } +} diff --git a/vendor/github.com/robertkrimen/otto/parser/Makefile b/vendor/github.com/robertkrimen/otto/parser/Makefile new file mode 100644 index 000000000..766fd4d0b --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/parser/Makefile @@ -0,0 +1,4 @@ +.PHONY: test + +test: + go test diff --git a/vendor/github.com/robertkrimen/otto/parser/README.markdown b/vendor/github.com/robertkrimen/otto/parser/README.markdown new file mode 100644 index 000000000..c3cae5b60 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/parser/README.markdown @@ -0,0 +1,190 @@ +# parser +-- + import "github.com/robertkrimen/otto/parser" + +Package parser implements a parser for JavaScript. + + import ( + "github.com/robertkrimen/otto/parser" + ) + +Parse and return an AST + + filename := "" // A filename is optional + src := ` + // Sample xyzzy example + (function(){ + if (3.14159 > 0) { + console.log("Hello, World."); + return; + } + + var xyzzy = NaN; + console.log("Nothing happens."); + return xyzzy; + })(); + ` + + // Parse some JavaScript, yielding a *ast.Program and/or an ErrorList + program, err := parser.ParseFile(nil, filename, src, 0) + + +### Warning + +The parser and AST interfaces are still works-in-progress (particularly where +node types are concerned) and may change in the future. + +## Usage + +#### func ParseFile + +```go +func ParseFile(fileSet *file.FileSet, filename string, src interface{}, mode Mode) (*ast.Program, error) +``` +ParseFile parses the source code of a single JavaScript/ECMAScript source file +and returns the corresponding ast.Program node. + +If fileSet == nil, ParseFile parses source without a FileSet. If fileSet != nil, +ParseFile first adds filename and src to fileSet. + +The filename argument is optional and is used for labelling errors, etc. 
+ +src may be a string, a byte slice, a bytes.Buffer, or an io.Reader, but it MUST +always be in UTF-8. + + // Parse some JavaScript, yielding a *ast.Program and/or an ErrorList + program, err := parser.ParseFile(nil, "", `if (abc > 1) {}`, 0) + +#### func ParseFunction + +```go +func ParseFunction(parameterList, body string) (*ast.FunctionLiteral, error) +``` +ParseFunction parses a given parameter list and body as a function and returns +the corresponding ast.FunctionLiteral node. + +The parameter list, if any, should be a comma-separated list of identifiers. + +#### func ReadSource + +```go +func ReadSource(filename string, src interface{}) ([]byte, error) +``` + +#### func TransformRegExp + +```go +func TransformRegExp(pattern string) (string, error) +``` +TransformRegExp transforms a JavaScript pattern into a Go "regexp" pattern. + +re2 (Go) cannot do backtracking, so the presence of a lookahead (?=) (?!) or +backreference (\1, \2, ...) will cause an error. + +re2 (Go) has a different definition for \s: [\t\n\f\r ]. The JavaScript +definition, on the other hand, also includes \v, Unicode "Separator, Space", +etc. + +If the pattern is invalid (not valid even in JavaScript), then this function +returns the empty string and an error. + +If the pattern is valid, but incompatible (contains a lookahead or +backreference), then this function returns the transformation (a non-empty +string) AND an error. + +#### type Error + +```go +type Error struct { + Position file.Position + Message string +} +``` + +An Error represents a parsing error. It includes the position where the error +occurred and a message/description. + +#### func (Error) Error + +```go +func (self Error) Error() string +``` + +#### type ErrorList + +```go +type ErrorList []*Error +``` + +ErrorList is a list of *Errors. + +#### func (*ErrorList) Add + +```go +func (self *ErrorList) Add(position file.Position, msg string) +``` +Add adds an Error with given position and message to an ErrorList. + +#### func (ErrorList) Err + +```go +func (self ErrorList) Err() error +``` +Err returns an error equivalent to this ErrorList. If the list is empty, Err +returns nil. + +#### func (ErrorList) Error + +```go +func (self ErrorList) Error() string +``` +Error implements the Error interface. + +#### func (ErrorList) Len + +```go +func (self ErrorList) Len() int +``` + +#### func (ErrorList) Less + +```go +func (self ErrorList) Less(i, j int) bool +``` + +#### func (*ErrorList) Reset + +```go +func (self *ErrorList) Reset() +``` +Reset resets an ErrorList to no errors. + +#### func (ErrorList) Sort + +```go +func (self ErrorList) Sort() +``` + +#### func (ErrorList) Swap + +```go +func (self ErrorList) Swap(i, j int) +``` + +#### type Mode + +```go +type Mode uint +``` + +A Mode value is a set of flags (or 0). They control optional parser +functionality. 
+ +```go +const ( + IgnoreRegExpErrors Mode = 1 << iota // Ignore RegExp compatibility errors (allow backtracking) +) +``` + +-- +**godocdown** http://github.com/robertkrimen/godocdown diff --git a/vendor/github.com/robertkrimen/otto/parser/dbg.go b/vendor/github.com/robertkrimen/otto/parser/dbg.go new file mode 100644 index 000000000..3c5f2f698 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/parser/dbg.go @@ -0,0 +1,9 @@ +// This file was AUTOMATICALLY GENERATED by dbg-import (smuggol) for github.com/robertkrimen/dbg + +package parser + +import ( + Dbg "github.com/robertkrimen/otto/dbg" +) + +var dbg, dbgf = Dbg.New() diff --git a/vendor/github.com/robertkrimen/otto/parser/error.go b/vendor/github.com/robertkrimen/otto/parser/error.go new file mode 100644 index 000000000..e0f74a5cf --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/parser/error.go @@ -0,0 +1,175 @@ +package parser + +import ( + "fmt" + "sort" + + "github.com/robertkrimen/otto/file" + "github.com/robertkrimen/otto/token" +) + +const ( + err_UnexpectedToken = "Unexpected token %v" + err_UnexpectedEndOfInput = "Unexpected end of input" + err_UnexpectedEscape = "Unexpected escape" +) + +// UnexpectedNumber: 'Unexpected number', +// UnexpectedString: 'Unexpected string', +// UnexpectedIdentifier: 'Unexpected identifier', +// UnexpectedReserved: 'Unexpected reserved word', +// NewlineAfterThrow: 'Illegal newline after throw', +// InvalidRegExp: 'Invalid regular expression', +// UnterminatedRegExp: 'Invalid regular expression: missing /', +// InvalidLHSInAssignment: 'Invalid left-hand side in assignment', +// InvalidLHSInForIn: 'Invalid left-hand side in for-in', +// MultipleDefaultsInSwitch: 'More than one default clause in switch statement', +// NoCatchOrFinally: 'Missing catch or finally after try', +// UnknownLabel: 'Undefined label \'%0\'', +// Redeclaration: '%0 \'%1\' has already been declared', +// IllegalContinue: 'Illegal continue statement', +// IllegalBreak: 'Illegal break statement', +// IllegalReturn: 'Illegal return statement', +// StrictModeWith: 'Strict mode code may not include a with statement', +// StrictCatchVariable: 'Catch variable may not be eval or arguments in strict mode', +// StrictVarName: 'Variable name may not be eval or arguments in strict mode', +// StrictParamName: 'Parameter name eval or arguments is not allowed in strict mode', +// StrictParamDupe: 'Strict mode function may not have duplicate parameter names', +// StrictFunctionName: 'Function name may not be eval or arguments in strict mode', +// StrictOctalLiteral: 'Octal literals are not allowed in strict mode.', +// StrictDelete: 'Delete of an unqualified identifier in strict mode.', +// StrictDuplicateProperty: 'Duplicate data property in object literal not allowed in strict mode', +// AccessorDataProperty: 'Object literal may not have data and accessor property with the same name', +// AccessorGetSet: 'Object literal may not have multiple get/set accessors with the same name', +// StrictLHSAssignment: 'Assignment to eval or arguments is not allowed in strict mode', +// StrictLHSPostfix: 'Postfix increment/decrement may not have eval or arguments operand in strict mode', +// StrictLHSPrefix: 'Prefix increment/decrement may not have eval or arguments operand in strict mode', +// StrictReservedWord: 'Use of future reserved word in strict mode' + +// A SyntaxError is a description of an ECMAScript syntax error. + +// An Error represents a parsing error. It includes the position where the error occurred and a message/description. 
+type Error struct { + Position file.Position + Message string +} + +// FIXME Should this be "SyntaxError"? + +func (self Error) Error() string { + filename := self.Position.Filename + if filename == "" { + filename = "(anonymous)" + } + return fmt.Sprintf("%s: Line %d:%d %s", + filename, + self.Position.Line, + self.Position.Column, + self.Message, + ) +} + +func (self *_parser) error(place interface{}, msg string, msgValues ...interface{}) *Error { + idx := file.Idx(0) + switch place := place.(type) { + case int: + idx = self.idxOf(place) + case file.Idx: + if place == 0 { + idx = self.idxOf(self.chrOffset) + } else { + idx = place + } + default: + panic(fmt.Errorf("error(%T, ...)", place)) + } + + position := self.position(idx) + msg = fmt.Sprintf(msg, msgValues...) + self.errors.Add(position, msg) + return self.errors[len(self.errors)-1] +} + +func (self *_parser) errorUnexpected(idx file.Idx, chr rune) error { + if chr == -1 { + return self.error(idx, err_UnexpectedEndOfInput) + } + return self.error(idx, err_UnexpectedToken, token.ILLEGAL) +} + +func (self *_parser) errorUnexpectedToken(tkn token.Token) error { + switch tkn { + case token.EOF: + return self.error(file.Idx(0), err_UnexpectedEndOfInput) + } + value := tkn.String() + switch tkn { + case token.BOOLEAN, token.NULL: + value = self.literal + case token.IDENTIFIER: + return self.error(self.idx, "Unexpected identifier") + case token.KEYWORD: + // TODO Might be a future reserved word + return self.error(self.idx, "Unexpected reserved word") + case token.NUMBER: + return self.error(self.idx, "Unexpected number") + case token.STRING: + return self.error(self.idx, "Unexpected string") + } + return self.error(self.idx, err_UnexpectedToken, value) +} + +// ErrorList is a list of *Errors. +// +type ErrorList []*Error + +// Add adds an Error with given position and message to an ErrorList. +func (self *ErrorList) Add(position file.Position, msg string) { + *self = append(*self, &Error{position, msg}) +} + +// Reset resets an ErrorList to no errors. +func (self *ErrorList) Reset() { *self = (*self)[0:0] } + +func (self ErrorList) Len() int { return len(self) } +func (self ErrorList) Swap(i, j int) { self[i], self[j] = self[j], self[i] } +func (self ErrorList) Less(i, j int) bool { + x := &self[i].Position + y := &self[j].Position + if x.Filename < y.Filename { + return true + } + if x.Filename == y.Filename { + if x.Line < y.Line { + return true + } + if x.Line == y.Line { + return x.Column < y.Column + } + } + return false +} + +func (self ErrorList) Sort() { + sort.Sort(self) +} + +// Error implements the Error interface. +func (self ErrorList) Error() string { + switch len(self) { + case 0: + return "no errors" + case 1: + return self[0].Error() + } + return fmt.Sprintf("%s (and %d more errors)", self[0].Error(), len(self)-1) +} + +// Err returns an error equivalent to this ErrorList. +// If the list is empty, Err returns nil. 
+func (self ErrorList) Err() error { + if len(self) == 0 { + return nil + } + return self +} diff --git a/vendor/github.com/robertkrimen/otto/parser/expression.go b/vendor/github.com/robertkrimen/otto/parser/expression.go new file mode 100644 index 000000000..63a169d40 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/parser/expression.go @@ -0,0 +1,1005 @@ +package parser + +import ( + "regexp" + + "github.com/robertkrimen/otto/ast" + "github.com/robertkrimen/otto/file" + "github.com/robertkrimen/otto/token" +) + +func (self *_parser) parseIdentifier() *ast.Identifier { + literal := self.literal + idx := self.idx + if self.mode&StoreComments != 0 { + self.comments.MarkComments(ast.LEADING) + } + self.next() + exp := &ast.Identifier{ + Name: literal, + Idx: idx, + } + + if self.mode&StoreComments != 0 { + self.comments.SetExpression(exp) + } + + return exp +} + +func (self *_parser) parsePrimaryExpression() ast.Expression { + literal := self.literal + idx := self.idx + switch self.token { + case token.IDENTIFIER: + self.next() + if len(literal) > 1 { + tkn, strict := token.IsKeyword(literal) + if tkn == token.KEYWORD { + if !strict { + self.error(idx, "Unexpected reserved word") + } + } + } + return &ast.Identifier{ + Name: literal, + Idx: idx, + } + case token.NULL: + self.next() + return &ast.NullLiteral{ + Idx: idx, + Literal: literal, + } + case token.BOOLEAN: + self.next() + value := false + switch literal { + case "true": + value = true + case "false": + value = false + default: + self.error(idx, "Illegal boolean literal") + } + return &ast.BooleanLiteral{ + Idx: idx, + Literal: literal, + Value: value, + } + case token.STRING: + self.next() + value, err := parseStringLiteral(literal[1 : len(literal)-1]) + if err != nil { + self.error(idx, err.Error()) + } + return &ast.StringLiteral{ + Idx: idx, + Literal: literal, + Value: value, + } + case token.NUMBER: + self.next() + value, err := parseNumberLiteral(literal) + if err != nil { + self.error(idx, err.Error()) + value = 0 + } + return &ast.NumberLiteral{ + Idx: idx, + Literal: literal, + Value: value, + } + case token.SLASH, token.QUOTIENT_ASSIGN: + return self.parseRegExpLiteral() + case token.LEFT_BRACE: + return self.parseObjectLiteral() + case token.LEFT_BRACKET: + return self.parseArrayLiteral() + case token.LEFT_PARENTHESIS: + self.expect(token.LEFT_PARENTHESIS) + expression := self.parseExpression() + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.expect(token.RIGHT_PARENTHESIS) + return expression + case token.THIS: + self.next() + return &ast.ThisExpression{ + Idx: idx, + } + case token.FUNCTION: + return self.parseFunction(false) + } + + self.errorUnexpectedToken(self.token) + self.nextStatement() + return &ast.BadExpression{From: idx, To: self.idx} +} + +func (self *_parser) parseRegExpLiteral() *ast.RegExpLiteral { + + offset := self.chrOffset - 1 // Opening slash already gotten + if self.token == token.QUOTIENT_ASSIGN { + offset -= 1 // = + } + idx := self.idxOf(offset) + + pattern, err := self.scanString(offset) + endOffset := self.chrOffset + + self.next() + if err == nil { + pattern = pattern[1 : len(pattern)-1] + } + + flags := "" + if self.token == token.IDENTIFIER { // gim + + flags = self.literal + self.next() + endOffset = self.chrOffset - 1 + } + + var value string + // TODO 15.10 + { + // Test during parsing that this is a valid regular expression + // Sorry, (?=) and (?!) 
are invalid (for now) + pattern, err := TransformRegExp(pattern) + if err != nil { + if pattern == "" || self.mode&IgnoreRegExpErrors == 0 { + self.error(idx, "Invalid regular expression: %s", err.Error()) + } + } else { + _, err = regexp.Compile(pattern) + if err != nil { + // We should not get here, ParseRegExp should catch any errors + self.error(idx, "Invalid regular expression: %s", err.Error()[22:]) // Skip redundant "parse regexp error" + } else { + value = pattern + } + } + } + + literal := self.str[offset:endOffset] + + return &ast.RegExpLiteral{ + Idx: idx, + Literal: literal, + Pattern: pattern, + Flags: flags, + Value: value, + } +} + +func (self *_parser) parseVariableDeclaration(declarationList *[]*ast.VariableExpression) ast.Expression { + + if self.token != token.IDENTIFIER { + idx := self.expect(token.IDENTIFIER) + self.nextStatement() + return &ast.BadExpression{From: idx, To: self.idx} + } + + literal := self.literal + idx := self.idx + self.next() + node := &ast.VariableExpression{ + Name: literal, + Idx: idx, + } + if self.mode&StoreComments != 0 { + self.comments.SetExpression(node) + } + + if declarationList != nil { + *declarationList = append(*declarationList, node) + } + + if self.token == token.ASSIGN { + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + node.Initializer = self.parseAssignmentExpression() + } + + return node +} + +func (self *_parser) parseVariableDeclarationList(var_ file.Idx) []ast.Expression { + + var declarationList []*ast.VariableExpression // Avoid bad expressions + var list []ast.Expression + + for { + if self.mode&StoreComments != 0 { + self.comments.MarkComments(ast.LEADING) + } + decl := self.parseVariableDeclaration(&declarationList) + list = append(list, decl) + if self.token != token.COMMA { + break + } + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + } + + self.scope.declare(&ast.VariableDeclaration{ + Var: var_, + List: declarationList, + }) + + return list +} + +func (self *_parser) parseObjectPropertyKey() (string, string) { + idx, tkn, literal := self.idx, self.token, self.literal + value := "" + if self.mode&StoreComments != 0 { + self.comments.MarkComments(ast.KEY) + } + self.next() + + switch tkn { + case token.IDENTIFIER: + value = literal + case token.NUMBER: + var err error + _, err = parseNumberLiteral(literal) + if err != nil { + self.error(idx, err.Error()) + } else { + value = literal + } + case token.STRING: + var err error + value, err = parseStringLiteral(literal[1 : len(literal)-1]) + if err != nil { + self.error(idx, err.Error()) + } + default: + // null, false, class, etc. 
+ if matchIdentifier.MatchString(literal) { + value = literal + } + } + return literal, value +} + +func (self *_parser) parseObjectProperty() ast.Property { + literal, value := self.parseObjectPropertyKey() + if literal == "get" && self.token != token.COLON { + idx := self.idx + _, value := self.parseObjectPropertyKey() + parameterList := self.parseFunctionParameterList() + + node := &ast.FunctionLiteral{ + Function: idx, + ParameterList: parameterList, + } + self.parseFunctionBlock(node) + return ast.Property{ + Key: value, + Kind: "get", + Value: node, + } + } else if literal == "set" && self.token != token.COLON { + idx := self.idx + _, value := self.parseObjectPropertyKey() + parameterList := self.parseFunctionParameterList() + + node := &ast.FunctionLiteral{ + Function: idx, + ParameterList: parameterList, + } + self.parseFunctionBlock(node) + return ast.Property{ + Key: value, + Kind: "set", + Value: node, + } + } + + if self.mode&StoreComments != 0 { + self.comments.MarkComments(ast.COLON) + } + self.expect(token.COLON) + + exp := ast.Property{ + Key: value, + Kind: "value", + Value: self.parseAssignmentExpression(), + } + + if self.mode&StoreComments != 0 { + self.comments.SetExpression(exp.Value) + } + return exp +} + +func (self *_parser) parseObjectLiteral() ast.Expression { + var value []ast.Property + idx0 := self.expect(token.LEFT_BRACE) + for self.token != token.RIGHT_BRACE && self.token != token.EOF { + value = append(value, self.parseObjectProperty()) + if self.token == token.COMMA { + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + continue + } + } + if self.mode&StoreComments != 0 { + self.comments.MarkComments(ast.FINAL) + } + idx1 := self.expect(token.RIGHT_BRACE) + + return &ast.ObjectLiteral{ + LeftBrace: idx0, + RightBrace: idx1, + Value: value, + } +} + +func (self *_parser) parseArrayLiteral() ast.Expression { + idx0 := self.expect(token.LEFT_BRACKET) + var value []ast.Expression + for self.token != token.RIGHT_BRACKET && self.token != token.EOF { + if self.token == token.COMMA { + // This kind of comment requires a special empty expression node. 
+ empty := &ast.EmptyExpression{self.idx, self.idx} + + if self.mode&StoreComments != 0 { + self.comments.SetExpression(empty) + self.comments.Unset() + } + value = append(value, empty) + self.next() + continue + } + + exp := self.parseAssignmentExpression() + + value = append(value, exp) + if self.token != token.RIGHT_BRACKET { + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.expect(token.COMMA) + } + } + if self.mode&StoreComments != 0 { + self.comments.MarkComments(ast.FINAL) + } + idx1 := self.expect(token.RIGHT_BRACKET) + + return &ast.ArrayLiteral{ + LeftBracket: idx0, + RightBracket: idx1, + Value: value, + } +} + +func (self *_parser) parseArgumentList() (argumentList []ast.Expression, idx0, idx1 file.Idx) { + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + idx0 = self.expect(token.LEFT_PARENTHESIS) + if self.token != token.RIGHT_PARENTHESIS { + for { + exp := self.parseAssignmentExpression() + if self.mode&StoreComments != 0 { + self.comments.SetExpression(exp) + } + argumentList = append(argumentList, exp) + if self.token != token.COMMA { + break + } + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + } + } + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + idx1 = self.expect(token.RIGHT_PARENTHESIS) + return +} + +func (self *_parser) parseCallExpression(left ast.Expression) ast.Expression { + argumentList, idx0, idx1 := self.parseArgumentList() + exp := &ast.CallExpression{ + Callee: left, + LeftParenthesis: idx0, + ArgumentList: argumentList, + RightParenthesis: idx1, + } + + if self.mode&StoreComments != 0 { + self.comments.SetExpression(exp) + } + return exp +} + +func (self *_parser) parseDotMember(left ast.Expression) ast.Expression { + period := self.expect(token.PERIOD) + + literal := self.literal + idx := self.idx + + if !matchIdentifier.MatchString(literal) { + self.expect(token.IDENTIFIER) + self.nextStatement() + return &ast.BadExpression{From: period, To: self.idx} + } + + self.next() + + return &ast.DotExpression{ + Left: left, + Identifier: &ast.Identifier{ + Idx: idx, + Name: literal, + }, + } +} + +func (self *_parser) parseBracketMember(left ast.Expression) ast.Expression { + idx0 := self.expect(token.LEFT_BRACKET) + member := self.parseExpression() + idx1 := self.expect(token.RIGHT_BRACKET) + return &ast.BracketExpression{ + LeftBracket: idx0, + Left: left, + Member: member, + RightBracket: idx1, + } +} + +func (self *_parser) parseNewExpression() ast.Expression { + idx := self.expect(token.NEW) + callee := self.parseLeftHandSideExpression() + node := &ast.NewExpression{ + New: idx, + Callee: callee, + } + if self.token == token.LEFT_PARENTHESIS { + argumentList, idx0, idx1 := self.parseArgumentList() + node.ArgumentList = argumentList + node.LeftParenthesis = idx0 + node.RightParenthesis = idx1 + } + + if self.mode&StoreComments != 0 { + self.comments.SetExpression(node) + } + + return node +} + +func (self *_parser) parseLeftHandSideExpression() ast.Expression { + + var left ast.Expression + if self.token == token.NEW { + left = self.parseNewExpression() + } else { + if self.mode&StoreComments != 0 { + self.comments.MarkComments(ast.LEADING) + self.comments.MarkPrimary() + } + left = self.parsePrimaryExpression() + } + + if self.mode&StoreComments != 0 { + self.comments.SetExpression(left) + } + + for { + if self.token == token.PERIOD { + left = self.parseDotMember(left) + } else if self.token == token.LEFT_BRACKET { + left = self.parseBracketMember(left) + } else { + break + } 
+ } + + return left +} + +func (self *_parser) parseLeftHandSideExpressionAllowCall() ast.Expression { + + allowIn := self.scope.allowIn + self.scope.allowIn = true + defer func() { + self.scope.allowIn = allowIn + }() + + var left ast.Expression + if self.token == token.NEW { + var newComments []*ast.Comment + if self.mode&StoreComments != 0 { + newComments = self.comments.FetchAll() + self.comments.MarkComments(ast.LEADING) + self.comments.MarkPrimary() + } + left = self.parseNewExpression() + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(left, newComments, ast.LEADING) + } + } else { + if self.mode&StoreComments != 0 { + self.comments.MarkComments(ast.LEADING) + self.comments.MarkPrimary() + } + left = self.parsePrimaryExpression() + } + + if self.mode&StoreComments != 0 { + self.comments.SetExpression(left) + } + + for { + if self.token == token.PERIOD { + left = self.parseDotMember(left) + } else if self.token == token.LEFT_BRACKET { + left = self.parseBracketMember(left) + } else if self.token == token.LEFT_PARENTHESIS { + left = self.parseCallExpression(left) + } else { + break + } + } + + return left +} + +func (self *_parser) parsePostfixExpression() ast.Expression { + operand := self.parseLeftHandSideExpressionAllowCall() + + switch self.token { + case token.INCREMENT, token.DECREMENT: + // Make sure there is no line terminator here + if self.implicitSemicolon { + break + } + tkn := self.token + idx := self.idx + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + switch operand.(type) { + case *ast.Identifier, *ast.DotExpression, *ast.BracketExpression: + default: + self.error(idx, "Invalid left-hand side in assignment") + self.nextStatement() + return &ast.BadExpression{From: idx, To: self.idx} + } + exp := &ast.UnaryExpression{ + Operator: tkn, + Idx: idx, + Operand: operand, + Postfix: true, + } + + if self.mode&StoreComments != 0 { + self.comments.SetExpression(exp) + } + + return exp + } + + return operand +} + +func (self *_parser) parseUnaryExpression() ast.Expression { + + switch self.token { + case token.PLUS, token.MINUS, token.NOT, token.BITWISE_NOT: + fallthrough + case token.DELETE, token.VOID, token.TYPEOF: + tkn := self.token + idx := self.idx + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + + return &ast.UnaryExpression{ + Operator: tkn, + Idx: idx, + Operand: self.parseUnaryExpression(), + } + case token.INCREMENT, token.DECREMENT: + tkn := self.token + idx := self.idx + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + operand := self.parseUnaryExpression() + switch operand.(type) { + case *ast.Identifier, *ast.DotExpression, *ast.BracketExpression: + default: + self.error(idx, "Invalid left-hand side in assignment") + self.nextStatement() + return &ast.BadExpression{From: idx, To: self.idx} + } + return &ast.UnaryExpression{ + Operator: tkn, + Idx: idx, + Operand: operand, + } + } + + return self.parsePostfixExpression() +} + +func (self *_parser) parseMultiplicativeExpression() ast.Expression { + next := self.parseUnaryExpression + left := next() + + for self.token == token.MULTIPLY || self.token == token.SLASH || + self.token == token.REMAINDER { + tkn := self.token + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + + left = &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: next(), + } + } + + return left +} + +func (self *_parser) parseAdditiveExpression() ast.Expression { + next := 
self.parseMultiplicativeExpression + left := next() + + for self.token == token.PLUS || self.token == token.MINUS { + tkn := self.token + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + + left = &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: next(), + } + } + + return left +} + +func (self *_parser) parseShiftExpression() ast.Expression { + next := self.parseAdditiveExpression + left := next() + + for self.token == token.SHIFT_LEFT || self.token == token.SHIFT_RIGHT || + self.token == token.UNSIGNED_SHIFT_RIGHT { + tkn := self.token + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + + left = &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: next(), + } + } + + return left +} + +func (self *_parser) parseRelationalExpression() ast.Expression { + next := self.parseShiftExpression + left := next() + + allowIn := self.scope.allowIn + self.scope.allowIn = true + defer func() { + self.scope.allowIn = allowIn + }() + + switch self.token { + case token.LESS, token.LESS_OR_EQUAL, token.GREATER, token.GREATER_OR_EQUAL: + tkn := self.token + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + + exp := &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: self.parseRelationalExpression(), + Comparison: true, + } + return exp + case token.INSTANCEOF: + tkn := self.token + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + + exp := &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: self.parseRelationalExpression(), + } + return exp + case token.IN: + if !allowIn { + return left + } + tkn := self.token + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + + exp := &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: self.parseRelationalExpression(), + } + return exp + } + + return left +} + +func (self *_parser) parseEqualityExpression() ast.Expression { + next := self.parseRelationalExpression + left := next() + + for self.token == token.EQUAL || self.token == token.NOT_EQUAL || + self.token == token.STRICT_EQUAL || self.token == token.STRICT_NOT_EQUAL { + tkn := self.token + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + + left = &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: next(), + Comparison: true, + } + } + + return left +} + +func (self *_parser) parseBitwiseAndExpression() ast.Expression { + next := self.parseEqualityExpression + left := next() + + for self.token == token.AND { + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + tkn := self.token + self.next() + + left = &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: next(), + } + } + + return left +} + +func (self *_parser) parseBitwiseExclusiveOrExpression() ast.Expression { + next := self.parseBitwiseAndExpression + left := next() + + for self.token == token.EXCLUSIVE_OR { + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + tkn := self.token + self.next() + + left = &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: next(), + } + } + + return left +} + +func (self *_parser) parseBitwiseOrExpression() ast.Expression { + next := self.parseBitwiseExclusiveOrExpression + left := next() + + for self.token == token.OR { + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + tkn := self.token + self.next() + + left = &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: next(), + } + } + + return left +} + +func (self *_parser) 
parseLogicalAndExpression() ast.Expression { + next := self.parseBitwiseOrExpression + left := next() + + for self.token == token.LOGICAL_AND { + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + tkn := self.token + self.next() + + left = &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: next(), + } + } + + return left +} + +func (self *_parser) parseLogicalOrExpression() ast.Expression { + next := self.parseLogicalAndExpression + left := next() + + for self.token == token.LOGICAL_OR { + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + tkn := self.token + self.next() + + left = &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: next(), + } + } + + return left +} + +func (self *_parser) parseConditionlExpression() ast.Expression { + left := self.parseLogicalOrExpression() + + if self.token == token.QUESTION_MARK { + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + + consequent := self.parseAssignmentExpression() + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.expect(token.COLON) + exp := &ast.ConditionalExpression{ + Test: left, + Consequent: consequent, + Alternate: self.parseAssignmentExpression(), + } + + return exp + } + + return left +} + +func (self *_parser) parseAssignmentExpression() ast.Expression { + left := self.parseConditionlExpression() + var operator token.Token + switch self.token { + case token.ASSIGN: + operator = self.token + case token.ADD_ASSIGN: + operator = token.PLUS + case token.SUBTRACT_ASSIGN: + operator = token.MINUS + case token.MULTIPLY_ASSIGN: + operator = token.MULTIPLY + case token.QUOTIENT_ASSIGN: + operator = token.SLASH + case token.REMAINDER_ASSIGN: + operator = token.REMAINDER + case token.AND_ASSIGN: + operator = token.AND + case token.AND_NOT_ASSIGN: + operator = token.AND_NOT + case token.OR_ASSIGN: + operator = token.OR + case token.EXCLUSIVE_OR_ASSIGN: + operator = token.EXCLUSIVE_OR + case token.SHIFT_LEFT_ASSIGN: + operator = token.SHIFT_LEFT + case token.SHIFT_RIGHT_ASSIGN: + operator = token.SHIFT_RIGHT + case token.UNSIGNED_SHIFT_RIGHT_ASSIGN: + operator = token.UNSIGNED_SHIFT_RIGHT + } + + if operator != 0 { + idx := self.idx + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + switch left.(type) { + case *ast.Identifier, *ast.DotExpression, *ast.BracketExpression: + default: + self.error(left.Idx0(), "Invalid left-hand side in assignment") + self.nextStatement() + return &ast.BadExpression{From: idx, To: self.idx} + } + + exp := &ast.AssignExpression{ + Left: left, + Operator: operator, + Right: self.parseAssignmentExpression(), + } + + if self.mode&StoreComments != 0 { + self.comments.SetExpression(exp) + } + + return exp + } + + return left +} + +func (self *_parser) parseExpression() ast.Expression { + next := self.parseAssignmentExpression + left := next() + + if self.token == token.COMMA { + sequence := []ast.Expression{left} + for { + if self.token != token.COMMA { + break + } + self.next() + sequence = append(sequence, next()) + } + return &ast.SequenceExpression{ + Sequence: sequence, + } + } + + return left +} diff --git a/vendor/github.com/robertkrimen/otto/parser/lexer.go b/vendor/github.com/robertkrimen/otto/parser/lexer.go new file mode 100644 index 000000000..d9d69e124 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/parser/lexer.go @@ -0,0 +1,866 @@ +package parser + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + 
"github.com/robertkrimen/otto/ast" + "github.com/robertkrimen/otto/file" + "github.com/robertkrimen/otto/token" +) + +type _chr struct { + value rune + width int +} + +var matchIdentifier = regexp.MustCompile(`^[$_\p{L}][$_\p{L}\d}]*$`) + +func isDecimalDigit(chr rune) bool { + return '0' <= chr && chr <= '9' +} + +func digitValue(chr rune) int { + switch { + case '0' <= chr && chr <= '9': + return int(chr - '0') + case 'a' <= chr && chr <= 'f': + return int(chr - 'a' + 10) + case 'A' <= chr && chr <= 'F': + return int(chr - 'A' + 10) + } + return 16 // Larger than any legal digit value +} + +func isDigit(chr rune, base int) bool { + return digitValue(chr) < base +} + +func isIdentifierStart(chr rune) bool { + return chr == '$' || chr == '_' || chr == '\\' || + 'a' <= chr && chr <= 'z' || 'A' <= chr && chr <= 'Z' || + chr >= utf8.RuneSelf && unicode.IsLetter(chr) +} + +func isIdentifierPart(chr rune) bool { + return chr == '$' || chr == '_' || chr == '\\' || + 'a' <= chr && chr <= 'z' || 'A' <= chr && chr <= 'Z' || + '0' <= chr && chr <= '9' || + chr >= utf8.RuneSelf && (unicode.IsLetter(chr) || unicode.IsDigit(chr)) +} + +func (self *_parser) scanIdentifier() (string, error) { + offset := self.chrOffset + parse := false + for isIdentifierPart(self.chr) { + if self.chr == '\\' { + distance := self.chrOffset - offset + self.read() + if self.chr != 'u' { + return "", fmt.Errorf("Invalid identifier escape character: %c (%s)", self.chr, string(self.chr)) + } + parse = true + var value rune + for j := 0; j < 4; j++ { + self.read() + decimal, ok := hex2decimal(byte(self.chr)) + if !ok { + return "", fmt.Errorf("Invalid identifier escape character: %c (%s)", self.chr, string(self.chr)) + } + value = value<<4 | decimal + } + if value == '\\' { + return "", fmt.Errorf("Invalid identifier escape value: %c (%s)", value, string(value)) + } else if distance == 0 { + if !isIdentifierStart(value) { + return "", fmt.Errorf("Invalid identifier escape value: %c (%s)", value, string(value)) + } + } else if distance > 0 { + if !isIdentifierPart(value) { + return "", fmt.Errorf("Invalid identifier escape value: %c (%s)", value, string(value)) + } + } + } + self.read() + } + literal := string(self.str[offset:self.chrOffset]) + if parse { + return parseStringLiteral(literal) + } + return literal, nil +} + +// 7.2 +func isLineWhiteSpace(chr rune) bool { + switch chr { + case '\u0009', '\u000b', '\u000c', '\u0020', '\u00a0', '\ufeff': + return true + case '\u000a', '\u000d', '\u2028', '\u2029': + return false + case '\u0085': + return false + } + return unicode.IsSpace(chr) +} + +// 7.3 +func isLineTerminator(chr rune) bool { + switch chr { + case '\u000a', '\u000d', '\u2028', '\u2029': + return true + } + return false +} + +func (self *_parser) scan() (tkn token.Token, literal string, idx file.Idx) { + + self.implicitSemicolon = false + + for { + self.skipWhiteSpace() + + idx = self.idxOf(self.chrOffset) + insertSemicolon := false + + switch chr := self.chr; { + case isIdentifierStart(chr): + var err error + literal, err = self.scanIdentifier() + if err != nil { + tkn = token.ILLEGAL + break + } + if len(literal) > 1 { + // Keywords are longer than 1 character, avoid lookup otherwise + var strict bool + tkn, strict = token.IsKeyword(literal) + + switch tkn { + + case 0: // Not a keyword + if literal == "true" || literal == "false" { + self.insertSemicolon = true + tkn = token.BOOLEAN + return + } else if literal == "null" { + self.insertSemicolon = true + tkn = token.NULL + return + } + + case token.KEYWORD: + 
tkn = token.KEYWORD + if strict { + // TODO If strict and in strict mode, then this is not a break + break + } + return + + case + token.THIS, + token.BREAK, + token.THROW, // A newline after a throw is not allowed, but we need to detect it + token.RETURN, + token.CONTINUE, + token.DEBUGGER: + self.insertSemicolon = true + return + + default: + return + + } + } + self.insertSemicolon = true + tkn = token.IDENTIFIER + return + case '0' <= chr && chr <= '9': + self.insertSemicolon = true + tkn, literal = self.scanNumericLiteral(false) + return + default: + self.read() + switch chr { + case -1: + if self.insertSemicolon { + self.insertSemicolon = false + self.implicitSemicolon = true + } + tkn = token.EOF + case '\r', '\n', '\u2028', '\u2029': + self.insertSemicolon = false + self.implicitSemicolon = true + self.comments.AtLineBreak() + continue + case ':': + tkn = token.COLON + case '.': + if digitValue(self.chr) < 10 { + insertSemicolon = true + tkn, literal = self.scanNumericLiteral(true) + } else { + tkn = token.PERIOD + } + case ',': + tkn = token.COMMA + case ';': + tkn = token.SEMICOLON + case '(': + tkn = token.LEFT_PARENTHESIS + case ')': + tkn = token.RIGHT_PARENTHESIS + insertSemicolon = true + case '[': + tkn = token.LEFT_BRACKET + case ']': + tkn = token.RIGHT_BRACKET + insertSemicolon = true + case '{': + tkn = token.LEFT_BRACE + case '}': + tkn = token.RIGHT_BRACE + insertSemicolon = true + case '+': + tkn = self.switch3(token.PLUS, token.ADD_ASSIGN, '+', token.INCREMENT) + if tkn == token.INCREMENT { + insertSemicolon = true + } + case '-': + tkn = self.switch3(token.MINUS, token.SUBTRACT_ASSIGN, '-', token.DECREMENT) + if tkn == token.DECREMENT { + insertSemicolon = true + } + case '*': + tkn = self.switch2(token.MULTIPLY, token.MULTIPLY_ASSIGN) + case '/': + if self.chr == '/' { + if self.mode&StoreComments != 0 { + literal := string(self.readSingleLineComment()) + self.comments.AddComment(ast.NewComment(literal, self.idx)) + continue + } + self.skipSingleLineComment() + continue + } else if self.chr == '*' { + if self.mode&StoreComments != 0 { + literal = string(self.readMultiLineComment()) + self.comments.AddComment(ast.NewComment(literal, self.idx)) + continue + } + self.skipMultiLineComment() + continue + } else { + // Could be division, could be RegExp literal + tkn = self.switch2(token.SLASH, token.QUOTIENT_ASSIGN) + insertSemicolon = true + } + case '%': + tkn = self.switch2(token.REMAINDER, token.REMAINDER_ASSIGN) + case '^': + tkn = self.switch2(token.EXCLUSIVE_OR, token.EXCLUSIVE_OR_ASSIGN) + case '<': + tkn = self.switch4(token.LESS, token.LESS_OR_EQUAL, '<', token.SHIFT_LEFT, token.SHIFT_LEFT_ASSIGN) + case '>': + tkn = self.switch6(token.GREATER, token.GREATER_OR_EQUAL, '>', token.SHIFT_RIGHT, token.SHIFT_RIGHT_ASSIGN, '>', token.UNSIGNED_SHIFT_RIGHT, token.UNSIGNED_SHIFT_RIGHT_ASSIGN) + case '=': + tkn = self.switch2(token.ASSIGN, token.EQUAL) + if tkn == token.EQUAL && self.chr == '=' { + self.read() + tkn = token.STRICT_EQUAL + } + case '!': + tkn = self.switch2(token.NOT, token.NOT_EQUAL) + if tkn == token.NOT_EQUAL && self.chr == '=' { + self.read() + tkn = token.STRICT_NOT_EQUAL + } + case '&': + if self.chr == '^' { + self.read() + tkn = self.switch2(token.AND_NOT, token.AND_NOT_ASSIGN) + } else { + tkn = self.switch3(token.AND, token.AND_ASSIGN, '&', token.LOGICAL_AND) + } + case '|': + tkn = self.switch3(token.OR, token.OR_ASSIGN, '|', token.LOGICAL_OR) + case '~': + tkn = token.BITWISE_NOT + case '?': + tkn = token.QUESTION_MARK + case '"', '\'': + 
insertSemicolon = true + tkn = token.STRING + var err error + literal, err = self.scanString(self.chrOffset - 1) + if err != nil { + tkn = token.ILLEGAL + } + default: + self.errorUnexpected(idx, chr) + tkn = token.ILLEGAL + } + } + self.insertSemicolon = insertSemicolon + return + } +} + +func (self *_parser) switch2(tkn0, tkn1 token.Token) token.Token { + if self.chr == '=' { + self.read() + return tkn1 + } + return tkn0 +} + +func (self *_parser) switch3(tkn0, tkn1 token.Token, chr2 rune, tkn2 token.Token) token.Token { + if self.chr == '=' { + self.read() + return tkn1 + } + if self.chr == chr2 { + self.read() + return tkn2 + } + return tkn0 +} + +func (self *_parser) switch4(tkn0, tkn1 token.Token, chr2 rune, tkn2, tkn3 token.Token) token.Token { + if self.chr == '=' { + self.read() + return tkn1 + } + if self.chr == chr2 { + self.read() + if self.chr == '=' { + self.read() + return tkn3 + } + return tkn2 + } + return tkn0 +} + +func (self *_parser) switch6(tkn0, tkn1 token.Token, chr2 rune, tkn2, tkn3 token.Token, chr3 rune, tkn4, tkn5 token.Token) token.Token { + if self.chr == '=' { + self.read() + return tkn1 + } + if self.chr == chr2 { + self.read() + if self.chr == '=' { + self.read() + return tkn3 + } + if self.chr == chr3 { + self.read() + if self.chr == '=' { + self.read() + return tkn5 + } + return tkn4 + } + return tkn2 + } + return tkn0 +} + +func (self *_parser) chrAt(index int) _chr { + value, width := utf8.DecodeRuneInString(self.str[index:]) + return _chr{ + value: value, + width: width, + } +} + +func (self *_parser) _peek() rune { + if self.offset+1 < self.length { + return rune(self.str[self.offset+1]) + } + return -1 +} + +func (self *_parser) read() { + if self.offset < self.length { + self.chrOffset = self.offset + chr, width := rune(self.str[self.offset]), 1 + if chr >= utf8.RuneSelf { // !ASCII + chr, width = utf8.DecodeRuneInString(self.str[self.offset:]) + if chr == utf8.RuneError && width == 1 { + self.error(self.chrOffset, "Invalid UTF-8 character") + } + } + self.offset += width + self.chr = chr + } else { + self.chrOffset = self.length + self.chr = -1 // EOF + } +} + +// This is here since the functions are so similar +func (self *_RegExp_parser) read() { + if self.offset < self.length { + self.chrOffset = self.offset + chr, width := rune(self.str[self.offset]), 1 + if chr >= utf8.RuneSelf { // !ASCII + chr, width = utf8.DecodeRuneInString(self.str[self.offset:]) + if chr == utf8.RuneError && width == 1 { + self.error(self.chrOffset, "Invalid UTF-8 character") + } + } + self.offset += width + self.chr = chr + } else { + self.chrOffset = self.length + self.chr = -1 // EOF + } +} + +func (self *_parser) readSingleLineComment() (result []rune) { + for self.chr != -1 { + self.read() + if isLineTerminator(self.chr) { + return + } + result = append(result, self.chr) + } + + // Get rid of the trailing -1 + result = result[:len(result)-1] + + return +} + +func (self *_parser) readMultiLineComment() (result []rune) { + self.read() + for self.chr >= 0 { + chr := self.chr + self.read() + if chr == '*' && self.chr == '/' { + self.read() + return + } + + result = append(result, chr) + } + + self.errorUnexpected(0, self.chr) + + return +} + +func (self *_parser) skipSingleLineComment() { + for self.chr != -1 { + self.read() + if isLineTerminator(self.chr) { + return + } + } +} + +func (self *_parser) skipMultiLineComment() { + self.read() + for self.chr >= 0 { + chr := self.chr + self.read() + if chr == '*' && self.chr == '/' { + self.read() + return + } + } + + 
self.errorUnexpected(0, self.chr) +} + +func (self *_parser) skipWhiteSpace() { + for { + switch self.chr { + case ' ', '\t', '\f', '\v', '\u00a0', '\ufeff': + self.read() + continue + case '\r': + if self._peek() == '\n' { + self.comments.AtLineBreak() + self.read() + } + fallthrough + case '\u2028', '\u2029', '\n': + if self.insertSemicolon { + return + } + self.comments.AtLineBreak() + self.read() + continue + } + if self.chr >= utf8.RuneSelf { + if unicode.IsSpace(self.chr) { + self.read() + continue + } + } + break + } +} + +func (self *_parser) skipLineWhiteSpace() { + for isLineWhiteSpace(self.chr) { + self.read() + } +} + +func (self *_parser) scanMantissa(base int) { + for digitValue(self.chr) < base { + self.read() + } +} + +func (self *_parser) scanEscape(quote rune) { + + var length, base uint32 + switch self.chr { + //case '0', '1', '2', '3', '4', '5', '6', '7': + // Octal: + // length, base, limit = 3, 8, 255 + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"', '\'', '0': + self.read() + return + case '\r', '\n', '\u2028', '\u2029': + self.scanNewline() + return + case 'x': + self.read() + length, base = 2, 16 + case 'u': + self.read() + length, base = 4, 16 + default: + self.read() // Always make progress + return + } + + var value uint32 + for ; length > 0 && self.chr != quote && self.chr >= 0; length-- { + digit := uint32(digitValue(self.chr)) + if digit >= base { + break + } + value = value*base + digit + self.read() + } +} + +func (self *_parser) scanString(offset int) (string, error) { + // " ' / + quote := rune(self.str[offset]) + + for self.chr != quote { + chr := self.chr + if chr == '\n' || chr == '\r' || chr == '\u2028' || chr == '\u2029' || chr < 0 { + goto newline + } + self.read() + if chr == '\\' { + if quote == '/' { + if self.chr == '\n' || self.chr == '\r' || self.chr == '\u2028' || self.chr == '\u2029' || self.chr < 0 { + goto newline + } + self.read() + } else { + self.scanEscape(quote) + } + } else if chr == '[' && quote == '/' { + // Allow a slash (/) in a bracket character class ([...]) + // TODO Fix this, this is hacky... + quote = -1 + } else if chr == ']' && quote == -1 { + quote = '/' + } + } + + // " ' / + self.read() + + return string(self.str[offset:self.chrOffset]), nil + +newline: + self.scanNewline() + err := "String not terminated" + if quote == '/' { + err = "Invalid regular expression: missing /" + self.error(self.idxOf(offset), err) + } + return "", errors.New(err) +} + +func (self *_parser) scanNewline() { + if self.chr == '\r' { + self.read() + if self.chr != '\n' { + return + } + } + self.read() +} + +func hex2decimal(chr byte) (value rune, ok bool) { + { + chr := rune(chr) + switch { + case '0' <= chr && chr <= '9': + return chr - '0', true + case 'a' <= chr && chr <= 'f': + return chr - 'a' + 10, true + case 'A' <= chr && chr <= 'F': + return chr - 'A' + 10, true + } + return + } +} + +func parseNumberLiteral(literal string) (value interface{}, err error) { + // TODO Is Uint okay? What about -MAX_UINT + value, err = strconv.ParseInt(literal, 0, 64) + if err == nil { + return + } + + parseIntErr := err // Save this first error, just in case + + value, err = strconv.ParseFloat(literal, 64) + if err == nil { + return + } else if err.(*strconv.NumError).Err == strconv.ErrRange { + // Infinity, etc. + return value, nil + } + + err = parseIntErr + + if err.(*strconv.NumError).Err == strconv.ErrRange { + if len(literal) > 2 && literal[0] == '0' && (literal[1] == 'X' || literal[1] == 'x') { + // Could just be a very large number (e.g. 
0x8000000000000000) + var value float64 + literal = literal[2:] + for _, chr := range literal { + digit := digitValue(chr) + if digit >= 16 { + goto error + } + value = value*16 + float64(digit) + } + return value, nil + } + } + +error: + return nil, errors.New("Illegal numeric literal") +} + +func parseStringLiteral(literal string) (string, error) { + // Best case scenario... + if literal == "" { + return "", nil + } + + // Slightly less-best case scenario... + if !strings.ContainsRune(literal, '\\') { + return literal, nil + } + + str := literal + buffer := bytes.NewBuffer(make([]byte, 0, 3*len(literal)/2)) + + for len(str) > 0 { + switch chr := str[0]; { + // We do not explicitly handle the case of the quote + // value, which can be: " ' / + // This assumes we're already passed a partially well-formed literal + case chr >= utf8.RuneSelf: + chr, size := utf8.DecodeRuneInString(str) + buffer.WriteRune(chr) + str = str[size:] + continue + case chr != '\\': + buffer.WriteByte(chr) + str = str[1:] + continue + } + + if len(str) <= 1 { + panic("len(str) <= 1") + } + chr := str[1] + var value rune + if chr >= utf8.RuneSelf { + str = str[1:] + var size int + value, size = utf8.DecodeRuneInString(str) + str = str[size:] // \ + + } else { + str = str[2:] // \ + switch chr { + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u': + size := 0 + switch chr { + case 'x': + size = 2 + case 'u': + size = 4 + } + if len(str) < size { + return "", fmt.Errorf("invalid escape: \\%s: len(%q) != %d", string(chr), str, size) + } + for j := 0; j < size; j++ { + decimal, ok := hex2decimal(str[j]) + if !ok { + return "", fmt.Errorf("invalid escape: \\%s: %q", string(chr), str[:size]) + } + value = value<<4 | decimal + } + str = str[size:] + if chr == 'x' { + break + } + if value > utf8.MaxRune { + panic("value > utf8.MaxRune") + } + case '0': + if len(str) == 0 || '0' > str[0] || str[0] > '7' { + value = 0 + break + } + fallthrough + case '1', '2', '3', '4', '5', '6', '7': + // TODO strict + value = rune(chr) - '0' + j := 0 + for ; j < 2; j++ { + if len(str) < j+1 { + break + } + chr := str[j] + if '0' > chr || chr > '7' { + break + } + decimal := rune(str[j]) - '0' + value = (value << 3) | decimal + } + str = str[j:] + case '\\': + value = '\\' + case '\'', '"': + value = rune(chr) + case '\r': + if len(str) > 0 { + if str[0] == '\n' { + str = str[1:] + } + } + fallthrough + case '\n': + continue + default: + value = rune(chr) + } + } + buffer.WriteRune(value) + } + + return buffer.String(), nil +} + +func (self *_parser) scanNumericLiteral(decimalPoint bool) (token.Token, string) { + + offset := self.chrOffset + tkn := token.NUMBER + + if decimalPoint { + offset-- + self.scanMantissa(10) + goto exponent + } + + if self.chr == '0' { + offset := self.chrOffset + self.read() + if self.chr == 'x' || self.chr == 'X' { + // Hexadecimal + self.read() + if isDigit(self.chr, 16) { + self.read() + } else { + return token.ILLEGAL, self.str[offset:self.chrOffset] + } + self.scanMantissa(16) + + if self.chrOffset-offset <= 2 { + // Only "0x" or "0X" + self.error(0, "Illegal hexadecimal number") + } + + goto hexadecimal + } else if self.chr == '.' 
{ + // Float + goto float + } else { + // Octal, Float + if self.chr == 'e' || self.chr == 'E' { + goto exponent + } + self.scanMantissa(8) + if self.chr == '8' || self.chr == '9' { + return token.ILLEGAL, self.str[offset:self.chrOffset] + } + goto octal + } + } + + self.scanMantissa(10) + +float: + if self.chr == '.' { + self.read() + self.scanMantissa(10) + } + +exponent: + if self.chr == 'e' || self.chr == 'E' { + self.read() + if self.chr == '-' || self.chr == '+' { + self.read() + } + if isDecimalDigit(self.chr) { + self.read() + self.scanMantissa(10) + } else { + return token.ILLEGAL, self.str[offset:self.chrOffset] + } + } + +hexadecimal: +octal: + if isIdentifierStart(self.chr) || isDecimalDigit(self.chr) { + return token.ILLEGAL, self.str[offset:self.chrOffset] + } + + return tkn, self.str[offset:self.chrOffset] +} diff --git a/vendor/github.com/robertkrimen/otto/parser/parser.go b/vendor/github.com/robertkrimen/otto/parser/parser.go new file mode 100644 index 000000000..75b7c500c --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/parser/parser.go @@ -0,0 +1,344 @@ +/* +Package parser implements a parser for JavaScript. + + import ( + "github.com/robertkrimen/otto/parser" + ) + +Parse and return an AST + + filename := "" // A filename is optional + src := ` + // Sample xyzzy example + (function(){ + if (3.14159 > 0) { + console.log("Hello, World."); + return; + } + + var xyzzy = NaN; + console.log("Nothing happens."); + return xyzzy; + })(); + ` + + // Parse some JavaScript, yielding a *ast.Program and/or an ErrorList + program, err := parser.ParseFile(nil, filename, src, 0) + +Warning + +The parser and AST interfaces are still works-in-progress (particularly where +node types are concerned) and may change in the future. + +*/ +package parser + +import ( + "bytes" + "encoding/base64" + "errors" + "io" + "io/ioutil" + + "github.com/robertkrimen/otto/ast" + "github.com/robertkrimen/otto/file" + "github.com/robertkrimen/otto/token" + "gopkg.in/sourcemap.v1" +) + +// A Mode value is a set of flags (or 0). They control optional parser functionality. +type Mode uint + +const ( + IgnoreRegExpErrors Mode = 1 << iota // Ignore RegExp compatibility errors (allow backtracking) + StoreComments // Store the comments from source to the comments map +) + +type _parser struct { + str string + length int + base int + + chr rune // The current character + chrOffset int // The offset of current character + offset int // The offset after current character (may be greater than 1) + + idx file.Idx // The index of token + token token.Token // The token + literal string // The literal of the token, if any + + scope *_scope + insertSemicolon bool // If we see a newline, then insert an implicit semicolon + implicitSemicolon bool // An implicit semicolon exists + + errors ErrorList + + recover struct { + // Scratch when trying to seek to the next statement, etc. + idx file.Idx + count int + } + + mode Mode + + file *file.File + + comments *ast.Comments +} + +type Parser interface { + Scan() (tkn token.Token, literal string, idx file.Idx) +} + +func _newParser(filename, src string, base int, sm *sourcemap.Consumer) *_parser { + return &_parser{ + chr: ' ', // This is set so we can start scanning by skipping whitespace + str: src, + length: len(src), + base: base, + file: file.NewFile(filename, src, base).WithSourceMap(sm), + comments: ast.NewComments(), + } +} + +// Returns a new Parser. 
+func NewParser(filename, src string) Parser { + return _newParser(filename, src, 1, nil) +} + +func ReadSource(filename string, src interface{}) ([]byte, error) { + if src != nil { + switch src := src.(type) { + case string: + return []byte(src), nil + case []byte: + return src, nil + case *bytes.Buffer: + if src != nil { + return src.Bytes(), nil + } + case io.Reader: + var bfr bytes.Buffer + if _, err := io.Copy(&bfr, src); err != nil { + return nil, err + } + return bfr.Bytes(), nil + } + return nil, errors.New("invalid source") + } + return ioutil.ReadFile(filename) +} + +func ReadSourceMap(filename string, src interface{}) (*sourcemap.Consumer, error) { + if src == nil { + return nil, nil + } + + switch src := src.(type) { + case string: + return sourcemap.Parse(filename, []byte(src)) + case []byte: + return sourcemap.Parse(filename, src) + case *bytes.Buffer: + if src != nil { + return sourcemap.Parse(filename, src.Bytes()) + } + case io.Reader: + var bfr bytes.Buffer + if _, err := io.Copy(&bfr, src); err != nil { + return nil, err + } + return sourcemap.Parse(filename, bfr.Bytes()) + case *sourcemap.Consumer: + return src, nil + } + + return nil, errors.New("invalid sourcemap type") +} + +func ParseFileWithSourceMap(fileSet *file.FileSet, filename string, javascriptSource, sourcemapSource interface{}, mode Mode) (*ast.Program, error) { + src, err := ReadSource(filename, javascriptSource) + if err != nil { + return nil, err + } + + if sourcemapSource == nil { + lines := bytes.Split(src, []byte("\n")) + lastLine := lines[len(lines)-1] + if bytes.HasPrefix(lastLine, []byte("//# sourceMappingURL=data:application/json")) { + bits := bytes.SplitN(lastLine, []byte(","), 2) + if len(bits) == 2 { + if d, err := base64.StdEncoding.DecodeString(string(bits[1])); err == nil { + sourcemapSource = d + } + } + } + } + + sm, err := ReadSourceMap(filename, sourcemapSource) + if err != nil { + return nil, err + } + + base := 1 + if fileSet != nil { + base = fileSet.AddFile(filename, string(src)) + } + + parser := _newParser(filename, string(src), base, sm) + parser.mode = mode + program, err := parser.parse() + program.Comments = parser.comments.CommentMap + + return program, err +} + +// ParseFile parses the source code of a single JavaScript/ECMAScript source file and returns +// the corresponding ast.Program node. +// +// If fileSet == nil, ParseFile parses source without a FileSet. +// If fileSet != nil, ParseFile first adds filename and src to fileSet. +// +// The filename argument is optional and is used for labelling errors, etc. +// +// src may be a string, a byte slice, a bytes.Buffer, or an io.Reader, but it MUST always be in UTF-8. +// +// // Parse some JavaScript, yielding a *ast.Program and/or an ErrorList +// program, err := parser.ParseFile(nil, "", `if (abc > 1) {}`, 0) +// +func ParseFile(fileSet *file.FileSet, filename string, src interface{}, mode Mode) (*ast.Program, error) { + return ParseFileWithSourceMap(fileSet, filename, src, nil, mode) +} + +// ParseFunction parses a given parameter list and body as a function and returns the +// corresponding ast.FunctionLiteral node. +// +// The parameter list, if any, should be a comma-separated list of identifiers. 
+// +func ParseFunction(parameterList, body string) (*ast.FunctionLiteral, error) { + + src := "(function(" + parameterList + ") {\n" + body + "\n})" + + parser := _newParser("", src, 1, nil) + program, err := parser.parse() + if err != nil { + return nil, err + } + + return program.Body[0].(*ast.ExpressionStatement).Expression.(*ast.FunctionLiteral), nil +} + +// Scan reads a single token from the source at the current offset, increments the offset and +// returns the token.Token token, a string literal representing the value of the token (if applicable) +// and it's current file.Idx index. +func (self *_parser) Scan() (tkn token.Token, literal string, idx file.Idx) { + return self.scan() +} + +func (self *_parser) slice(idx0, idx1 file.Idx) string { + from := int(idx0) - self.base + to := int(idx1) - self.base + if from >= 0 && to <= len(self.str) { + return self.str[from:to] + } + + return "" +} + +func (self *_parser) parse() (*ast.Program, error) { + self.next() + program := self.parseProgram() + if false { + self.errors.Sort() + } + + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(program, self.comments.FetchAll(), ast.TRAILING) + } + + return program, self.errors.Err() +} + +func (self *_parser) next() { + self.token, self.literal, self.idx = self.scan() +} + +func (self *_parser) optionalSemicolon() { + if self.token == token.SEMICOLON { + self.next() + return + } + + if self.implicitSemicolon { + self.implicitSemicolon = false + return + } + + if self.token != token.EOF && self.token != token.RIGHT_BRACE { + self.expect(token.SEMICOLON) + } +} + +func (self *_parser) semicolon() { + if self.token != token.RIGHT_PARENTHESIS && self.token != token.RIGHT_BRACE { + if self.implicitSemicolon { + self.implicitSemicolon = false + return + } + + self.expect(token.SEMICOLON) + } +} + +func (self *_parser) idxOf(offset int) file.Idx { + return file.Idx(self.base + offset) +} + +func (self *_parser) expect(value token.Token) file.Idx { + idx := self.idx + if self.token != value { + self.errorUnexpectedToken(self.token) + } + self.next() + return idx +} + +func lineCount(str string) (int, int) { + line, last := 0, -1 + pair := false + for index, chr := range str { + switch chr { + case '\r': + line += 1 + last = index + pair = true + continue + case '\n': + if !pair { + line += 1 + } + last = index + case '\u2028', '\u2029': + line += 1 + last = index + 2 + } + pair = false + } + return line, last +} + +func (self *_parser) position(idx file.Idx) file.Position { + position := file.Position{} + offset := int(idx) - self.base + str := self.str[:offset] + position.Filename = self.file.Name() + line, last := lineCount(str) + position.Line = 1 + line + if last >= 0 { + position.Column = offset - last + } else { + position.Column = 1 + len(str) + } + + return position +} diff --git a/vendor/github.com/robertkrimen/otto/parser/regexp.go b/vendor/github.com/robertkrimen/otto/parser/regexp.go new file mode 100644 index 000000000..f614dae74 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/parser/regexp.go @@ -0,0 +1,358 @@ +package parser + +import ( + "bytes" + "fmt" + "strconv" +) + +type _RegExp_parser struct { + str string + length int + + chr rune // The current character + chrOffset int // The offset of current character + offset int // The offset after current character (may be greater than 1) + + errors []error + invalid bool // The input is an invalid JavaScript RegExp + + goRegexp *bytes.Buffer +} + +// TransformRegExp transforms a JavaScript pattern into a Go 
"regexp" pattern. +// +// re2 (Go) cannot do backtracking, so the presence of a lookahead (?=) (?!) or +// backreference (\1, \2, ...) will cause an error. +// +// re2 (Go) has a different definition for \s: [\t\n\f\r ]. +// The JavaScript definition, on the other hand, also includes \v, Unicode "Separator, Space", etc. +// +// If the pattern is invalid (not valid even in JavaScript), then this function +// returns the empty string and an error. +// +// If the pattern is valid, but incompatible (contains a lookahead or backreference), +// then this function returns the transformation (a non-empty string) AND an error. +func TransformRegExp(pattern string) (string, error) { + + if pattern == "" { + return "", nil + } + + // TODO If without \, if without (?=, (?!, then another shortcut + + parser := _RegExp_parser{ + str: pattern, + length: len(pattern), + goRegexp: bytes.NewBuffer(make([]byte, 0, 3*len(pattern)/2)), + } + parser.read() // Pull in the first character + parser.scan() + var err error + if len(parser.errors) > 0 { + err = parser.errors[0] + } + if parser.invalid { + return "", err + } + + // Might not be re2 compatible, but is still a valid JavaScript RegExp + return parser.goRegexp.String(), err +} + +func (self *_RegExp_parser) scan() { + for self.chr != -1 { + switch self.chr { + case '\\': + self.read() + self.scanEscape(false) + case '(': + self.pass() + self.scanGroup() + case '[': + self.pass() + self.scanBracket() + case ')': + self.error(-1, "Unmatched ')'") + self.invalid = true + self.pass() + default: + self.pass() + } + } +} + +// (...) +func (self *_RegExp_parser) scanGroup() { + str := self.str[self.chrOffset:] + if len(str) > 1 { // A possibility of (?= or (?! + if str[0] == '?' { + if str[1] == '=' || str[1] == '!' { + self.error(-1, "re2: Invalid (%s) ", self.str[self.chrOffset:self.chrOffset+2]) + } + } + } + for self.chr != -1 && self.chr != ')' { + switch self.chr { + case '\\': + self.read() + self.scanEscape(false) + case '(': + self.pass() + self.scanGroup() + case '[': + self.pass() + self.scanBracket() + default: + self.pass() + continue + } + } + if self.chr != ')' { + self.error(-1, "Unterminated group") + self.invalid = true + return + } + self.pass() +} + +// [...] +func (self *_RegExp_parser) scanBracket() { + for self.chr != -1 { + if self.chr == ']' { + break + } else if self.chr == '\\' { + self.read() + self.scanEscape(true) + continue + } + self.pass() + } + if self.chr != ']' { + self.error(-1, "Unterminated character class") + self.invalid = true + return + } + self.pass() +} + +// \... 
+func (self *_RegExp_parser) scanEscape(inClass bool) { + offset := self.chrOffset + + var length, base uint32 + switch self.chr { + + case '0', '1', '2', '3', '4', '5', '6', '7': + var value int64 + size := 0 + for { + digit := int64(digitValue(self.chr)) + if digit >= 8 { + // Not a valid digit + break + } + value = value*8 + digit + self.read() + size += 1 + } + if size == 1 { // The number of characters read + _, err := self.goRegexp.Write([]byte{'\\', byte(value) + '0'}) + if err != nil { + self.errors = append(self.errors, err) + } + if value != 0 { + // An invalid backreference + self.error(-1, "re2: Invalid \\%d ", value) + } + return + } + tmp := []byte{'\\', 'x', '0', 0} + if value >= 16 { + tmp = tmp[0:2] + } else { + tmp = tmp[0:3] + } + tmp = strconv.AppendInt(tmp, value, 16) + _, err := self.goRegexp.Write(tmp) + if err != nil { + self.errors = append(self.errors, err) + } + return + + case '8', '9': + size := 0 + for { + digit := digitValue(self.chr) + if digit >= 10 { + // Not a valid digit + break + } + self.read() + size += 1 + } + err := self.goRegexp.WriteByte('\\') + if err != nil { + self.errors = append(self.errors, err) + } + _, err = self.goRegexp.WriteString(self.str[offset:self.chrOffset]) + if err != nil { + self.errors = append(self.errors, err) + } + self.error(-1, "re2: Invalid \\%s ", self.str[offset:self.chrOffset]) + return + + case 'x': + self.read() + length, base = 2, 16 + + case 'u': + self.read() + length, base = 4, 16 + + case 'b': + if inClass { + _, err := self.goRegexp.Write([]byte{'\\', 'x', '0', '8'}) + if err != nil { + self.errors = append(self.errors, err) + } + self.read() + return + } + fallthrough + + case 'B': + fallthrough + + case 'd', 'D', 's', 'S', 'w', 'W': + // This is slightly broken, because ECMAScript + // includes \v in \s, \S, while re2 does not + fallthrough + + case '\\': + fallthrough + + case 'f', 'n', 'r', 't', 'v': + err := self.goRegexp.WriteByte('\\') + if err != nil { + self.errors = append(self.errors, err) + } + self.pass() + return + + case 'c': + self.read() + var value int64 + if 'a' <= self.chr && self.chr <= 'z' { + value = int64(self.chr) - 'a' + 1 + } else if 'A' <= self.chr && self.chr <= 'Z' { + value = int64(self.chr) - 'A' + 1 + } else { + err := self.goRegexp.WriteByte('c') + if err != nil { + self.errors = append(self.errors, err) + } + return + } + tmp := []byte{'\\', 'x', '0', 0} + if value >= 16 { + tmp = tmp[0:2] + } else { + tmp = tmp[0:3] + } + tmp = strconv.AppendInt(tmp, value, 16) + _, err := self.goRegexp.Write(tmp) + if err != nil { + self.errors = append(self.errors, err) + } + self.read() + return + + default: + // $ is an identifier character, so we have to have + // a special case for it here + if self.chr == '$' || !isIdentifierPart(self.chr) { + // A non-identifier character needs escaping + err := self.goRegexp.WriteByte('\\') + if err != nil { + self.errors = append(self.errors, err) + } + } else { + // Unescape the character for re2 + } + self.pass() + return + } + + // Otherwise, we're a \u.... or \x... 
+ valueOffset := self.chrOffset + + var value uint32 + { + length := length + for ; length > 0; length-- { + digit := uint32(digitValue(self.chr)) + if digit >= base { + // Not a valid digit + goto skip + } + value = value*base + digit + self.read() + } + } + + if length == 4 { + _, err := self.goRegexp.Write([]byte{ + '\\', + 'x', + '{', + self.str[valueOffset+0], + self.str[valueOffset+1], + self.str[valueOffset+2], + self.str[valueOffset+3], + '}', + }) + if err != nil { + self.errors = append(self.errors, err) + } + } else if length == 2 { + _, err := self.goRegexp.Write([]byte{ + '\\', + 'x', + self.str[valueOffset+0], + self.str[valueOffset+1], + }) + if err != nil { + self.errors = append(self.errors, err) + } + } else { + // Should never, ever get here... + self.error(-1, "re2: Illegal branch in scanEscape") + goto skip + } + + return + +skip: + _, err := self.goRegexp.WriteString(self.str[offset:self.chrOffset]) + if err != nil { + self.errors = append(self.errors, err) + } +} + +func (self *_RegExp_parser) pass() { + if self.chr != -1 { + _, err := self.goRegexp.WriteRune(self.chr) + if err != nil { + self.errors = append(self.errors, err) + } + } + self.read() +} + +// TODO Better error reporting, use the offset, etc. +func (self *_RegExp_parser) error(offset int, msg string, msgValues ...interface{}) error { + err := fmt.Errorf(msg, msgValues...) + self.errors = append(self.errors, err) + return err +} diff --git a/vendor/github.com/robertkrimen/otto/parser/scope.go b/vendor/github.com/robertkrimen/otto/parser/scope.go new file mode 100644 index 000000000..e1dbdda13 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/parser/scope.go @@ -0,0 +1,44 @@ +package parser + +import ( + "github.com/robertkrimen/otto/ast" +) + +type _scope struct { + outer *_scope + allowIn bool + inIteration bool + inSwitch bool + inFunction bool + declarationList []ast.Declaration + + labels []string +} + +func (self *_parser) openScope() { + self.scope = &_scope{ + outer: self.scope, + allowIn: true, + } +} + +func (self *_parser) closeScope() { + self.scope = self.scope.outer +} + +func (self *_scope) declare(declaration ast.Declaration) { + self.declarationList = append(self.declarationList, declaration) +} + +func (self *_scope) hasLabel(name string) bool { + for _, label := range self.labels { + if label == name { + return true + } + } + if self.outer != nil && !self.inFunction { + // Crossing a function boundary to look for a label is verboten + return self.outer.hasLabel(name) + } + return false +} diff --git a/vendor/github.com/robertkrimen/otto/parser/statement.go b/vendor/github.com/robertkrimen/otto/parser/statement.go new file mode 100644 index 000000000..6ff19d975 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/parser/statement.go @@ -0,0 +1,938 @@ +package parser + +import ( + "github.com/robertkrimen/otto/ast" + "github.com/robertkrimen/otto/token" +) + +func (self *_parser) parseBlockStatement() *ast.BlockStatement { + node := &ast.BlockStatement{} + + // Find comments before the leading brace + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(node, self.comments.FetchAll(), ast.LEADING) + self.comments.Unset() + } + + node.LeftBrace = self.expect(token.LEFT_BRACE) + node.List = self.parseStatementList() + + if self.mode&StoreComments != 0 { + self.comments.Unset() + self.comments.CommentMap.AddComments(node, self.comments.FetchAll(), ast.FINAL) + self.comments.AfterBlock() + } + + node.RightBrace = self.expect(token.RIGHT_BRACE) + + // Find comments 
after the trailing brace + if self.mode&StoreComments != 0 { + self.comments.ResetLineBreak() + self.comments.CommentMap.AddComments(node, self.comments.Fetch(), ast.TRAILING) + } + + return node +} + +func (self *_parser) parseEmptyStatement() ast.Statement { + idx := self.expect(token.SEMICOLON) + return &ast.EmptyStatement{Semicolon: idx} +} + +func (self *_parser) parseStatementList() (list []ast.Statement) { + for self.token != token.RIGHT_BRACE && self.token != token.EOF { + statement := self.parseStatement() + list = append(list, statement) + } + + return +} + +func (self *_parser) parseStatement() ast.Statement { + + if self.token == token.EOF { + self.errorUnexpectedToken(self.token) + return &ast.BadStatement{From: self.idx, To: self.idx + 1} + } + + if self.mode&StoreComments != 0 { + self.comments.ResetLineBreak() + } + + switch self.token { + case token.SEMICOLON: + return self.parseEmptyStatement() + case token.LEFT_BRACE: + return self.parseBlockStatement() + case token.IF: + return self.parseIfStatement() + case token.DO: + statement := self.parseDoWhileStatement() + self.comments.PostProcessNode(statement) + return statement + case token.WHILE: + return self.parseWhileStatement() + case token.FOR: + return self.parseForOrForInStatement() + case token.BREAK: + return self.parseBreakStatement() + case token.CONTINUE: + return self.parseContinueStatement() + case token.DEBUGGER: + return self.parseDebuggerStatement() + case token.WITH: + return self.parseWithStatement() + case token.VAR: + return self.parseVariableStatement() + case token.FUNCTION: + return self.parseFunctionStatement() + case token.SWITCH: + return self.parseSwitchStatement() + case token.RETURN: + return self.parseReturnStatement() + case token.THROW: + return self.parseThrowStatement() + case token.TRY: + return self.parseTryStatement() + } + + var comments []*ast.Comment + if self.mode&StoreComments != 0 { + comments = self.comments.FetchAll() + } + + expression := self.parseExpression() + + if identifier, isIdentifier := expression.(*ast.Identifier); isIdentifier && self.token == token.COLON { + // LabelledStatement + colon := self.idx + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() // : + + label := identifier.Name + for _, value := range self.scope.labels { + if label == value { + self.error(identifier.Idx0(), "Label '%s' already exists", label) + } + } + var labelComments []*ast.Comment + if self.mode&StoreComments != 0 { + labelComments = self.comments.FetchAll() + } + self.scope.labels = append(self.scope.labels, label) // Push the label + statement := self.parseStatement() + self.scope.labels = self.scope.labels[:len(self.scope.labels)-1] // Pop the label + exp := &ast.LabelledStatement{ + Label: identifier, + Colon: colon, + Statement: statement, + } + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(exp, labelComments, ast.LEADING) + } + + return exp + } + + self.optionalSemicolon() + + statement := &ast.ExpressionStatement{ + Expression: expression, + } + + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(statement, comments, ast.LEADING) + } + return statement +} + +func (self *_parser) parseTryStatement() ast.Statement { + var tryComments []*ast.Comment + if self.mode&StoreComments != 0 { + tryComments = self.comments.FetchAll() + } + node := &ast.TryStatement{ + Try: self.expect(token.TRY), + Body: self.parseBlockStatement(), + } + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(node, 
tryComments, ast.LEADING) + self.comments.CommentMap.AddComments(node.Body, self.comments.FetchAll(), ast.TRAILING) + } + + if self.token == token.CATCH { + catch := self.idx + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + self.expect(token.LEFT_PARENTHESIS) + if self.token != token.IDENTIFIER { + self.expect(token.IDENTIFIER) + self.nextStatement() + return &ast.BadStatement{From: catch, To: self.idx} + } else { + identifier := self.parseIdentifier() + self.expect(token.RIGHT_PARENTHESIS) + node.Catch = &ast.CatchStatement{ + Catch: catch, + Parameter: identifier, + Body: self.parseBlockStatement(), + } + + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(node.Catch.Body, self.comments.FetchAll(), ast.TRAILING) + } + } + } + + if self.token == token.FINALLY { + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() + if self.mode&StoreComments != 0 { + tryComments = self.comments.FetchAll() + } + + node.Finally = self.parseBlockStatement() + + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(node.Finally, tryComments, ast.LEADING) + } + } + + if node.Catch == nil && node.Finally == nil { + self.error(node.Try, "Missing catch or finally after try") + return &ast.BadStatement{From: node.Try, To: node.Body.Idx1()} + } + + return node +} + +func (self *_parser) parseFunctionParameterList() *ast.ParameterList { + opening := self.expect(token.LEFT_PARENTHESIS) + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + var list []*ast.Identifier + for self.token != token.RIGHT_PARENTHESIS && self.token != token.EOF { + if self.token != token.IDENTIFIER { + self.expect(token.IDENTIFIER) + } else { + identifier := self.parseIdentifier() + list = append(list, identifier) + } + if self.token != token.RIGHT_PARENTHESIS { + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.expect(token.COMMA) + } + } + closing := self.expect(token.RIGHT_PARENTHESIS) + + return &ast.ParameterList{ + Opening: opening, + List: list, + Closing: closing, + } +} + +func (self *_parser) parseParameterList() (list []string) { + for self.token != token.EOF { + if self.token != token.IDENTIFIER { + self.expect(token.IDENTIFIER) + } + list = append(list, self.literal) + self.next() + if self.token != token.EOF { + self.expect(token.COMMA) + } + } + return +} + +func (self *_parser) parseFunctionStatement() *ast.FunctionStatement { + var comments []*ast.Comment + if self.mode&StoreComments != 0 { + comments = self.comments.FetchAll() + } + function := &ast.FunctionStatement{ + Function: self.parseFunction(true), + } + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(function, comments, ast.LEADING) + } + + return function +} + +func (self *_parser) parseFunction(declaration bool) *ast.FunctionLiteral { + + node := &ast.FunctionLiteral{ + Function: self.expect(token.FUNCTION), + } + + var name *ast.Identifier + if self.token == token.IDENTIFIER { + name = self.parseIdentifier() + if declaration { + self.scope.declare(&ast.FunctionDeclaration{ + Function: node, + }) + } + } else if declaration { + // Use expect error handling + self.expect(token.IDENTIFIER) + } + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + node.Name = name + node.ParameterList = self.parseFunctionParameterList() + self.parseFunctionBlock(node) + node.Source = self.slice(node.Idx0(), node.Idx1()) + + return node +} + +func (self *_parser) parseFunctionBlock(node *ast.FunctionLiteral) { + { + 
self.openScope() + inFunction := self.scope.inFunction + self.scope.inFunction = true + defer func() { + self.scope.inFunction = inFunction + self.closeScope() + }() + node.Body = self.parseBlockStatement() + node.DeclarationList = self.scope.declarationList + } +} + +func (self *_parser) parseDebuggerStatement() ast.Statement { + idx := self.expect(token.DEBUGGER) + + node := &ast.DebuggerStatement{ + Debugger: idx, + } + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(node, self.comments.FetchAll(), ast.TRAILING) + } + + self.semicolon() + return node +} + +func (self *_parser) parseReturnStatement() ast.Statement { + idx := self.expect(token.RETURN) + var comments []*ast.Comment + if self.mode&StoreComments != 0 { + comments = self.comments.FetchAll() + } + + if !self.scope.inFunction { + self.error(idx, "Illegal return statement") + self.nextStatement() + return &ast.BadStatement{From: idx, To: self.idx} + } + + node := &ast.ReturnStatement{ + Return: idx, + } + + if !self.implicitSemicolon && self.token != token.SEMICOLON && self.token != token.RIGHT_BRACE && self.token != token.EOF { + node.Argument = self.parseExpression() + } + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(node, comments, ast.LEADING) + } + + self.semicolon() + + return node +} + +func (self *_parser) parseThrowStatement() ast.Statement { + var comments []*ast.Comment + if self.mode&StoreComments != 0 { + comments = self.comments.FetchAll() + } + idx := self.expect(token.THROW) + + if self.implicitSemicolon { + if self.chr == -1 { // Hackish + self.error(idx, "Unexpected end of input") + } else { + self.error(idx, "Illegal newline after throw") + } + self.nextStatement() + return &ast.BadStatement{From: idx, To: self.idx} + } + + node := &ast.ThrowStatement{ + Argument: self.parseExpression(), + } + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(node, comments, ast.LEADING) + } + + self.semicolon() + + return node +} + +func (self *_parser) parseSwitchStatement() ast.Statement { + var comments []*ast.Comment + if self.mode&StoreComments != 0 { + comments = self.comments.FetchAll() + } + self.expect(token.SWITCH) + if self.mode&StoreComments != 0 { + comments = append(comments, self.comments.FetchAll()...) + } + self.expect(token.LEFT_PARENTHESIS) + node := &ast.SwitchStatement{ + Discriminant: self.parseExpression(), + Default: -1, + } + self.expect(token.RIGHT_PARENTHESIS) + if self.mode&StoreComments != 0 { + comments = append(comments, self.comments.FetchAll()...) 
+ } + + self.expect(token.LEFT_BRACE) + + inSwitch := self.scope.inSwitch + self.scope.inSwitch = true + defer func() { + self.scope.inSwitch = inSwitch + }() + + for index := 0; self.token != token.EOF; index++ { + if self.token == token.RIGHT_BRACE { + self.next() + break + } + + clause := self.parseCaseStatement() + if clause.Test == nil { + if node.Default != -1 { + self.error(clause.Case, "Already saw a default in switch") + } + node.Default = index + } + node.Body = append(node.Body, clause) + } + + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(node, comments, ast.LEADING) + } + + return node +} + +func (self *_parser) parseWithStatement() ast.Statement { + var comments []*ast.Comment + if self.mode&StoreComments != 0 { + comments = self.comments.FetchAll() + } + self.expect(token.WITH) + var withComments []*ast.Comment + if self.mode&StoreComments != 0 { + withComments = self.comments.FetchAll() + } + + self.expect(token.LEFT_PARENTHESIS) + + node := &ast.WithStatement{ + Object: self.parseExpression(), + } + self.expect(token.RIGHT_PARENTHESIS) + + if self.mode&StoreComments != 0 { + //comments = append(comments, self.comments.FetchAll()...) + self.comments.CommentMap.AddComments(node, comments, ast.LEADING) + self.comments.CommentMap.AddComments(node, withComments, ast.WITH) + } + + node.Body = self.parseStatement() + + return node +} + +func (self *_parser) parseCaseStatement() *ast.CaseStatement { + node := &ast.CaseStatement{ + Case: self.idx, + } + + var comments []*ast.Comment + if self.mode&StoreComments != 0 { + comments = self.comments.FetchAll() + self.comments.Unset() + } + + if self.token == token.DEFAULT { + self.next() + } else { + self.expect(token.CASE) + node.Test = self.parseExpression() + } + + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.expect(token.COLON) + + for { + if self.token == token.EOF || + self.token == token.RIGHT_BRACE || + self.token == token.CASE || + self.token == token.DEFAULT { + break + } + consequent := self.parseStatement() + node.Consequent = append(node.Consequent, consequent) + } + + // Link the comments to the case statement + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(node, comments, ast.LEADING) + } + + return node +} + +func (self *_parser) parseIterationStatement() ast.Statement { + inIteration := self.scope.inIteration + self.scope.inIteration = true + defer func() { + self.scope.inIteration = inIteration + }() + return self.parseStatement() +} + +func (self *_parser) parseForIn(into ast.Expression) *ast.ForInStatement { + + // Already have consumed " in" + + source := self.parseExpression() + self.expect(token.RIGHT_PARENTHESIS) + body := self.parseIterationStatement() + + forin := &ast.ForInStatement{ + Into: into, + Source: source, + Body: body, + } + + return forin +} + +func (self *_parser) parseFor(initializer ast.Expression) *ast.ForStatement { + + // Already have consumed " ;" + + var test, update ast.Expression + + if self.token != token.SEMICOLON { + test = self.parseExpression() + } + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.expect(token.SEMICOLON) + + if self.token != token.RIGHT_PARENTHESIS { + update = self.parseExpression() + } + self.expect(token.RIGHT_PARENTHESIS) + body := self.parseIterationStatement() + + forstatement := &ast.ForStatement{ + Initializer: initializer, + Test: test, + Update: update, + Body: body, + } + + return forstatement +} + +func (self *_parser) parseForOrForInStatement() ast.Statement 
{ + var comments []*ast.Comment + if self.mode&StoreComments != 0 { + comments = self.comments.FetchAll() + } + idx := self.expect(token.FOR) + var forComments []*ast.Comment + if self.mode&StoreComments != 0 { + forComments = self.comments.FetchAll() + } + self.expect(token.LEFT_PARENTHESIS) + + var left []ast.Expression + + forIn := false + if self.token != token.SEMICOLON { + + allowIn := self.scope.allowIn + self.scope.allowIn = false + if self.token == token.VAR { + var_ := self.idx + var varComments []*ast.Comment + if self.mode&StoreComments != 0 { + varComments = self.comments.FetchAll() + self.comments.Unset() + } + self.next() + list := self.parseVariableDeclarationList(var_) + if len(list) == 1 && self.token == token.IN { + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.next() // in + forIn = true + left = []ast.Expression{list[0]} // There is only one declaration + } else { + left = list + } + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(left[0], varComments, ast.LEADING) + } + } else { + left = append(left, self.parseExpression()) + if self.token == token.IN { + self.next() + forIn = true + } + } + self.scope.allowIn = allowIn + } + + if forIn { + switch left[0].(type) { + case *ast.Identifier, *ast.DotExpression, *ast.BracketExpression, *ast.VariableExpression: + // These are all acceptable + default: + self.error(idx, "Invalid left-hand side in for-in") + self.nextStatement() + return &ast.BadStatement{From: idx, To: self.idx} + } + + forin := self.parseForIn(left[0]) + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(forin, comments, ast.LEADING) + self.comments.CommentMap.AddComments(forin, forComments, ast.FOR) + } + return forin + } + + if self.mode&StoreComments != 0 { + self.comments.Unset() + } + self.expect(token.SEMICOLON) + initializer := &ast.SequenceExpression{Sequence: left} + forstatement := self.parseFor(initializer) + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(forstatement, comments, ast.LEADING) + self.comments.CommentMap.AddComments(forstatement, forComments, ast.FOR) + } + return forstatement +} + +func (self *_parser) parseVariableStatement() *ast.VariableStatement { + var comments []*ast.Comment + if self.mode&StoreComments != 0 { + comments = self.comments.FetchAll() + } + idx := self.expect(token.VAR) + + list := self.parseVariableDeclarationList(idx) + + statement := &ast.VariableStatement{ + Var: idx, + List: list, + } + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(statement, comments, ast.LEADING) + self.comments.Unset() + } + self.semicolon() + + return statement +} + +func (self *_parser) parseDoWhileStatement() ast.Statement { + inIteration := self.scope.inIteration + self.scope.inIteration = true + defer func() { + self.scope.inIteration = inIteration + }() + + var comments []*ast.Comment + if self.mode&StoreComments != 0 { + comments = self.comments.FetchAll() + } + self.expect(token.DO) + var doComments []*ast.Comment + if self.mode&StoreComments != 0 { + doComments = self.comments.FetchAll() + } + + node := &ast.DoWhileStatement{} + if self.token == token.LEFT_BRACE { + node.Body = self.parseBlockStatement() + } else { + node.Body = self.parseStatement() + } + + self.expect(token.WHILE) + var whileComments []*ast.Comment + if self.mode&StoreComments != 0 { + whileComments = self.comments.FetchAll() + } + self.expect(token.LEFT_PARENTHESIS) + node.Test = self.parseExpression() + 
self.expect(token.RIGHT_PARENTHESIS) + + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(node, comments, ast.LEADING) + self.comments.CommentMap.AddComments(node, doComments, ast.DO) + self.comments.CommentMap.AddComments(node, whileComments, ast.WHILE) + } + + return node +} + +func (self *_parser) parseWhileStatement() ast.Statement { + var comments []*ast.Comment + if self.mode&StoreComments != 0 { + comments = self.comments.FetchAll() + } + self.expect(token.WHILE) + + var whileComments []*ast.Comment + if self.mode&StoreComments != 0 { + whileComments = self.comments.FetchAll() + } + + self.expect(token.LEFT_PARENTHESIS) + node := &ast.WhileStatement{ + Test: self.parseExpression(), + } + self.expect(token.RIGHT_PARENTHESIS) + node.Body = self.parseIterationStatement() + + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(node, comments, ast.LEADING) + self.comments.CommentMap.AddComments(node, whileComments, ast.WHILE) + } + + return node +} + +func (self *_parser) parseIfStatement() ast.Statement { + var comments []*ast.Comment + if self.mode&StoreComments != 0 { + comments = self.comments.FetchAll() + } + self.expect(token.IF) + var ifComments []*ast.Comment + if self.mode&StoreComments != 0 { + ifComments = self.comments.FetchAll() + } + + self.expect(token.LEFT_PARENTHESIS) + node := &ast.IfStatement{ + Test: self.parseExpression(), + } + self.expect(token.RIGHT_PARENTHESIS) + if self.token == token.LEFT_BRACE { + node.Consequent = self.parseBlockStatement() + } else { + node.Consequent = self.parseStatement() + } + + if self.token == token.ELSE { + self.next() + node.Alternate = self.parseStatement() + } + + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(node, comments, ast.LEADING) + self.comments.CommentMap.AddComments(node, ifComments, ast.IF) + } + + return node +} + +func (self *_parser) parseSourceElement() ast.Statement { + statement := self.parseStatement() + //self.comments.Unset() + return statement +} + +func (self *_parser) parseSourceElements() []ast.Statement { + body := []ast.Statement(nil) + + for { + if self.token != token.STRING { + break + } + body = append(body, self.parseSourceElement()) + } + + for self.token != token.EOF { + body = append(body, self.parseSourceElement()) + } + + return body +} + +func (self *_parser) parseProgram() *ast.Program { + self.openScope() + defer self.closeScope() + return &ast.Program{ + Body: self.parseSourceElements(), + DeclarationList: self.scope.declarationList, + File: self.file, + } +} + +func (self *_parser) parseBreakStatement() ast.Statement { + var comments []*ast.Comment + if self.mode&StoreComments != 0 { + comments = self.comments.FetchAll() + } + idx := self.expect(token.BREAK) + semicolon := self.implicitSemicolon + if self.token == token.SEMICOLON { + semicolon = true + self.next() + } + + if semicolon || self.token == token.RIGHT_BRACE { + self.implicitSemicolon = false + if !self.scope.inIteration && !self.scope.inSwitch { + goto illegal + } + breakStatement := &ast.BranchStatement{ + Idx: idx, + Token: token.BREAK, + } + + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(breakStatement, comments, ast.LEADING) + self.comments.CommentMap.AddComments(breakStatement, self.comments.FetchAll(), ast.TRAILING) + } + + return breakStatement + } + + if self.token == token.IDENTIFIER { + identifier := self.parseIdentifier() + if !self.scope.hasLabel(identifier.Name) { + self.error(idx, "Undefined label '%s'", 
identifier.Name) + return &ast.BadStatement{From: idx, To: identifier.Idx1()} + } + self.semicolon() + breakStatement := &ast.BranchStatement{ + Idx: idx, + Token: token.BREAK, + Label: identifier, + } + if self.mode&StoreComments != 0 { + self.comments.CommentMap.AddComments(breakStatement, comments, ast.LEADING) + } + + return breakStatement + } + + self.expect(token.IDENTIFIER) + +illegal: + self.error(idx, "Illegal break statement") + self.nextStatement() + return &ast.BadStatement{From: idx, To: self.idx} +} + +func (self *_parser) parseContinueStatement() ast.Statement { + idx := self.expect(token.CONTINUE) + semicolon := self.implicitSemicolon + if self.token == token.SEMICOLON { + semicolon = true + self.next() + } + + if semicolon || self.token == token.RIGHT_BRACE { + self.implicitSemicolon = false + if !self.scope.inIteration { + goto illegal + } + return &ast.BranchStatement{ + Idx: idx, + Token: token.CONTINUE, + } + } + + if self.token == token.IDENTIFIER { + identifier := self.parseIdentifier() + if !self.scope.hasLabel(identifier.Name) { + self.error(idx, "Undefined label '%s'", identifier.Name) + return &ast.BadStatement{From: idx, To: identifier.Idx1()} + } + if !self.scope.inIteration { + goto illegal + } + self.semicolon() + return &ast.BranchStatement{ + Idx: idx, + Token: token.CONTINUE, + Label: identifier, + } + } + + self.expect(token.IDENTIFIER) + +illegal: + self.error(idx, "Illegal continue statement") + self.nextStatement() + return &ast.BadStatement{From: idx, To: self.idx} +} + +// Find the next statement after an error (recover) +func (self *_parser) nextStatement() { + for { + switch self.token { + case token.BREAK, token.CONTINUE, + token.FOR, token.IF, token.RETURN, token.SWITCH, + token.VAR, token.DO, token.TRY, token.WITH, + token.WHILE, token.THROW, token.CATCH, token.FINALLY: + // Return only if parser made some progress since last + // sync or if it has not reached 10 next calls without + // progress. Otherwise consume at least one token to + // avoid an endless parser loop + if self.idx == self.recover.idx && self.recover.count < 10 { + self.recover.count++ + return + } + if self.idx > self.recover.idx { + self.recover.idx = self.idx + self.recover.count = 0 + return + } + // Reaching here indicates a parser bug, likely an + // incorrect token list in this function, but it only + // leads to skipping of possibly correct code if a + // previous error is present, and thus is preferred + // over a non-terminating parse. 
+ case token.EOF: + return + } + self.next() + } +} diff --git a/vendor/github.com/robertkrimen/otto/property.go b/vendor/github.com/robertkrimen/otto/property.go new file mode 100644 index 000000000..5445eccde --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/property.go @@ -0,0 +1,220 @@ +package otto + +// property + +type _propertyMode int + +const ( + modeWriteMask _propertyMode = 0700 + modeEnumerateMask = 0070 + modeConfigureMask = 0007 + modeOnMask = 0111 + modeOffMask = 0000 + modeSetMask = 0222 // If value is 2, then mode is neither "On" nor "Off" +) + +type _propertyGetSet [2]*_object + +var _nilGetSetObject _object = _object{} + +type _property struct { + value interface{} + mode _propertyMode +} + +func (self _property) writable() bool { + return self.mode&modeWriteMask == modeWriteMask&modeOnMask +} + +func (self *_property) writeOn() { + self.mode = (self.mode & ^modeWriteMask) | (modeWriteMask & modeOnMask) +} + +func (self *_property) writeOff() { + self.mode &= ^modeWriteMask +} + +func (self *_property) writeClear() { + self.mode = (self.mode & ^modeWriteMask) | (modeWriteMask & modeSetMask) +} + +func (self _property) writeSet() bool { + return 0 == self.mode&modeWriteMask&modeSetMask +} + +func (self _property) enumerable() bool { + return self.mode&modeEnumerateMask == modeEnumerateMask&modeOnMask +} + +func (self *_property) enumerateOn() { + self.mode = (self.mode & ^modeEnumerateMask) | (modeEnumerateMask & modeOnMask) +} + +func (self *_property) enumerateOff() { + self.mode &= ^modeEnumerateMask +} + +func (self _property) enumerateSet() bool { + return 0 == self.mode&modeEnumerateMask&modeSetMask +} + +func (self _property) configurable() bool { + return self.mode&modeConfigureMask == modeConfigureMask&modeOnMask +} + +func (self *_property) configureOn() { + self.mode = (self.mode & ^modeConfigureMask) | (modeConfigureMask & modeOnMask) +} + +func (self *_property) configureOff() { + self.mode &= ^modeConfigureMask +} + +func (self _property) configureSet() bool { + return 0 == self.mode&modeConfigureMask&modeSetMask +} + +func (self _property) copy() *_property { + property := self + return &property +} + +func (self _property) get(this *_object) Value { + switch value := self.value.(type) { + case Value: + return value + case _propertyGetSet: + if value[0] != nil { + return value[0].call(toValue(this), nil, false, nativeFrame) + } + } + return Value{} +} + +func (self _property) isAccessorDescriptor() bool { + setGet, test := self.value.(_propertyGetSet) + return test && (setGet[0] != nil || setGet[1] != nil) +} + +func (self _property) isDataDescriptor() bool { + if self.writeSet() { // Either "On" or "Off" + return true + } + value, valid := self.value.(Value) + return valid && !value.isEmpty() +} + +func (self _property) isGenericDescriptor() bool { + return !(self.isDataDescriptor() || self.isAccessorDescriptor()) +} + +func (self _property) isEmpty() bool { + return self.mode == 0222 && self.isGenericDescriptor() +} + +// _enumerableValue, _enumerableTrue, _enumerableFalse? 
+// .enumerableValue() .enumerableExists() + +func toPropertyDescriptor(rt *_runtime, value Value) (descriptor _property) { + objectDescriptor := value._object() + if objectDescriptor == nil { + panic(rt.panicTypeError()) + } + + { + descriptor.mode = modeSetMask // Initially nothing is set + if objectDescriptor.hasProperty("enumerable") { + if objectDescriptor.get("enumerable").bool() { + descriptor.enumerateOn() + } else { + descriptor.enumerateOff() + } + } + + if objectDescriptor.hasProperty("configurable") { + if objectDescriptor.get("configurable").bool() { + descriptor.configureOn() + } else { + descriptor.configureOff() + } + } + + if objectDescriptor.hasProperty("writable") { + if objectDescriptor.get("writable").bool() { + descriptor.writeOn() + } else { + descriptor.writeOff() + } + } + } + + var getter, setter *_object + getterSetter := false + + if objectDescriptor.hasProperty("get") { + value := objectDescriptor.get("get") + if value.IsDefined() { + if !value.isCallable() { + panic(rt.panicTypeError()) + } + getter = value._object() + getterSetter = true + } else { + getter = &_nilGetSetObject + getterSetter = true + } + } + + if objectDescriptor.hasProperty("set") { + value := objectDescriptor.get("set") + if value.IsDefined() { + if !value.isCallable() { + panic(rt.panicTypeError()) + } + setter = value._object() + getterSetter = true + } else { + setter = &_nilGetSetObject + getterSetter = true + } + } + + if getterSetter { + if descriptor.writeSet() { + panic(rt.panicTypeError()) + } + descriptor.value = _propertyGetSet{getter, setter} + } + + if objectDescriptor.hasProperty("value") { + if getterSetter { + panic(rt.panicTypeError()) + } + descriptor.value = objectDescriptor.get("value") + } + + return +} + +func (self *_runtime) fromPropertyDescriptor(descriptor _property) *_object { + object := self.newObject() + if descriptor.isDataDescriptor() { + object.defineProperty("value", descriptor.value.(Value), 0111, false) + object.defineProperty("writable", toValue_bool(descriptor.writable()), 0111, false) + } else if descriptor.isAccessorDescriptor() { + getSet := descriptor.value.(_propertyGetSet) + get := Value{} + if getSet[0] != nil { + get = toValue_object(getSet[0]) + } + set := Value{} + if getSet[1] != nil { + set = toValue_object(getSet[1]) + } + object.defineProperty("get", get, 0111, false) + object.defineProperty("set", set, 0111, false) + } + object.defineProperty("enumerable", toValue_bool(descriptor.enumerable()), 0111, false) + object.defineProperty("configurable", toValue_bool(descriptor.configurable()), 0111, false) + return object +} diff --git a/vendor/github.com/robertkrimen/otto/registry/README.markdown b/vendor/github.com/robertkrimen/otto/registry/README.markdown new file mode 100644 index 000000000..ba2d38909 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/registry/README.markdown @@ -0,0 +1,51 @@ +# registry +-- + import "github.com/robertkrimen/otto/registry" + +Package registry is an expirmental package to facillitate altering the otto +runtime via import. + +This interface can change at any time. 
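A hedged usage sketch (editorial addition, based only on the API listed under Usage below; the exact call sites inside otto that consume the registry are not shown here):

```go
package main

import "github.com/robertkrimen/otto/registry"

func main() {
	// Register a source snippet; enabled entries are meant to be picked
	// up by the runtime via Apply.
	entry := registry.Register(func() string {
		return `var answer = 6 * 7;`
	})

	// Entries can be toggled on and off.
	entry.Disable()
	entry.Enable()

	// Walk every enabled entry and read its source.
	registry.Apply(func(e registry.Entry) {
		_ = e.Source()
	})
}
```
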
+ +## Usage + +#### func Apply + +```go +func Apply(callback func(Entry)) +``` + +#### type Entry + +```go +type Entry struct { +} +``` + + +#### func Register + +```go +func Register(source func() string) *Entry +``` + +#### func (*Entry) Disable + +```go +func (self *Entry) Disable() +``` + +#### func (*Entry) Enable + +```go +func (self *Entry) Enable() +``` + +#### func (Entry) Source + +```go +func (self Entry) Source() string +``` + +-- +**godocdown** http://github.com/robertkrimen/godocdown diff --git a/vendor/github.com/robertkrimen/otto/registry/registry.go b/vendor/github.com/robertkrimen/otto/registry/registry.go new file mode 100644 index 000000000..966638ac4 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/registry/registry.go @@ -0,0 +1,47 @@ +/* +Package registry is an expirmental package to facillitate altering the otto runtime via import. + +This interface can change at any time. +*/ +package registry + +var registry []*Entry = make([]*Entry, 0) + +type Entry struct { + active bool + source func() string +} + +func newEntry(source func() string) *Entry { + return &Entry{ + active: true, + source: source, + } +} + +func (self *Entry) Enable() { + self.active = true +} + +func (self *Entry) Disable() { + self.active = false +} + +func (self Entry) Source() string { + return self.source() +} + +func Apply(callback func(Entry)) { + for _, entry := range registry { + if !entry.active { + continue + } + callback(*entry) + } +} + +func Register(source func() string) *Entry { + entry := newEntry(source) + registry = append(registry, entry) + return entry +} diff --git a/vendor/github.com/robertkrimen/otto/result.go b/vendor/github.com/robertkrimen/otto/result.go new file mode 100644 index 000000000..63642e7d0 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/result.go @@ -0,0 +1,30 @@ +package otto + +import () + +type _resultKind int + +const ( + resultNormal _resultKind = iota + resultReturn + resultBreak + resultContinue +) + +type _result struct { + kind _resultKind + value Value + target string +} + +func newReturnResult(value Value) _result { + return _result{resultReturn, value, ""} +} + +func newContinueResult(target string) _result { + return _result{resultContinue, emptyValue, target} +} + +func newBreakResult(target string) _result { + return _result{resultBreak, emptyValue, target} +} diff --git a/vendor/github.com/robertkrimen/otto/runtime.go b/vendor/github.com/robertkrimen/otto/runtime.go new file mode 100644 index 000000000..6ebb1f982 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/runtime.go @@ -0,0 +1,679 @@ +package otto + +import ( + "errors" + "fmt" + "math" + "path" + "reflect" + "runtime" + "strconv" + "sync" + + "github.com/robertkrimen/otto/ast" + "github.com/robertkrimen/otto/parser" +) + +type _global struct { + Object *_object // Object( ... ), new Object( ... ) - 1 (length) + Function *_object // Function( ... ), new Function( ... ) - 1 + Array *_object // Array( ... ), new Array( ... ) - 1 + String *_object // String( ... ), new String( ... ) - 1 + Boolean *_object // Boolean( ... ), new Boolean( ... ) - 1 + Number *_object // Number( ... ), new Number( ... ) - 1 + Math *_object + Date *_object // Date( ... ), new Date( ... ) - 7 + RegExp *_object // RegExp( ... ), new RegExp( ... ) - 2 + Error *_object // Error( ... ), new Error( ... 
) - 1 + EvalError *_object + TypeError *_object + RangeError *_object + ReferenceError *_object + SyntaxError *_object + URIError *_object + JSON *_object + + ObjectPrototype *_object // Object.prototype + FunctionPrototype *_object // Function.prototype + ArrayPrototype *_object // Array.prototype + StringPrototype *_object // String.prototype + BooleanPrototype *_object // Boolean.prototype + NumberPrototype *_object // Number.prototype + DatePrototype *_object // Date.prototype + RegExpPrototype *_object // RegExp.prototype + ErrorPrototype *_object // Error.prototype + EvalErrorPrototype *_object + TypeErrorPrototype *_object + RangeErrorPrototype *_object + ReferenceErrorPrototype *_object + SyntaxErrorPrototype *_object + URIErrorPrototype *_object +} + +type _runtime struct { + global _global + globalObject *_object + globalStash *_objectStash + scope *_scope + otto *Otto + eval *_object // The builtin eval, for determine indirect versus direct invocation + debugger func(*Otto) + random func() float64 + stackLimit int + traceLimit int + + labels []string // FIXME + lck sync.Mutex +} + +func (self *_runtime) enterScope(scope *_scope) { + scope.outer = self.scope + if self.scope != nil { + if self.stackLimit != 0 && self.scope.depth+1 >= self.stackLimit { + panic(self.panicRangeError("Maximum call stack size exceeded")) + } + + scope.depth = self.scope.depth + 1 + } + + self.scope = scope +} + +func (self *_runtime) leaveScope() { + self.scope = self.scope.outer +} + +// FIXME This is used in two places (cloning) +func (self *_runtime) enterGlobalScope() { + self.enterScope(newScope(self.globalStash, self.globalStash, self.globalObject)) +} + +func (self *_runtime) enterFunctionScope(outer _stash, this Value) *_fnStash { + if outer == nil { + outer = self.globalStash + } + stash := self.newFunctionStash(outer) + var thisObject *_object + switch this.kind { + case valueUndefined, valueNull: + thisObject = self.globalObject + default: + thisObject = self.toObject(this) + } + self.enterScope(newScope(stash, stash, thisObject)) + return stash +} + +func (self *_runtime) putValue(reference _reference, value Value) { + name := reference.putValue(value) + if name != "" { + // Why? -- If reference.base == nil + // strict = false + self.globalObject.defineProperty(name, value, 0111, false) + } +} + +func (self *_runtime) tryCatchEvaluate(inner func() Value) (tryValue Value, exception bool) { + // resultValue = The value of the block (e.g. 
the last statement) + // throw = Something was thrown + // throwValue = The value of what was thrown + // other = Something that changes flow (return, break, continue) that is not a throw + // Otherwise, some sort of unknown panic happened, we'll just propagate it + defer func() { + if caught := recover(); caught != nil { + if exception, ok := caught.(*_exception); ok { + caught = exception.eject() + } + switch caught := caught.(type) { + case _error: + exception = true + tryValue = toValue_object(self.newError(caught.name, caught.messageValue(), 0)) + case Value: + exception = true + tryValue = caught + default: + panic(caught) + } + } + }() + + tryValue = inner() + return +} + +// toObject + +func (self *_runtime) toObject(value Value) *_object { + switch value.kind { + case valueEmpty, valueUndefined, valueNull: + panic(self.panicTypeError()) + case valueBoolean: + return self.newBoolean(value) + case valueString: + return self.newString(value) + case valueNumber: + return self.newNumber(value) + case valueObject: + return value._object() + } + panic(self.panicTypeError()) +} + +func (self *_runtime) objectCoerce(value Value) (*_object, error) { + switch value.kind { + case valueUndefined: + return nil, errors.New("undefined") + case valueNull: + return nil, errors.New("null") + case valueBoolean: + return self.newBoolean(value), nil + case valueString: + return self.newString(value), nil + case valueNumber: + return self.newNumber(value), nil + case valueObject: + return value._object(), nil + } + panic(self.panicTypeError()) +} + +func checkObjectCoercible(rt *_runtime, value Value) { + isObject, mustCoerce := testObjectCoercible(value) + if !isObject && !mustCoerce { + panic(rt.panicTypeError()) + } +} + +// testObjectCoercible + +func testObjectCoercible(value Value) (isObject bool, mustCoerce bool) { + switch value.kind { + case valueReference, valueEmpty, valueNull, valueUndefined: + return false, false + case valueNumber, valueString, valueBoolean: + return false, true + case valueObject: + return true, false + default: + panic("this should never happen") + } +} + +func (self *_runtime) safeToValue(value interface{}) (Value, error) { + result := Value{} + err := catchPanic(func() { + result = self.toValue(value) + }) + return result, err +} + +// convertNumeric converts numeric parameter val from js to that of type t if it is safe to do so, otherwise it panics. +// This allows literals (int64), bitwise values (int32) and the general form (float64) of javascript numerics to be passed as parameters to go functions easily. 
+func (self *_runtime) convertNumeric(v Value, t reflect.Type) reflect.Value { + val := reflect.ValueOf(v.export()) + + if val.Kind() == t.Kind() { + return val + } + + if val.Kind() == reflect.Interface { + val = reflect.ValueOf(val.Interface()) + } + + switch val.Kind() { + case reflect.Float32, reflect.Float64: + f64 := val.Float() + switch t.Kind() { + case reflect.Float64: + return reflect.ValueOf(f64) + case reflect.Float32: + if reflect.Zero(t).OverflowFloat(f64) { + panic(self.panicRangeError("converting float64 to float32 would overflow")) + } + + return val.Convert(t) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + i64 := int64(f64) + if float64(i64) != f64 { + panic(self.panicRangeError(fmt.Sprintf("converting %v to %v would cause loss of precision", val.Type(), t))) + } + + // The float represents an integer + val = reflect.ValueOf(i64) + default: + panic(self.panicTypeError(fmt.Sprintf("cannot convert %v to %v", val.Type(), t))) + } + } + + switch val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i64 := val.Int() + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if reflect.Zero(t).OverflowInt(i64) { + panic(self.panicRangeError(fmt.Sprintf("converting %v to %v would overflow", val.Type(), t))) + } + return val.Convert(t) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if i64 < 0 { + panic(self.panicRangeError(fmt.Sprintf("converting %v to %v would underflow", val.Type(), t))) + } + if reflect.Zero(t).OverflowUint(uint64(i64)) { + panic(self.panicRangeError(fmt.Sprintf("converting %v to %v would overflow", val.Type(), t))) + } + return val.Convert(t) + } + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + u64 := val.Uint() + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if u64 > math.MaxInt64 || reflect.Zero(t).OverflowInt(int64(u64)) { + panic(self.panicRangeError(fmt.Sprintf("converting %v to %v would overflow", val.Type(), t))) + } + return val.Convert(t) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if reflect.Zero(t).OverflowUint(u64) { + panic(self.panicRangeError(fmt.Sprintf("converting %v to %v would overflow", val.Type(), t))) + } + return val.Convert(t) + } + } + + panic(self.panicTypeError(fmt.Sprintf("unsupported type %v for numeric conversion", val.Type()))) +} + +var typeOfValue = reflect.TypeOf(Value{}) + +// convertCallParameter converts request val to type t if possible. +// If the conversion fails due to overflow or type miss-match then it panics. +// If no conversion is known then the original value is returned. 
+func (self *_runtime) convertCallParameter(v Value, t reflect.Type) reflect.Value { + if t == typeOfValue { + return reflect.ValueOf(v) + } + + if v.kind == valueObject { + if gso, ok := v._object().value.(*_goStructObject); ok { + if gso.value.Type().AssignableTo(t) { + return gso.value + } + } + } + + if t.Kind() == reflect.Interface { + iv := reflect.ValueOf(v.export()) + if iv.Type().AssignableTo(t) { + return iv + } + } + + tk := t.Kind() + + if tk == reflect.Ptr { + switch v.kind { + case valueEmpty, valueNull, valueUndefined: + return reflect.Zero(t) + default: + var vv reflect.Value + if err := catchPanic(func() { vv = self.convertCallParameter(v, t.Elem()) }); err == nil { + if vv.CanAddr() { + return vv.Addr() + } + + pv := reflect.New(vv.Type()) + pv.Elem().Set(vv) + return pv + } + } + } + + switch tk { + case reflect.Bool: + return reflect.ValueOf(v.bool()) + case reflect.String: + switch v.kind { + case valueString: + return reflect.ValueOf(v.value) + case valueNumber: + return reflect.ValueOf(fmt.Sprintf("%v", v.value)) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64: + switch v.kind { + case valueNumber: + return self.convertNumeric(v, t) + } + case reflect.Slice: + if o := v._object(); o != nil { + if lv := o.get("length"); lv.IsNumber() { + l := lv.number().int64 + + s := reflect.MakeSlice(t, int(l), int(l)) + + tt := t.Elem() + + for i := int64(0); i < l; i++ { + p, ok := o.property[strconv.FormatInt(i, 10)] + if !ok { + continue + } + + e, ok := p.value.(Value) + if !ok { + continue + } + + ev := self.convertCallParameter(e, tt) + + s.Index(int(i)).Set(ev) + } + + return s + } + } + case reflect.Map: + if o := v._object(); o != nil && t.Key().Kind() == reflect.String { + m := reflect.MakeMap(t) + + o.enumerate(false, func(k string) bool { + m.SetMapIndex(reflect.ValueOf(k), self.convertCallParameter(o.get(k), t.Elem())) + return true + }) + + return m + } + case reflect.Func: + if t.NumOut() > 1 { + panic(self.panicTypeError("converting JavaScript values to Go functions with more than one return value is currently not supported")) + } + + if o := v._object(); o != nil && o.class == "Function" { + return reflect.MakeFunc(t, func(args []reflect.Value) []reflect.Value { + l := make([]interface{}, len(args)) + for i, a := range args { + if a.CanInterface() { + l[i] = a.Interface() + } + } + + rv, err := v.Call(nullValue, l...) 
+ if err != nil { + panic(err) + } + + if t.NumOut() == 0 { + return nil + } + + return []reflect.Value{self.convertCallParameter(rv, t.Out(0))} + }) + } + } + + if tk == reflect.String { + if o := v._object(); o != nil && o.hasProperty("toString") { + if fn := o.get("toString"); fn.IsFunction() { + sv, err := fn.Call(v) + if err != nil { + panic(err) + } + + var r reflect.Value + if err := catchPanic(func() { r = self.convertCallParameter(sv, t) }); err == nil { + return r + } + } + } + + return reflect.ValueOf(v.String()) + } + + s := "OTTO DOES NOT UNDERSTAND THIS TYPE" + switch v.kind { + case valueBoolean: + s = "boolean" + case valueNull: + s = "null" + case valueNumber: + s = "number" + case valueString: + s = "string" + case valueUndefined: + s = "undefined" + case valueObject: + s = v.Class() + } + + panic(self.panicTypeError("can't convert from %q to %q", s, t.String())) +} + +func (self *_runtime) toValue(value interface{}) Value { + switch value := value.(type) { + case Value: + return value + case func(FunctionCall) Value: + var name, file string + var line int + pc := reflect.ValueOf(value).Pointer() + fn := runtime.FuncForPC(pc) + if fn != nil { + name = fn.Name() + file, line = fn.FileLine(pc) + file = path.Base(file) + } + return toValue_object(self.newNativeFunction(name, file, line, value)) + case _nativeFunction: + var name, file string + var line int + pc := reflect.ValueOf(value).Pointer() + fn := runtime.FuncForPC(pc) + if fn != nil { + name = fn.Name() + file, line = fn.FileLine(pc) + file = path.Base(file) + } + return toValue_object(self.newNativeFunction(name, file, line, value)) + case Object, *Object, _object, *_object: + // Nothing happens. + // FIXME We should really figure out what can come here. + // This catch-all is ugly. 
+ default: + { + value := reflect.ValueOf(value) + + switch value.Kind() { + case reflect.Ptr: + switch reflect.Indirect(value).Kind() { + case reflect.Struct: + return toValue_object(self.newGoStructObject(value)) + case reflect.Array: + return toValue_object(self.newGoArray(value)) + } + case reflect.Struct: + return toValue_object(self.newGoStructObject(value)) + case reflect.Map: + return toValue_object(self.newGoMapObject(value)) + case reflect.Slice: + return toValue_object(self.newGoSlice(value)) + case reflect.Array: + return toValue_object(self.newGoArray(value)) + case reflect.Func: + var name, file string + var line int + if v := reflect.ValueOf(value); v.Kind() == reflect.Ptr { + pc := v.Pointer() + fn := runtime.FuncForPC(pc) + if fn != nil { + name = fn.Name() + file, line = fn.FileLine(pc) + file = path.Base(file) + } + } + + typ := value.Type() + + return toValue_object(self.newNativeFunction(name, file, line, func(c FunctionCall) Value { + nargs := typ.NumIn() + + if len(c.ArgumentList) != nargs { + if typ.IsVariadic() { + if len(c.ArgumentList) < nargs-1 { + panic(self.panicRangeError(fmt.Sprintf("expected at least %d arguments; got %d", nargs-1, len(c.ArgumentList)))) + } + } else { + panic(self.panicRangeError(fmt.Sprintf("expected %d argument(s); got %d", nargs, len(c.ArgumentList)))) + } + } + + in := make([]reflect.Value, len(c.ArgumentList)) + + callSlice := false + + for i, a := range c.ArgumentList { + var t reflect.Type + + n := i + if n >= nargs-1 && typ.IsVariadic() { + if n > nargs-1 { + n = nargs - 1 + } + + t = typ.In(n).Elem() + } else { + t = typ.In(n) + } + + // if this is a variadic Go function, and the caller has supplied + // exactly the number of JavaScript arguments required, and this + // is the last JavaScript argument, try treating the it as the + // actual set of variadic Go arguments. if that succeeds, break + // out of the loop. 
+ if typ.IsVariadic() && len(c.ArgumentList) == nargs && i == nargs-1 { + var v reflect.Value + if err := catchPanic(func() { v = self.convertCallParameter(a, typ.In(n)) }); err == nil { + in[i] = v + callSlice = true + break + } + } + + in[i] = self.convertCallParameter(a, t) + } + + var out []reflect.Value + if callSlice { + out = value.CallSlice(in) + } else { + out = value.Call(in) + } + + switch len(out) { + case 0: + return Value{} + case 1: + return self.toValue(out[0].Interface()) + default: + s := make([]interface{}, len(out)) + for i, v := range out { + s[i] = self.toValue(v.Interface()) + } + + return self.toValue(s) + } + })) + } + } + } + + return toValue(value) +} + +func (runtime *_runtime) newGoSlice(value reflect.Value) *_object { + self := runtime.newGoSliceObject(value) + self.prototype = runtime.global.ArrayPrototype + return self +} + +func (runtime *_runtime) newGoArray(value reflect.Value) *_object { + self := runtime.newGoArrayObject(value) + self.prototype = runtime.global.ArrayPrototype + return self +} + +func (runtime *_runtime) parse(filename string, src, sm interface{}) (*ast.Program, error) { + return parser.ParseFileWithSourceMap(nil, filename, src, sm, 0) +} + +func (runtime *_runtime) cmpl_parse(filename string, src, sm interface{}) (*_nodeProgram, error) { + program, err := parser.ParseFileWithSourceMap(nil, filename, src, sm, 0) + if err != nil { + return nil, err + } + + return cmpl_parse(program), nil +} + +func (self *_runtime) parseSource(src, sm interface{}) (*_nodeProgram, *ast.Program, error) { + switch src := src.(type) { + case *ast.Program: + return nil, src, nil + case *Script: + return src.program, nil, nil + } + + program, err := self.parse("", src, sm) + + return nil, program, err +} + +func (self *_runtime) cmpl_runOrEval(src, sm interface{}, eval bool) (Value, error) { + result := Value{} + cmpl_program, program, err := self.parseSource(src, sm) + if err != nil { + return result, err + } + if cmpl_program == nil { + cmpl_program = cmpl_parse(program) + } + err = catchPanic(func() { + result = self.cmpl_evaluate_nodeProgram(cmpl_program, eval) + }) + switch result.kind { + case valueEmpty: + result = Value{} + case valueReference: + result = result.resolve() + } + return result, err +} + +func (self *_runtime) cmpl_run(src, sm interface{}) (Value, error) { + return self.cmpl_runOrEval(src, sm, false) +} + +func (self *_runtime) cmpl_eval(src, sm interface{}) (Value, error) { + return self.cmpl_runOrEval(src, sm, true) +} + +func (self *_runtime) parseThrow(err error) { + if err == nil { + return + } + switch err := err.(type) { + case parser.ErrorList: + { + err := err[0] + if err.Message == "Invalid left-hand side in assignment" { + panic(self.panicReferenceError(err.Message)) + } + panic(self.panicSyntaxError(err.Message)) + } + } + panic(self.panicSyntaxError(err.Error())) +} + +func (self *_runtime) cmpl_parseOrThrow(src, sm interface{}) *_nodeProgram { + program, err := self.cmpl_parse("", src, sm) + self.parseThrow(err) // Will panic/throw appropriately + return program +} diff --git a/vendor/github.com/robertkrimen/otto/scope.go b/vendor/github.com/robertkrimen/otto/scope.go new file mode 100644 index 000000000..465e6b98c --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/scope.go @@ -0,0 +1,35 @@ +package otto + +// _scope: +// entryFile +// entryIdx +// top? 
+// outer => nil + +// _stash: +// lexical +// variable +// +// _thisStash (ObjectEnvironment) +// _fnStash +// _dclStash + +// An ECMA-262 ExecutionContext +type _scope struct { + lexical _stash + variable _stash + this *_object + eval bool // Replace this with kind? + outer *_scope + depth int + + frame _frame +} + +func newScope(lexical _stash, variable _stash, this *_object) *_scope { + return &_scope{ + lexical: lexical, + variable: variable, + this: this, + } +} diff --git a/vendor/github.com/robertkrimen/otto/script.go b/vendor/github.com/robertkrimen/otto/script.go new file mode 100644 index 000000000..2ae890ecd --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/script.go @@ -0,0 +1,119 @@ +package otto + +import ( + "bytes" + "encoding/gob" + "errors" +) + +var ErrVersion = errors.New("version mismatch") + +var scriptVersion = "2014-04-13/1" + +// Script is a handle for some (reusable) JavaScript. +// Passing a Script value to a run method will evaluate the JavaScript. +// +type Script struct { + version string + program *_nodeProgram + filename string + src string +} + +// Compile will parse the given source and return a Script value or nil and +// an error if there was a problem during compilation. +// +// script, err := vm.Compile("", `var abc; if (!abc) abc = 0; abc += 2; abc;`) +// vm.Run(script) +// +func (self *Otto) Compile(filename string, src interface{}) (*Script, error) { + return self.CompileWithSourceMap(filename, src, nil) +} + +// CompileWithSourceMap does the same thing as Compile, but with the obvious +// difference of applying a source map. +func (self *Otto) CompileWithSourceMap(filename string, src, sm interface{}) (*Script, error) { + program, err := self.runtime.parse(filename, src, sm) + if err != nil { + return nil, err + } + + cmpl_program := cmpl_parse(program) + + script := &Script{ + version: scriptVersion, + program: cmpl_program, + filename: filename, + src: program.File.Source(), + } + + return script, nil +} + +func (self *Script) String() string { + return "// " + self.filename + "\n" + self.src +} + +// MarshalBinary will marshal a script into a binary form. A marshalled script +// that is later unmarshalled can be executed on the same version of the otto runtime. +// +// The binary format can change at any time and should be considered unspecified and opaque. +// +func (self *Script) marshalBinary() ([]byte, error) { + var bfr bytes.Buffer + encoder := gob.NewEncoder(&bfr) + err := encoder.Encode(self.version) + if err != nil { + return nil, err + } + err = encoder.Encode(self.program) + if err != nil { + return nil, err + } + err = encoder.Encode(self.filename) + if err != nil { + return nil, err + } + err = encoder.Encode(self.src) + if err != nil { + return nil, err + } + return bfr.Bytes(), nil +} + +// UnmarshalBinary will vivify a marshalled script into something usable. If the script was +// originally marshalled on a different version of the otto runtime, then this method +// will return an error. +// +// The binary format can change at any time and should be considered unspecified and opaque. 
+// +func (self *Script) unmarshalBinary(data []byte) error { + decoder := gob.NewDecoder(bytes.NewReader(data)) + err := decoder.Decode(&self.version) + if err != nil { + goto error + } + if self.version != scriptVersion { + err = ErrVersion + goto error + } + err = decoder.Decode(&self.program) + if err != nil { + goto error + } + err = decoder.Decode(&self.filename) + if err != nil { + goto error + } + err = decoder.Decode(&self.src) + if err != nil { + goto error + } + return nil +error: + self.version = "" + self.program = nil + self.filename = "" + self.src = "" + return err +} diff --git a/vendor/github.com/robertkrimen/otto/stash.go b/vendor/github.com/robertkrimen/otto/stash.go new file mode 100644 index 000000000..0d3ffa511 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/stash.go @@ -0,0 +1,296 @@ +package otto + +import ( + "fmt" +) + +// ====== +// _stash +// ====== + +type _stash interface { + hasBinding(string) bool // + createBinding(string, bool, Value) // CreateMutableBinding + setBinding(string, Value, bool) // SetMutableBinding + getBinding(string, bool) Value // GetBindingValue + deleteBinding(string) bool // + setValue(string, Value, bool) // createBinding + setBinding + + outer() _stash + runtime() *_runtime + + newReference(string, bool, _at) _reference + + clone(clone *_clone) _stash +} + +// ========== +// _objectStash +// ========== + +type _objectStash struct { + _runtime *_runtime + _outer _stash + object *_object +} + +func (self *_objectStash) runtime() *_runtime { + return self._runtime +} + +func (runtime *_runtime) newObjectStash(object *_object, outer _stash) *_objectStash { + if object == nil { + object = runtime.newBaseObject() + object.class = "environment" + } + return &_objectStash{ + _runtime: runtime, + _outer: outer, + object: object, + } +} + +func (in *_objectStash) clone(clone *_clone) _stash { + out, exists := clone.objectStash(in) + if exists { + return out + } + *out = _objectStash{ + clone.runtime, + clone.stash(in._outer), + clone.object(in.object), + } + return out +} + +func (self *_objectStash) hasBinding(name string) bool { + return self.object.hasProperty(name) +} + +func (self *_objectStash) createBinding(name string, deletable bool, value Value) { + if self.object.hasProperty(name) { + panic(hereBeDragons()) + } + mode := _propertyMode(0111) + if !deletable { + mode = _propertyMode(0110) + } + // TODO False? + self.object.defineProperty(name, value, mode, false) +} + +func (self *_objectStash) setBinding(name string, value Value, strict bool) { + self.object.put(name, value, strict) +} + +func (self *_objectStash) setValue(name string, value Value, throw bool) { + if !self.hasBinding(name) { + self.createBinding(name, true, value) // Configurable by default + } else { + self.setBinding(name, value, throw) + } +} + +func (self *_objectStash) getBinding(name string, throw bool) Value { + if self.object.hasProperty(name) { + return self.object.get(name) + } + if throw { // strict? 
+ panic(self._runtime.panicReferenceError("Not Defined", name)) + } + return Value{} +} + +func (self *_objectStash) deleteBinding(name string) bool { + return self.object.delete(name, false) +} + +func (self *_objectStash) outer() _stash { + return self._outer +} + +func (self *_objectStash) newReference(name string, strict bool, at _at) _reference { + return newPropertyReference(self._runtime, self.object, name, strict, at) +} + +// ========= +// _dclStash +// ========= + +type _dclStash struct { + _runtime *_runtime + _outer _stash + property map[string]_dclProperty +} + +type _dclProperty struct { + value Value + mutable bool + deletable bool + readable bool +} + +func (runtime *_runtime) newDeclarationStash(outer _stash) *_dclStash { + return &_dclStash{ + _runtime: runtime, + _outer: outer, + property: map[string]_dclProperty{}, + } +} + +func (in *_dclStash) clone(clone *_clone) _stash { + out, exists := clone.dclStash(in) + if exists { + return out + } + property := make(map[string]_dclProperty, len(in.property)) + for index, value := range in.property { + property[index] = clone.dclProperty(value) + } + *out = _dclStash{ + clone.runtime, + clone.stash(in._outer), + property, + } + return out +} + +func (self *_dclStash) hasBinding(name string) bool { + _, exists := self.property[name] + return exists +} + +func (self *_dclStash) runtime() *_runtime { + return self._runtime +} + +func (self *_dclStash) createBinding(name string, deletable bool, value Value) { + _, exists := self.property[name] + if exists { + panic(fmt.Errorf("createBinding: %s: already exists", name)) + } + self.property[name] = _dclProperty{ + value: value, + mutable: true, + deletable: deletable, + readable: false, + } +} + +func (self *_dclStash) setBinding(name string, value Value, strict bool) { + property, exists := self.property[name] + if !exists { + panic(fmt.Errorf("setBinding: %s: missing", name)) + } + if property.mutable { + property.value = value + self.property[name] = property + } else { + self._runtime.typeErrorResult(strict) + } +} + +func (self *_dclStash) setValue(name string, value Value, throw bool) { + if !self.hasBinding(name) { + self.createBinding(name, false, value) // NOT deletable by default + } else { + self.setBinding(name, value, throw) + } +} + +// FIXME This is called a __lot__ +func (self *_dclStash) getBinding(name string, throw bool) Value { + property, exists := self.property[name] + if !exists { + panic(fmt.Errorf("getBinding: %s: missing", name)) + } + if !property.mutable && !property.readable { + if throw { // strict? 
+ panic(self._runtime.panicTypeError()) + } + return Value{} + } + return property.value +} + +func (self *_dclStash) deleteBinding(name string) bool { + property, exists := self.property[name] + if !exists { + return true + } + if !property.deletable { + return false + } + delete(self.property, name) + return true +} + +func (self *_dclStash) outer() _stash { + return self._outer +} + +func (self *_dclStash) newReference(name string, strict bool, _ _at) _reference { + return &_stashReference{ + name: name, + base: self, + } +} + +// ======== +// _fnStash +// ======== + +type _fnStash struct { + _dclStash + arguments *_object + indexOfArgumentName map[string]string +} + +func (runtime *_runtime) newFunctionStash(outer _stash) *_fnStash { + return &_fnStash{ + _dclStash: _dclStash{ + _runtime: runtime, + _outer: outer, + property: map[string]_dclProperty{}, + }, + } +} + +func (in *_fnStash) clone(clone *_clone) _stash { + out, exists := clone.fnStash(in) + if exists { + return out + } + dclStash := in._dclStash.clone(clone).(*_dclStash) + index := make(map[string]string, len(in.indexOfArgumentName)) + for name, value := range in.indexOfArgumentName { + index[name] = value + } + *out = _fnStash{ + _dclStash: *dclStash, + arguments: clone.object(in.arguments), + indexOfArgumentName: index, + } + return out +} + +func getStashProperties(stash _stash) (keys []string) { + switch vars := stash.(type) { + case *_dclStash: + for k := range vars.property { + keys = append(keys, k) + } + case *_fnStash: + for k := range vars.property { + keys = append(keys, k) + } + case *_objectStash: + for k := range vars.object.property { + keys = append(keys, k) + } + default: + panic("unknown stash type") + } + + return +} diff --git a/vendor/github.com/robertkrimen/otto/token/Makefile b/vendor/github.com/robertkrimen/otto/token/Makefile new file mode 100644 index 000000000..1e85c7348 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/token/Makefile @@ -0,0 +1,2 @@ +token_const.go: tokenfmt + ./$^ | gofmt > $@ diff --git a/vendor/github.com/robertkrimen/otto/token/README.markdown b/vendor/github.com/robertkrimen/otto/token/README.markdown new file mode 100644 index 000000000..ff3b16104 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/token/README.markdown @@ -0,0 +1,171 @@ +# token +-- + import "github.com/robertkrimen/otto/token" + +Package token defines constants representing the lexical tokens of JavaScript +(ECMA5). + +## Usage + +```go +const ( + ILLEGAL + EOF + COMMENT + KEYWORD + + STRING + BOOLEAN + NULL + NUMBER + IDENTIFIER + + PLUS // + + MINUS // - + MULTIPLY // * + SLASH // / + REMAINDER // % + + AND // & + OR // | + EXCLUSIVE_OR // ^ + SHIFT_LEFT // << + SHIFT_RIGHT // >> + UNSIGNED_SHIFT_RIGHT // >>> + AND_NOT // &^ + + ADD_ASSIGN // += + SUBTRACT_ASSIGN // -= + MULTIPLY_ASSIGN // *= + QUOTIENT_ASSIGN // /= + REMAINDER_ASSIGN // %= + + AND_ASSIGN // &= + OR_ASSIGN // |= + EXCLUSIVE_OR_ASSIGN // ^= + SHIFT_LEFT_ASSIGN // <<= + SHIFT_RIGHT_ASSIGN // >>= + UNSIGNED_SHIFT_RIGHT_ASSIGN // >>>= + AND_NOT_ASSIGN // &^= + + LOGICAL_AND // && + LOGICAL_OR // || + INCREMENT // ++ + DECREMENT // -- + + EQUAL // == + STRICT_EQUAL // === + LESS // < + GREATER // > + ASSIGN // = + NOT // ! + + BITWISE_NOT // ~ + + NOT_EQUAL // != + STRICT_NOT_EQUAL // !== + LESS_OR_EQUAL // <= + GREATER_OR_EQUAL // >= + + LEFT_PARENTHESIS // ( + LEFT_BRACKET // [ + LEFT_BRACE // { + COMMA // , + PERIOD // . 
+ + RIGHT_PARENTHESIS // ) + RIGHT_BRACKET // ] + RIGHT_BRACE // } + SEMICOLON // ; + COLON // : + QUESTION_MARK // ? + + IF + IN + DO + + VAR + FOR + NEW + TRY + + THIS + ELSE + CASE + VOID + WITH + + WHILE + BREAK + CATCH + THROW + + RETURN + TYPEOF + DELETE + SWITCH + + DEFAULT + FINALLY + + FUNCTION + CONTINUE + DEBUGGER + + INSTANCEOF +) +``` + +#### type Token + +```go +type Token int +``` + +Token is the set of lexical tokens in JavaScript (ECMA5). + +#### func IsKeyword + +```go +func IsKeyword(literal string) (Token, bool) +``` +IsKeyword returns the keyword token if literal is a keyword, a KEYWORD token if +the literal is a future keyword (const, let, class, super, ...), or 0 if the +literal is not a keyword. + +If the literal is a keyword, IsKeyword returns a second value indicating if the +literal is considered a future keyword in strict-mode only. + +7.6.1.2 Future Reserved Words: + + const + class + enum + export + extends + import + super + +7.6.1.2 Future Reserved Words (strict): + + implements + interface + let + package + private + protected + public + static + +#### func (Token) String + +```go +func (tkn Token) String() string +``` +String returns the string corresponding to the token. For operators, delimiters, +and keywords the string is the actual token string (e.g., for the token PLUS, +the String() is "+"). For all other tokens the string corresponds to the token +name (e.g. for the token IDENTIFIER, the string is "IDENTIFIER"). + +-- +**godocdown** http://github.com/robertkrimen/godocdown diff --git a/vendor/github.com/robertkrimen/otto/token/token.go b/vendor/github.com/robertkrimen/otto/token/token.go new file mode 100644 index 000000000..0e941ac96 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/token/token.go @@ -0,0 +1,116 @@ +// Package token defines constants representing the lexical tokens of JavaScript (ECMA5). +package token + +import ( + "strconv" +) + +// Token is the set of lexical tokens in JavaScript (ECMA5). +type Token int + +// String returns the string corresponding to the token. +// For operators, delimiters, and keywords the string is the actual +// token string (e.g., for the token PLUS, the String() is +// "+"). For all other tokens the string corresponds to the token +// name (e.g. for the token IDENTIFIER, the string is "IDENTIFIER"). 
+// +func (tkn Token) String() string { + if 0 == tkn { + return "UNKNOWN" + } + if tkn < Token(len(token2string)) { + return token2string[tkn] + } + return "token(" + strconv.Itoa(int(tkn)) + ")" +} + +// This is not used for anything +func (tkn Token) precedence(in bool) int { + + switch tkn { + case LOGICAL_OR: + return 1 + + case LOGICAL_AND: + return 2 + + case OR, OR_ASSIGN: + return 3 + + case EXCLUSIVE_OR: + return 4 + + case AND, AND_ASSIGN, AND_NOT, AND_NOT_ASSIGN: + return 5 + + case EQUAL, + NOT_EQUAL, + STRICT_EQUAL, + STRICT_NOT_EQUAL: + return 6 + + case LESS, GREATER, LESS_OR_EQUAL, GREATER_OR_EQUAL, INSTANCEOF: + return 7 + + case IN: + if in { + return 7 + } + return 0 + + case SHIFT_LEFT, SHIFT_RIGHT, UNSIGNED_SHIFT_RIGHT: + fallthrough + case SHIFT_LEFT_ASSIGN, SHIFT_RIGHT_ASSIGN, UNSIGNED_SHIFT_RIGHT_ASSIGN: + return 8 + + case PLUS, MINUS, ADD_ASSIGN, SUBTRACT_ASSIGN: + return 9 + + case MULTIPLY, SLASH, REMAINDER, MULTIPLY_ASSIGN, QUOTIENT_ASSIGN, REMAINDER_ASSIGN: + return 11 + } + return 0 +} + +type _keyword struct { + token Token + futureKeyword bool + strict bool +} + +// IsKeyword returns the keyword token if literal is a keyword, a KEYWORD token +// if the literal is a future keyword (const, let, class, super, ...), or 0 if the literal is not a keyword. +// +// If the literal is a keyword, IsKeyword returns a second value indicating if the literal +// is considered a future keyword in strict-mode only. +// +// 7.6.1.2 Future Reserved Words: +// +// const +// class +// enum +// export +// extends +// import +// super +// +// 7.6.1.2 Future Reserved Words (strict): +// +// implements +// interface +// let +// package +// private +// protected +// public +// static +// +func IsKeyword(literal string) (Token, bool) { + if keyword, exists := keywordTable[literal]; exists { + if keyword.futureKeyword { + return KEYWORD, keyword.strict + } + return keyword.token, false + } + return 0, false +} diff --git a/vendor/github.com/robertkrimen/otto/token/token_const.go b/vendor/github.com/robertkrimen/otto/token/token_const.go new file mode 100644 index 000000000..b1d83c6de --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/token/token_const.go @@ -0,0 +1,349 @@ +package token + +const ( + _ Token = iota + + ILLEGAL + EOF + COMMENT + KEYWORD + + STRING + BOOLEAN + NULL + NUMBER + IDENTIFIER + + PLUS // + + MINUS // - + MULTIPLY // * + SLASH // / + REMAINDER // % + + AND // & + OR // | + EXCLUSIVE_OR // ^ + SHIFT_LEFT // << + SHIFT_RIGHT // >> + UNSIGNED_SHIFT_RIGHT // >>> + AND_NOT // &^ + + ADD_ASSIGN // += + SUBTRACT_ASSIGN // -= + MULTIPLY_ASSIGN // *= + QUOTIENT_ASSIGN // /= + REMAINDER_ASSIGN // %= + + AND_ASSIGN // &= + OR_ASSIGN // |= + EXCLUSIVE_OR_ASSIGN // ^= + SHIFT_LEFT_ASSIGN // <<= + SHIFT_RIGHT_ASSIGN // >>= + UNSIGNED_SHIFT_RIGHT_ASSIGN // >>>= + AND_NOT_ASSIGN // &^= + + LOGICAL_AND // && + LOGICAL_OR // || + INCREMENT // ++ + DECREMENT // -- + + EQUAL // == + STRICT_EQUAL // === + LESS // < + GREATER // > + ASSIGN // = + NOT // ! + + BITWISE_NOT // ~ + + NOT_EQUAL // != + STRICT_NOT_EQUAL // !== + LESS_OR_EQUAL // <= + GREATER_OR_EQUAL // >= + + LEFT_PARENTHESIS // ( + LEFT_BRACKET // [ + LEFT_BRACE // { + COMMA // , + PERIOD // . + + RIGHT_PARENTHESIS // ) + RIGHT_BRACKET // ] + RIGHT_BRACE // } + SEMICOLON // ; + COLON // : + QUESTION_MARK // ? 
+ + firstKeyword + IF + IN + DO + + VAR + FOR + NEW + TRY + + THIS + ELSE + CASE + VOID + WITH + + WHILE + BREAK + CATCH + THROW + + RETURN + TYPEOF + DELETE + SWITCH + + DEFAULT + FINALLY + + FUNCTION + CONTINUE + DEBUGGER + + INSTANCEOF + lastKeyword +) + +var token2string = [...]string{ + ILLEGAL: "ILLEGAL", + EOF: "EOF", + COMMENT: "COMMENT", + KEYWORD: "KEYWORD", + STRING: "STRING", + BOOLEAN: "BOOLEAN", + NULL: "NULL", + NUMBER: "NUMBER", + IDENTIFIER: "IDENTIFIER", + PLUS: "+", + MINUS: "-", + MULTIPLY: "*", + SLASH: "/", + REMAINDER: "%", + AND: "&", + OR: "|", + EXCLUSIVE_OR: "^", + SHIFT_LEFT: "<<", + SHIFT_RIGHT: ">>", + UNSIGNED_SHIFT_RIGHT: ">>>", + AND_NOT: "&^", + ADD_ASSIGN: "+=", + SUBTRACT_ASSIGN: "-=", + MULTIPLY_ASSIGN: "*=", + QUOTIENT_ASSIGN: "/=", + REMAINDER_ASSIGN: "%=", + AND_ASSIGN: "&=", + OR_ASSIGN: "|=", + EXCLUSIVE_OR_ASSIGN: "^=", + SHIFT_LEFT_ASSIGN: "<<=", + SHIFT_RIGHT_ASSIGN: ">>=", + UNSIGNED_SHIFT_RIGHT_ASSIGN: ">>>=", + AND_NOT_ASSIGN: "&^=", + LOGICAL_AND: "&&", + LOGICAL_OR: "||", + INCREMENT: "++", + DECREMENT: "--", + EQUAL: "==", + STRICT_EQUAL: "===", + LESS: "<", + GREATER: ">", + ASSIGN: "=", + NOT: "!", + BITWISE_NOT: "~", + NOT_EQUAL: "!=", + STRICT_NOT_EQUAL: "!==", + LESS_OR_EQUAL: "<=", + GREATER_OR_EQUAL: ">=", + LEFT_PARENTHESIS: "(", + LEFT_BRACKET: "[", + LEFT_BRACE: "{", + COMMA: ",", + PERIOD: ".", + RIGHT_PARENTHESIS: ")", + RIGHT_BRACKET: "]", + RIGHT_BRACE: "}", + SEMICOLON: ";", + COLON: ":", + QUESTION_MARK: "?", + IF: "if", + IN: "in", + DO: "do", + VAR: "var", + FOR: "for", + NEW: "new", + TRY: "try", + THIS: "this", + ELSE: "else", + CASE: "case", + VOID: "void", + WITH: "with", + WHILE: "while", + BREAK: "break", + CATCH: "catch", + THROW: "throw", + RETURN: "return", + TYPEOF: "typeof", + DELETE: "delete", + SWITCH: "switch", + DEFAULT: "default", + FINALLY: "finally", + FUNCTION: "function", + CONTINUE: "continue", + DEBUGGER: "debugger", + INSTANCEOF: "instanceof", +} + +var keywordTable = map[string]_keyword{ + "if": _keyword{ + token: IF, + }, + "in": _keyword{ + token: IN, + }, + "do": _keyword{ + token: DO, + }, + "var": _keyword{ + token: VAR, + }, + "for": _keyword{ + token: FOR, + }, + "new": _keyword{ + token: NEW, + }, + "try": _keyword{ + token: TRY, + }, + "this": _keyword{ + token: THIS, + }, + "else": _keyword{ + token: ELSE, + }, + "case": _keyword{ + token: CASE, + }, + "void": _keyword{ + token: VOID, + }, + "with": _keyword{ + token: WITH, + }, + "while": _keyword{ + token: WHILE, + }, + "break": _keyword{ + token: BREAK, + }, + "catch": _keyword{ + token: CATCH, + }, + "throw": _keyword{ + token: THROW, + }, + "return": _keyword{ + token: RETURN, + }, + "typeof": _keyword{ + token: TYPEOF, + }, + "delete": _keyword{ + token: DELETE, + }, + "switch": _keyword{ + token: SWITCH, + }, + "default": _keyword{ + token: DEFAULT, + }, + "finally": _keyword{ + token: FINALLY, + }, + "function": _keyword{ + token: FUNCTION, + }, + "continue": _keyword{ + token: CONTINUE, + }, + "debugger": _keyword{ + token: DEBUGGER, + }, + "instanceof": _keyword{ + token: INSTANCEOF, + }, + "const": _keyword{ + token: KEYWORD, + futureKeyword: true, + }, + "class": _keyword{ + token: KEYWORD, + futureKeyword: true, + }, + "enum": _keyword{ + token: KEYWORD, + futureKeyword: true, + }, + "export": _keyword{ + token: KEYWORD, + futureKeyword: true, + }, + "extends": _keyword{ + token: KEYWORD, + futureKeyword: true, + }, + "import": _keyword{ + token: KEYWORD, + futureKeyword: true, + }, + "super": _keyword{ + token: KEYWORD, + 
futureKeyword: true, + }, + "implements": _keyword{ + token: KEYWORD, + futureKeyword: true, + strict: true, + }, + "interface": _keyword{ + token: KEYWORD, + futureKeyword: true, + strict: true, + }, + "let": _keyword{ + token: KEYWORD, + futureKeyword: true, + strict: true, + }, + "package": _keyword{ + token: KEYWORD, + futureKeyword: true, + strict: true, + }, + "private": _keyword{ + token: KEYWORD, + futureKeyword: true, + strict: true, + }, + "protected": _keyword{ + token: KEYWORD, + futureKeyword: true, + strict: true, + }, + "public": _keyword{ + token: KEYWORD, + futureKeyword: true, + strict: true, + }, + "static": _keyword{ + token: KEYWORD, + futureKeyword: true, + strict: true, + }, +} diff --git a/vendor/github.com/robertkrimen/otto/token/tokenfmt b/vendor/github.com/robertkrimen/otto/token/tokenfmt new file mode 100755 index 000000000..63dd5d9e6 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/token/tokenfmt @@ -0,0 +1,222 @@ +#!/usr/bin/env perl + +use strict; +use warnings; + +my (%token, @order, @keywords); + +{ + my $keywords; + my @const; + push @const, <<_END_; +package token + +const( + _ Token = iota +_END_ + + for (split m/\n/, <<_END_) { +ILLEGAL +EOF +COMMENT +KEYWORD + +STRING +BOOLEAN +NULL +NUMBER +IDENTIFIER + +PLUS + +MINUS - +MULTIPLY * +SLASH / +REMAINDER % + +AND & +OR | +EXCLUSIVE_OR ^ +SHIFT_LEFT << +SHIFT_RIGHT >> +UNSIGNED_SHIFT_RIGHT >>> +AND_NOT &^ + +ADD_ASSIGN += +SUBTRACT_ASSIGN -= +MULTIPLY_ASSIGN *= +QUOTIENT_ASSIGN /= +REMAINDER_ASSIGN %= + +AND_ASSIGN &= +OR_ASSIGN |= +EXCLUSIVE_OR_ASSIGN ^= +SHIFT_LEFT_ASSIGN <<= +SHIFT_RIGHT_ASSIGN >>= +UNSIGNED_SHIFT_RIGHT_ASSIGN >>>= +AND_NOT_ASSIGN &^= + +LOGICAL_AND && +LOGICAL_OR || +INCREMENT ++ +DECREMENT -- + +EQUAL == +STRICT_EQUAL === +LESS < +GREATER > +ASSIGN = +NOT ! + +BITWISE_NOT ~ + +NOT_EQUAL != +STRICT_NOT_EQUAL !== +LESS_OR_EQUAL <= +GREATER_OR_EQUAL <= + +LEFT_PARENTHESIS ( +LEFT_BRACKET [ +LEFT_BRACE { +COMMA , +PERIOD . + +RIGHT_PARENTHESIS ) +RIGHT_BRACKET ] +RIGHT_BRACE } +SEMICOLON ; +COLON : +QUESTION_MARK ? 
+ +firstKeyword +IF +IN +DO + +VAR +FOR +NEW +TRY + +THIS +ELSE +CASE +VOID +WITH + +WHILE +BREAK +CATCH +THROW + +RETURN +TYPEOF +DELETE +SWITCH + +DEFAULT +FINALLY + +FUNCTION +CONTINUE +DEBUGGER + +INSTANCEOF +lastKeyword +_END_ + chomp; + + next if m/^\s*#/; + + my ($name, $symbol) = m/(\w+)\s*(\S+)?/; + + if (defined $symbol) { + push @order, $name; + push @const, "$name // $symbol"; + $token{$name} = $symbol; + } elsif (defined $name) { + $keywords ||= $name eq 'firstKeyword'; + + push @const, $name; + #$const[-1] .= " Token = iota" if 2 == @const; + if ($name =~ m/^([A-Z]+)/) { + push @keywords, $name if $keywords; + push @order, $name; + if ($token{SEMICOLON}) { + $token{$name} = lc $1; + } else { + $token{$name} = $name; + } + } + } else { + push @const, ""; + } + + } + push @const, ")"; + print join "\n", @const, ""; +} + +{ + print <<_END_; + +var token2string = [...]string{ +_END_ + for my $name (@order) { + print "$name: \"$token{$name}\",\n"; + } + print <<_END_; +} +_END_ + + print <<_END_; + +var keywordTable = map[string]_keyword{ +_END_ + for my $name (@keywords) { + print <<_END_ + "@{[ lc $name ]}": _keyword{ + token: $name, + }, +_END_ + } + + for my $name (qw/ + const + class + enum + export + extends + import + super + /) { + print <<_END_ + "$name": _keyword{ + token: KEYWORD, + futureKeyword: true, + }, +_END_ + } + + for my $name (qw/ + implements + interface + let + package + private + protected + public + static + /) { + print <<_END_ + "$name": _keyword{ + token: KEYWORD, + futureKeyword: true, + strict: true, + }, +_END_ + } + + print <<_END_; +} +_END_ +} diff --git a/vendor/github.com/robertkrimen/otto/type_arguments.go b/vendor/github.com/robertkrimen/otto/type_arguments.go new file mode 100644 index 000000000..841d75855 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/type_arguments.go @@ -0,0 +1,106 @@ +package otto + +import ( + "strconv" +) + +func (runtime *_runtime) newArgumentsObject(indexOfParameterName []string, stash _stash, length int) *_object { + self := runtime.newClassObject("Arguments") + + for index, _ := range indexOfParameterName { + name := strconv.FormatInt(int64(index), 10) + objectDefineOwnProperty(self, name, _property{Value{}, 0111}, false) + } + + self.objectClass = _classArguments + self.value = _argumentsObject{ + indexOfParameterName: indexOfParameterName, + stash: stash, + } + + self.prototype = runtime.global.ObjectPrototype + + self.defineProperty("length", toValue_int(length), 0101, false) + + return self +} + +type _argumentsObject struct { + indexOfParameterName []string + // function(abc, def, ghi) + // indexOfParameterName[0] = "abc" + // indexOfParameterName[1] = "def" + // indexOfParameterName[2] = "ghi" + // ... 
+ stash _stash +} + +func (in _argumentsObject) clone(clone *_clone) _argumentsObject { + indexOfParameterName := make([]string, len(in.indexOfParameterName)) + copy(indexOfParameterName, in.indexOfParameterName) + return _argumentsObject{ + indexOfParameterName, + clone.stash(in.stash), + } +} + +func (self _argumentsObject) get(name string) (Value, bool) { + index := stringToArrayIndex(name) + if index >= 0 && index < int64(len(self.indexOfParameterName)) { + name := self.indexOfParameterName[index] + if name == "" { + return Value{}, false + } + return self.stash.getBinding(name, false), true + } + return Value{}, false +} + +func (self _argumentsObject) put(name string, value Value) { + index := stringToArrayIndex(name) + name = self.indexOfParameterName[index] + self.stash.setBinding(name, value, false) +} + +func (self _argumentsObject) delete(name string) { + index := stringToArrayIndex(name) + self.indexOfParameterName[index] = "" +} + +func argumentsGet(self *_object, name string) Value { + if value, exists := self.value.(_argumentsObject).get(name); exists { + return value + } + return objectGet(self, name) +} + +func argumentsGetOwnProperty(self *_object, name string) *_property { + property := objectGetOwnProperty(self, name) + if value, exists := self.value.(_argumentsObject).get(name); exists { + property.value = value + } + return property +} + +func argumentsDefineOwnProperty(self *_object, name string, descriptor _property, throw bool) bool { + if _, exists := self.value.(_argumentsObject).get(name); exists { + if !objectDefineOwnProperty(self, name, descriptor, false) { + return self.runtime.typeErrorResult(throw) + } + if value, valid := descriptor.value.(Value); valid { + self.value.(_argumentsObject).put(name, value) + } + return true + } + return objectDefineOwnProperty(self, name, descriptor, throw) +} + +func argumentsDelete(self *_object, name string, throw bool) bool { + if !objectDelete(self, name, throw) { + return false + } + if _, exists := self.value.(_argumentsObject).get(name); exists { + self.value.(_argumentsObject).delete(name) + } + return true +} diff --git a/vendor/github.com/robertkrimen/otto/type_array.go b/vendor/github.com/robertkrimen/otto/type_array.go new file mode 100644 index 000000000..c8a974b02 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/type_array.go @@ -0,0 +1,109 @@ +package otto + +import ( + "strconv" +) + +func (runtime *_runtime) newArrayObject(length uint32) *_object { + self := runtime.newObject() + self.class = "Array" + self.defineProperty("length", toValue_uint32(length), 0100, false) + self.objectClass = _classArray + return self +} + +func isArray(object *_object) bool { + return object != nil && (object.class == "Array" || object.class == "GoArray") +} + +func objectLength(object *_object) uint32 { + if object == nil { + return 0 + } + switch object.class { + case "Array": + return object.get("length").value.(uint32) + case "String": + return uint32(object.get("length").value.(int)) + case "GoArray": + return uint32(object.get("length").value.(int)) + } + return 0 +} + +func arrayUint32(rt *_runtime, value Value) uint32 { + nm := value.number() + if nm.kind != numberInteger || !isUint32(nm.int64) { + // FIXME + panic(rt.panicRangeError()) + } + return uint32(nm.int64) +} + +func arrayDefineOwnProperty(self *_object, name string, descriptor _property, throw bool) bool { + lengthProperty := self.getOwnProperty("length") + lengthValue, valid := lengthProperty.value.(Value) + if !valid { + panic("Array.length != 
Value{}") + } + length := lengthValue.value.(uint32) + if name == "length" { + if descriptor.value == nil { + return objectDefineOwnProperty(self, name, descriptor, throw) + } + newLengthValue, isValue := descriptor.value.(Value) + if !isValue { + panic(self.runtime.panicTypeError()) + } + newLength := arrayUint32(self.runtime, newLengthValue) + descriptor.value = toValue_uint32(newLength) + if newLength > length { + return objectDefineOwnProperty(self, name, descriptor, throw) + } + if !lengthProperty.writable() { + goto Reject + } + newWritable := true + if descriptor.mode&0700 == 0 { + // If writable is off + newWritable = false + descriptor.mode |= 0100 + } + if !objectDefineOwnProperty(self, name, descriptor, throw) { + return false + } + for newLength < length { + length-- + if !self.delete(strconv.FormatInt(int64(length), 10), false) { + descriptor.value = toValue_uint32(length + 1) + if !newWritable { + descriptor.mode &= 0077 + } + objectDefineOwnProperty(self, name, descriptor, false) + goto Reject + } + } + if !newWritable { + descriptor.mode &= 0077 + objectDefineOwnProperty(self, name, descriptor, false) + } + } else if index := stringToArrayIndex(name); index >= 0 { + if index >= int64(length) && !lengthProperty.writable() { + goto Reject + } + if !objectDefineOwnProperty(self, strconv.FormatInt(index, 10), descriptor, false) { + goto Reject + } + if index >= int64(length) { + lengthProperty.value = toValue_uint32(uint32(index + 1)) + objectDefineOwnProperty(self, "length", *lengthProperty, false) + return true + } + } + return objectDefineOwnProperty(self, name, descriptor, throw) +Reject: + if throw { + panic(self.runtime.panicTypeError()) + } + return false +} diff --git a/vendor/github.com/robertkrimen/otto/type_boolean.go b/vendor/github.com/robertkrimen/otto/type_boolean.go new file mode 100644 index 000000000..afc45c69b --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/type_boolean.go @@ -0,0 +1,13 @@ +package otto + +import ( + "strconv" +) + +func (runtime *_runtime) newBooleanObject(value Value) *_object { + return runtime.newPrimitiveObject("Boolean", toValue_bool(value.bool())) +} + +func booleanToString(value bool) string { + return strconv.FormatBool(value) +} diff --git a/vendor/github.com/robertkrimen/otto/type_date.go b/vendor/github.com/robertkrimen/otto/type_date.go new file mode 100644 index 000000000..7079e649c --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/type_date.go @@ -0,0 +1,299 @@ +package otto + +import ( + "fmt" + "math" + "regexp" + Time "time" +) + +type _dateObject struct { + time Time.Time // Time from the "time" package, a cached version of time + epoch int64 + value Value + isNaN bool +} + +var ( + invalidDateObject = _dateObject{ + time: Time.Time{}, + epoch: -1, + value: NaNValue(), + isNaN: true, + } +) + +type _ecmaTime struct { + year int + month int + day int + hour int + minute int + second int + millisecond int + location *Time.Location // Basically, either local or UTC +} + +func ecmaTime(goTime Time.Time) _ecmaTime { + return _ecmaTime{ + goTime.Year(), + dateFromGoMonth(goTime.Month()), + goTime.Day(), + goTime.Hour(), + goTime.Minute(), + goTime.Second(), + goTime.Nanosecond() / (100 * 100 * 100), + goTime.Location(), + } +} + +func (self *_ecmaTime) goTime() Time.Time { + return Time.Date( + self.year, + dateToGoMonth(self.month), + self.day, + self.hour, + self.minute, + self.second, + self.millisecond*(100*100*100), + self.location, + ) +} + +func (self *_dateObject) Time() Time.Time { + return self.time +} + 
+func (self *_dateObject) Epoch() int64 { + return self.epoch +} + +func (self *_dateObject) Value() Value { + return self.value +} + +// FIXME A date should only be in the range of -100,000,000 to +100,000,000 (1970): 15.9.1.1 +func (self *_dateObject) SetNaN() { + self.time = Time.Time{} + self.epoch = -1 + self.value = NaNValue() + self.isNaN = true +} + +func (self *_dateObject) SetTime(time Time.Time) { + self.Set(timeToEpoch(time)) +} + +func epoch2dateObject(epoch float64) _dateObject { + date := _dateObject{} + date.Set(epoch) + return date +} + +func (self *_dateObject) Set(epoch float64) { + // epoch + self.epoch = epochToInteger(epoch) + + // time + time, err := epochToTime(epoch) + self.time = time // Is either a valid time, or the zero-value for time.Time + + // value & isNaN + if err != nil { + self.isNaN = true + self.epoch = -1 + self.value = NaNValue() + } else { + self.value = toValue_int64(self.epoch) + } +} + +func epochToInteger(value float64) int64 { + if value > 0 { + return int64(math.Floor(value)) + } + return int64(math.Ceil(value)) +} + +func epochToTime(value float64) (time Time.Time, err error) { + epochWithMilli := value + if math.IsNaN(epochWithMilli) || math.IsInf(epochWithMilli, 0) { + err = fmt.Errorf("Invalid time %v", value) + return + } + + epoch := int64(epochWithMilli / 1000) + milli := int64(epochWithMilli) % 1000 + + time = Time.Unix(int64(epoch), milli*1000000).UTC() + return +} + +func timeToEpoch(time Time.Time) float64 { + return float64(time.UnixNano() / (1000 * 1000)) +} + +func (runtime *_runtime) newDateObject(epoch float64) *_object { + self := runtime.newObject() + self.class = "Date" + + // FIXME This is ugly... + date := _dateObject{} + date.Set(epoch) + self.value = date + return self +} + +func (self *_object) dateValue() _dateObject { + value, _ := self.value.(_dateObject) + return value +} + +func dateObjectOf(rt *_runtime, _dateObject *_object) _dateObject { + if _dateObject == nil || _dateObject.class != "Date" { + panic(rt.panicTypeError()) + } + return _dateObject.dateValue() +} + +// JavaScript is 0-based, Go is 1-based (15.9.1.4) +func dateToGoMonth(month int) Time.Month { + return Time.Month(month + 1) +} + +func dateFromGoMonth(month Time.Month) int { + return int(month) - 1 +} + +// Both JavaScript & Go are 0-based (Sunday == 0) +func dateToGoDay(day int) Time.Weekday { + return Time.Weekday(day) +} + +func dateFromGoDay(day Time.Weekday) int { + return int(day) +} + +func newDateTime(argumentList []Value, location *Time.Location) (epoch float64) { + + pick := func(index int, default_ float64) (float64, bool) { + if index >= len(argumentList) { + return default_, false + } + value := argumentList[index].float64() + if math.IsNaN(value) || math.IsInf(value, 0) { + return 0, true + } + return value, false + } + + if len(argumentList) >= 2 { // 2-argument, 3-argument, ... 
+ var year, month, day, hour, minute, second, millisecond float64 + var invalid bool + if year, invalid = pick(0, 1900.0); invalid { + goto INVALID + } + if month, invalid = pick(1, 0.0); invalid { + goto INVALID + } + if day, invalid = pick(2, 1.0); invalid { + goto INVALID + } + if hour, invalid = pick(3, 0.0); invalid { + goto INVALID + } + if minute, invalid = pick(4, 0.0); invalid { + goto INVALID + } + if second, invalid = pick(5, 0.0); invalid { + goto INVALID + } + if millisecond, invalid = pick(6, 0.0); invalid { + goto INVALID + } + + if year >= 0 && year <= 99 { + year += 1900 + } + + time := Time.Date(int(year), dateToGoMonth(int(month)), int(day), int(hour), int(minute), int(second), int(millisecond)*1000*1000, location) + return timeToEpoch(time) + + } else if len(argumentList) == 0 { // 0-argument + time := Time.Now().UTC() + return timeToEpoch(time) + } else { // 1-argument + value := valueOfArrayIndex(argumentList, 0) + value = toPrimitive(value) + if value.IsString() { + return dateParse(value.string()) + } + + return value.float64() + } + +INVALID: + epoch = math.NaN() + return +} + +var ( + dateLayoutList = []string{ + "2006", + "2006-01", + "2006-01-02", + + "2006T15:04", + "2006-01T15:04", + "2006-01-02T15:04", + + "2006T15:04:05", + "2006-01T15:04:05", + "2006-01-02T15:04:05", + + "2006T15:04:05.000", + "2006-01T15:04:05.000", + "2006-01-02T15:04:05.000", + + "2006T15:04-0700", + "2006-01T15:04-0700", + "2006-01-02T15:04-0700", + + "2006T15:04:05-0700", + "2006-01T15:04:05-0700", + "2006-01-02T15:04:05-0700", + + "2006T15:04:05.000-0700", + "2006-01T15:04:05.000-0700", + "2006-01-02T15:04:05.000-0700", + + Time.RFC1123, + } + matchDateTimeZone = regexp.MustCompile(`^(.*)(?:(Z)|([\+\-]\d{2}):(\d{2}))$`) +) + +func dateParse(date string) (epoch float64) { + // YYYY-MM-DDTHH:mm:ss.sssZ + var time Time.Time + var err error + { + date := date + if match := matchDateTimeZone.FindStringSubmatch(date); match != nil { + if match[2] == "Z" { + date = match[1] + "+0000" + } else { + date = match[1] + match[3] + match[4] + } + } + for _, layout := range dateLayoutList { + time, err = Time.Parse(layout, date) + if err == nil { + break + } + } + } + if err != nil { + return math.NaN() + } + return float64(time.UnixNano()) / (1000 * 1000) // UnixMilli() +} diff --git a/vendor/github.com/robertkrimen/otto/type_error.go b/vendor/github.com/robertkrimen/otto/type_error.go new file mode 100644 index 000000000..84e5d79b1 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/type_error.go @@ -0,0 +1,24 @@ +package otto + +func (rt *_runtime) newErrorObject(name string, message Value, stackFramesToPop int) *_object { + self := rt.newClassObject("Error") + if message.IsDefined() { + msg := message.string() + self.defineProperty("message", toValue_string(msg), 0111, false) + self.value = newError(rt, name, stackFramesToPop, msg) + } else { + self.value = newError(rt, name, stackFramesToPop) + } + + self.defineOwnProperty("stack", _property{ + value: _propertyGetSet{ + rt.newNativeFunction("get", "internal", 0, func(FunctionCall) Value { + return toValue_string(self.value.(_error).formatWithStack()) + }), + &_nilGetSetObject, + }, + mode: modeConfigureMask & modeOnMask, + }, false) + + return self +} diff --git a/vendor/github.com/robertkrimen/otto/type_function.go b/vendor/github.com/robertkrimen/otto/type_function.go new file mode 100644 index 000000000..5637dc605 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/type_function.go @@ -0,0 +1,292 @@ +package otto + +// 
_constructFunction +type _constructFunction func(*_object, []Value) Value + +// 13.2.2 [[Construct]] +func defaultConstruct(fn *_object, argumentList []Value) Value { + object := fn.runtime.newObject() + object.class = "Object" + + prototype := fn.get("prototype") + if prototype.kind != valueObject { + prototype = toValue_object(fn.runtime.global.ObjectPrototype) + } + object.prototype = prototype._object() + + this := toValue_object(object) + value := fn.call(this, argumentList, false, nativeFrame) + if value.kind == valueObject { + return value + } + return this +} + +// _nativeFunction +type _nativeFunction func(FunctionCall) Value + +// ===================== // +// _nativeFunctionObject // +// ===================== // + +type _nativeFunctionObject struct { + name string + file string + line int + call _nativeFunction // [[Call]] + construct _constructFunction // [[Construct]] +} + +func (runtime *_runtime) newNativeFunctionObject(name, file string, line int, native _nativeFunction, length int) *_object { + self := runtime.newClassObject("Function") + self.value = _nativeFunctionObject{ + name: name, + file: file, + line: line, + call: native, + construct: defaultConstruct, + } + self.defineProperty("length", toValue_int(length), 0000, false) + return self +} + +// =================== // +// _bindFunctionObject // +// =================== // + +type _bindFunctionObject struct { + target *_object + this Value + argumentList []Value +} + +func (runtime *_runtime) newBoundFunctionObject(target *_object, this Value, argumentList []Value) *_object { + self := runtime.newClassObject("Function") + self.value = _bindFunctionObject{ + target: target, + this: this, + argumentList: argumentList, + } + length := int(toInt32(target.get("length"))) + length -= len(argumentList) + if length < 0 { + length = 0 + } + self.defineProperty("length", toValue_int(length), 0000, false) + self.defineProperty("caller", Value{}, 0000, false) // TODO Should throw a TypeError + self.defineProperty("arguments", Value{}, 0000, false) // TODO Should throw a TypeError + return self +} + +// [[Construct]] +func (fn _bindFunctionObject) construct(argumentList []Value) Value { + object := fn.target + switch value := object.value.(type) { + case _nativeFunctionObject: + return value.construct(object, fn.argumentList) + case _nodeFunctionObject: + argumentList = append(fn.argumentList, argumentList...) 
+ return object.construct(argumentList) + } + panic(fn.target.runtime.panicTypeError()) +} + +// =================== // +// _nodeFunctionObject // +// =================== // + +type _nodeFunctionObject struct { + node *_nodeFunctionLiteral + stash _stash +} + +func (runtime *_runtime) newNodeFunctionObject(node *_nodeFunctionLiteral, stash _stash) *_object { + self := runtime.newClassObject("Function") + self.value = _nodeFunctionObject{ + node: node, + stash: stash, + } + self.defineProperty("length", toValue_int(len(node.parameterList)), 0000, false) + return self +} + +// ======= // +// _object // +// ======= // + +func (self *_object) isCall() bool { + switch fn := self.value.(type) { + case _nativeFunctionObject: + return fn.call != nil + case _bindFunctionObject: + return true + case _nodeFunctionObject: + return true + } + return false +} + +func (self *_object) call(this Value, argumentList []Value, eval bool, frame _frame) Value { + switch fn := self.value.(type) { + + case _nativeFunctionObject: + // Since eval is a native function, we only have to check for it here + if eval { + eval = self == self.runtime.eval // If eval is true, then it IS a direct eval + } + + // Enter a scope, name from the native object... + rt := self.runtime + if rt.scope != nil && !eval { + rt.enterFunctionScope(rt.scope.lexical, this) + rt.scope.frame = _frame{ + native: true, + nativeFile: fn.file, + nativeLine: fn.line, + callee: fn.name, + file: nil, + } + defer func() { + rt.leaveScope() + }() + } + + return fn.call(FunctionCall{ + runtime: self.runtime, + eval: eval, + + This: this, + ArgumentList: argumentList, + Otto: self.runtime.otto, + }) + + case _bindFunctionObject: + // TODO Passthrough site, do not enter a scope + argumentList = append(fn.argumentList, argumentList...) 
+ return fn.target.call(fn.this, argumentList, false, frame) + + case _nodeFunctionObject: + rt := self.runtime + stash := rt.enterFunctionScope(fn.stash, this) + rt.scope.frame = _frame{ + callee: fn.node.name, + file: fn.node.file, + } + defer func() { + rt.leaveScope() + }() + callValue := rt.cmpl_call_nodeFunction(self, stash, fn.node, this, argumentList) + if value, valid := callValue.value.(_result); valid { + return value.value + } + return callValue + } + + panic(self.runtime.panicTypeError("%v is not a function", toValue_object(self))) +} + +func (self *_object) construct(argumentList []Value) Value { + switch fn := self.value.(type) { + + case _nativeFunctionObject: + if fn.call == nil { + panic(self.runtime.panicTypeError("%v is not a function", toValue_object(self))) + } + if fn.construct == nil { + panic(self.runtime.panicTypeError("%v is not a constructor", toValue_object(self))) + } + return fn.construct(self, argumentList) + + case _bindFunctionObject: + return fn.construct(argumentList) + + case _nodeFunctionObject: + return defaultConstruct(self, argumentList) + } + + panic(self.runtime.panicTypeError("%v is not a function", toValue_object(self))) +} + +// 15.3.5.3 +func (self *_object) hasInstance(of Value) bool { + if !self.isCall() { + // We should not have a hasInstance method + panic(self.runtime.panicTypeError()) + } + if !of.IsObject() { + return false + } + prototype := self.get("prototype") + if !prototype.IsObject() { + panic(self.runtime.panicTypeError()) + } + prototypeObject := prototype._object() + + value := of._object().prototype + for value != nil { + if value == prototypeObject { + return true + } + value = value.prototype + } + return false +} + +// ============ // +// FunctionCall // +// ============ // + +// FunctionCall is an encapsulation of a JavaScript function call. +type FunctionCall struct { + runtime *_runtime + _thisObject *_object + eval bool // This call is a direct call to eval + + This Value + ArgumentList []Value + Otto *Otto +} + +// Argument will return the value of the argument at the given index. +// +// If no such argument exists, undefined is returned. +func (self FunctionCall) Argument(index int) Value { + return valueOfArrayIndex(self.ArgumentList, index) +} + +func (self FunctionCall) getArgument(index int) (Value, bool) { + return getValueOfArrayIndex(self.ArgumentList, index) +} + +func (self FunctionCall) slice(index int) []Value { + if index < len(self.ArgumentList) { + return self.ArgumentList[index:] + } + return []Value{} +} + +func (self *FunctionCall) thisObject() *_object { + if self._thisObject == nil { + this := self.This.resolve() // FIXME Is this right? + self._thisObject = self.runtime.toObject(this) + } + return self._thisObject +} + +func (self *FunctionCall) thisClassObject(class string) *_object { + thisObject := self.thisObject() + if thisObject.class != class { + panic(self.runtime.panicTypeError()) + } + return self._thisObject +} + +func (self FunctionCall) toObject(value Value) *_object { + return self.runtime.toObject(value) +} + +// CallerLocation will return file location information (file:line:pos) where this function is being called. 
+func (self FunctionCall) CallerLocation() string { + // see error.go for location() + return self.runtime.scope.outer.frame.location() +} diff --git a/vendor/github.com/robertkrimen/otto/type_go_array.go b/vendor/github.com/robertkrimen/otto/type_go_array.go new file mode 100644 index 000000000..13a0b10f2 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/type_go_array.go @@ -0,0 +1,134 @@ +package otto + +import ( + "reflect" + "strconv" +) + +func (runtime *_runtime) newGoArrayObject(value reflect.Value) *_object { + self := runtime.newObject() + self.class = "GoArray" + self.objectClass = _classGoArray + self.value = _newGoArrayObject(value) + return self +} + +type _goArrayObject struct { + value reflect.Value + writable bool + propertyMode _propertyMode +} + +func _newGoArrayObject(value reflect.Value) *_goArrayObject { + writable := value.Kind() == reflect.Ptr // The Array is addressable (like a Slice) + mode := _propertyMode(0010) + if writable { + mode = 0110 + } + self := &_goArrayObject{ + value: value, + writable: writable, + propertyMode: mode, + } + return self +} + +func (self _goArrayObject) getValue(index int64) (reflect.Value, bool) { + value := reflect.Indirect(self.value) + if index < int64(value.Len()) { + return value.Index(int(index)), true + } + return reflect.Value{}, false +} + +func (self _goArrayObject) setValue(index int64, value Value) bool { + indexValue, exists := self.getValue(index) + if !exists { + return false + } + reflectValue, err := value.toReflectValue(reflect.Indirect(self.value).Type().Elem().Kind()) + if err != nil { + panic(err) + } + indexValue.Set(reflectValue) + return true +} + +func goArrayGetOwnProperty(self *_object, name string) *_property { + // length + if name == "length" { + return &_property{ + value: toValue(reflect.Indirect(self.value.(*_goArrayObject).value).Len()), + mode: 0, + } + } + + // .0, .1, .2, ... + index := stringToArrayIndex(name) + if index >= 0 { + object := self.value.(*_goArrayObject) + value := Value{} + reflectValue, exists := object.getValue(index) + if exists { + value = self.runtime.toValue(reflectValue.Interface()) + } + return &_property{ + value: value, + mode: object.propertyMode, + } + } + + return objectGetOwnProperty(self, name) +} + +func goArrayEnumerate(self *_object, all bool, each func(string) bool) { + object := self.value.(*_goArrayObject) + // .0, .1, .2, ... + + for index, length := 0, object.value.Len(); index < length; index++ { + name := strconv.FormatInt(int64(index), 10) + if !each(name) { + return + } + } + + objectEnumerate(self, all, each) +} + +func goArrayDefineOwnProperty(self *_object, name string, descriptor _property, throw bool) bool { + if name == "length" { + return self.runtime.typeErrorResult(throw) + } else if index := stringToArrayIndex(name); index >= 0 { + object := self.value.(*_goArrayObject) + if object.writable { + if self.value.(*_goArrayObject).setValue(index, descriptor.value.(Value)) { + return true + } + } + return self.runtime.typeErrorResult(throw) + } + return objectDefineOwnProperty(self, name, descriptor, throw) +} + +func goArrayDelete(self *_object, name string, throw bool) bool { + // length + if name == "length" { + return self.runtime.typeErrorResult(throw) + } + + // .0, .1, .2, ... 
+ index := stringToArrayIndex(name) + if index >= 0 { + object := self.value.(*_goArrayObject) + if object.writable { + indexValue, exists := object.getValue(index) + if exists { + indexValue.Set(reflect.Zero(reflect.Indirect(object.value).Type().Elem())) + return true + } + } + return self.runtime.typeErrorResult(throw) + } + + return self.delete(name, throw) +} diff --git a/vendor/github.com/robertkrimen/otto/type_go_map.go b/vendor/github.com/robertkrimen/otto/type_go_map.go new file mode 100644 index 000000000..3e204a028 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/type_go_map.go @@ -0,0 +1,87 @@ +package otto + +import ( + "reflect" +) + +func (runtime *_runtime) newGoMapObject(value reflect.Value) *_object { + self := runtime.newObject() + self.class = "Object" // TODO Should this be something else? + self.objectClass = _classGoMap + self.value = _newGoMapObject(value) + return self +} + +type _goMapObject struct { + value reflect.Value + keyKind reflect.Kind + valueKind reflect.Kind +} + +func _newGoMapObject(value reflect.Value) *_goMapObject { + if value.Kind() != reflect.Map { + dbgf("%/panic//%@: %v != reflect.Map", value.Kind()) + } + self := &_goMapObject{ + value: value, + keyKind: value.Type().Key().Kind(), + valueKind: value.Type().Elem().Kind(), + } + return self +} + +func (self _goMapObject) toKey(name string) reflect.Value { + reflectValue, err := stringToReflectValue(name, self.keyKind) + if err != nil { + panic(err) + } + return reflectValue +} + +func (self _goMapObject) toValue(value Value) reflect.Value { + reflectValue, err := value.toReflectValue(self.valueKind) + if err != nil { + panic(err) + } + return reflectValue +} + +func goMapGetOwnProperty(self *_object, name string) *_property { + object := self.value.(*_goMapObject) + value := object.value.MapIndex(object.toKey(name)) + if value.IsValid() { + return &_property{self.runtime.toValue(value.Interface()), 0111} + } + + return nil +} + +func goMapEnumerate(self *_object, all bool, each func(string) bool) { + object := self.value.(*_goMapObject) + keys := object.value.MapKeys() + for _, key := range keys { + if !each(toValue(key).String()) { + return + } + } +} + +func goMapDefineOwnProperty(self *_object, name string, descriptor _property, throw bool) bool { + object := self.value.(*_goMapObject) + // TODO ...or 0222 + if descriptor.mode != 0111 { + return self.runtime.typeErrorResult(throw) + } + if !descriptor.isDataDescriptor() { + return self.runtime.typeErrorResult(throw) + } + object.value.SetMapIndex(object.toKey(name), object.toValue(descriptor.value.(Value))) + return true +} + +func goMapDelete(self *_object, name string, throw bool) bool { + object := self.value.(*_goMapObject) + object.value.SetMapIndex(object.toKey(name), reflect.Value{}) + // FIXME + return true +} diff --git a/vendor/github.com/robertkrimen/otto/type_go_slice.go b/vendor/github.com/robertkrimen/otto/type_go_slice.go new file mode 100644 index 000000000..78c69e684 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/type_go_slice.go @@ -0,0 +1,126 @@ +package otto + +import ( + "reflect" + "strconv" +) + +func (runtime *_runtime) newGoSliceObject(value reflect.Value) *_object { + self := runtime.newObject() + self.class = "GoArray" // TODO GoSlice? 
+ self.objectClass = _classGoSlice + self.value = _newGoSliceObject(value) + return self +} + +type _goSliceObject struct { + value reflect.Value +} + +func _newGoSliceObject(value reflect.Value) *_goSliceObject { + self := &_goSliceObject{ + value: value, + } + return self +} + +func (self _goSliceObject) getValue(index int64) (reflect.Value, bool) { + if index < int64(self.value.Len()) { + return self.value.Index(int(index)), true + } + return reflect.Value{}, false +} + +func (self _goSliceObject) setValue(index int64, value Value) bool { + indexValue, exists := self.getValue(index) + if !exists { + return false + } + reflectValue, err := value.toReflectValue(self.value.Type().Elem().Kind()) + if err != nil { + panic(err) + } + indexValue.Set(reflectValue) + return true +} + +func goSliceGetOwnProperty(self *_object, name string) *_property { + // length + if name == "length" { + return &_property{ + value: toValue(self.value.(*_goSliceObject).value.Len()), + mode: 0, + } + } + + // .0, .1, .2, ... + index := stringToArrayIndex(name) + if index >= 0 { + value := Value{} + reflectValue, exists := self.value.(*_goSliceObject).getValue(index) + if exists { + value = self.runtime.toValue(reflectValue.Interface()) + } + return &_property{ + value: value, + mode: 0110, + } + } + + // Other methods + if method := self.value.(*_goSliceObject).value.MethodByName(name); (method != reflect.Value{}) { + return &_property{ + value: self.runtime.toValue(method.Interface()), + mode: 0110, + } + } + + return objectGetOwnProperty(self, name) +} + +func goSliceEnumerate(self *_object, all bool, each func(string) bool) { + object := self.value.(*_goSliceObject) + // .0, .1, .2, ... + + for index, length := 0, object.value.Len(); index < length; index++ { + name := strconv.FormatInt(int64(index), 10) + if !each(name) { + return + } + } + + objectEnumerate(self, all, each) +} + +func goSliceDefineOwnProperty(self *_object, name string, descriptor _property, throw bool) bool { + if name == "length" { + return self.runtime.typeErrorResult(throw) + } else if index := stringToArrayIndex(name); index >= 0 { + if self.value.(*_goSliceObject).setValue(index, descriptor.value.(Value)) { + return true + } + return self.runtime.typeErrorResult(throw) + } + return objectDefineOwnProperty(self, name, descriptor, throw) +} + +func goSliceDelete(self *_object, name string, throw bool) bool { + // length + if name == "length" { + return self.runtime.typeErrorResult(throw) + } + + // .0, .1, .2, ... + index := stringToArrayIndex(name) + if index >= 0 { + object := self.value.(*_goSliceObject) + indexValue, exists := object.getValue(index) + if exists { + indexValue.Set(reflect.Zero(object.value.Type().Elem())) + return true + } + return self.runtime.typeErrorResult(throw) + } + + return self.delete(name, throw) +} diff --git a/vendor/github.com/robertkrimen/otto/type_go_struct.go b/vendor/github.com/robertkrimen/otto/type_go_struct.go new file mode 100644 index 000000000..608ac6660 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/type_go_struct.go @@ -0,0 +1,146 @@ +package otto + +import ( + "encoding/json" + "reflect" +) + +// FIXME Make a note about not being able to modify a struct unless it was +// passed as a pointer-to: &struct{ ... } +// This seems to be a limitation of the reflect package. +// This goes for the other Go constructs too. +// I guess we could get around it by either: +// 1. Creating a new struct every time +// 2. Creating an addressable? 
struct in the constructor + +func (runtime *_runtime) newGoStructObject(value reflect.Value) *_object { + self := runtime.newObject() + self.class = "Object" // TODO Should this be something else? + self.objectClass = _classGoStruct + self.value = _newGoStructObject(value) + return self +} + +type _goStructObject struct { + value reflect.Value +} + +func _newGoStructObject(value reflect.Value) *_goStructObject { + if reflect.Indirect(value).Kind() != reflect.Struct { + dbgf("%/panic//%@: %v != reflect.Struct", value.Kind()) + } + self := &_goStructObject{ + value: value, + } + return self +} + +func (self _goStructObject) getValue(name string) reflect.Value { + if validGoStructName(name) { + // Do not reveal hidden or unexported fields + if field := reflect.Indirect(self.value).FieldByName(name); (field != reflect.Value{}) { + return field + } + + if method := self.value.MethodByName(name); (method != reflect.Value{}) { + return method + } + } + + return reflect.Value{} +} + +func (self _goStructObject) field(name string) (reflect.StructField, bool) { + return reflect.Indirect(self.value).Type().FieldByName(name) +} + +func (self _goStructObject) method(name string) (reflect.Method, bool) { + return reflect.Indirect(self.value).Type().MethodByName(name) +} + +func (self _goStructObject) setValue(name string, value Value) bool { + field, exists := self.field(name) + if !exists { + return false + } + fieldValue := self.getValue(name) + reflectValue, err := value.toReflectValue(field.Type.Kind()) + if err != nil { + panic(err) + } + fieldValue.Set(reflectValue) + + return true +} + +func goStructGetOwnProperty(self *_object, name string) *_property { + object := self.value.(*_goStructObject) + value := object.getValue(name) + if value.IsValid() { + return &_property{self.runtime.toValue(value.Interface()), 0110} + } + + return objectGetOwnProperty(self, name) +} + +func validGoStructName(name string) bool { + if name == "" { + return false + } + return 'A' <= name[0] && name[0] <= 'Z' // TODO What about Unicode? 
+} + +func goStructEnumerate(self *_object, all bool, each func(string) bool) { + object := self.value.(*_goStructObject) + + // Enumerate fields + for index := 0; index < reflect.Indirect(object.value).NumField(); index++ { + name := reflect.Indirect(object.value).Type().Field(index).Name + if validGoStructName(name) { + if !each(name) { + return + } + } + } + + // Enumerate methods + for index := 0; index < object.value.NumMethod(); index++ { + name := object.value.Type().Method(index).Name + if validGoStructName(name) { + if !each(name) { + return + } + } + } + + objectEnumerate(self, all, each) +} + +func goStructCanPut(self *_object, name string) bool { + object := self.value.(*_goStructObject) + value := object.getValue(name) + if value.IsValid() { + return true + } + + return objectCanPut(self, name) +} + +func goStructPut(self *_object, name string, value Value, throw bool) { + object := self.value.(*_goStructObject) + if object.setValue(name, value) { + return + } + + objectPut(self, name, value, throw) +} + +func goStructMarshalJSON(self *_object) json.Marshaler { + object := self.value.(*_goStructObject) + goValue := reflect.Indirect(object.value).Interface() + switch marshaler := goValue.(type) { + case json.Marshaler: + return marshaler + } + return nil +} diff --git a/vendor/github.com/robertkrimen/otto/type_number.go b/vendor/github.com/robertkrimen/otto/type_number.go new file mode 100644 index 000000000..28de4444c --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/type_number.go @@ -0,0 +1,5 @@ +package otto + +func (runtime *_runtime) newNumberObject(value Value) *_object { + return runtime.newPrimitiveObject("Number", value.numberValue()) +} diff --git a/vendor/github.com/robertkrimen/otto/type_reference.go b/vendor/github.com/robertkrimen/otto/type_reference.go new file mode 100644 index 000000000..fd770c6f4 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/type_reference.go @@ -0,0 +1,103 @@ +package otto + +type _reference interface { + invalid() bool // IsUnresolvableReference + getValue() Value // getValue + putValue(Value) string // PutValue + delete() bool +} + +// PropertyReference + +type _propertyReference struct { + name string + strict bool + base *_object + runtime *_runtime + at _at +} + +func newPropertyReference(rt *_runtime, base *_object, name string, strict bool, at _at) *_propertyReference { + return &_propertyReference{ + runtime: rt, + name: name, + strict: strict, + base: base, + at: at, + } +} + +func (self *_propertyReference) invalid() bool { + return self.base == nil +} + +func (self *_propertyReference) getValue() Value { + if self.base == nil { + panic(self.runtime.panicReferenceError("'%s' is not defined", self.name, self.at)) + } + return self.base.get(self.name) +} + +func (self *_propertyReference) putValue(value Value) string { + if self.base == nil { + return self.name + } + self.base.put(self.name, value, self.strict) + return "" +} + +func (self *_propertyReference) delete() bool { + if self.base == nil { + // TODO Throw an error if strict + return true + } + return self.base.delete(self.name, self.strict) +} + +// ArgumentReference + +func newArgumentReference(runtime *_runtime, base *_object, name string, strict bool, at _at) *_propertyReference { + if base == nil { + panic(hereBeDragons()) + } + return newPropertyReference(runtime, base, name, strict, at) +} + +type _stashReference struct { + name string + strict bool + base _stash +} + +func (self *_stashReference) invalid() bool { + return false // The base (an 
environment) will never be nil +} + +func (self *_stashReference) getValue() Value { + return self.base.getBinding(self.name, self.strict) +} + +func (self *_stashReference) putValue(value Value) string { + self.base.setValue(self.name, value, self.strict) + return "" +} + +func (self *_stashReference) delete() bool { + if self.base == nil { + // This should never be reached, but just in case + return false + } + return self.base.deleteBinding(self.name) +} + +// getIdentifierReference + +func getIdentifierReference(runtime *_runtime, stash _stash, name string, strict bool, at _at) _reference { + if stash == nil { + return newPropertyReference(runtime, nil, name, strict, at) + } + if stash.hasBinding(name) { + return stash.newReference(name, strict, at) + } + return getIdentifierReference(runtime, stash.outer(), name, strict, at) +} diff --git a/vendor/github.com/robertkrimen/otto/type_regexp.go b/vendor/github.com/robertkrimen/otto/type_regexp.go new file mode 100644 index 000000000..57fe31640 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/type_regexp.go @@ -0,0 +1,146 @@ +package otto + +import ( + "fmt" + "regexp" + "unicode/utf8" + + "github.com/robertkrimen/otto/parser" +) + +type _regExpObject struct { + regularExpression *regexp.Regexp + global bool + ignoreCase bool + multiline bool + source string + flags string +} + +func (runtime *_runtime) newRegExpObject(pattern string, flags string) *_object { + self := runtime.newObject() + self.class = "RegExp" + + global := false + ignoreCase := false + multiline := false + re2flags := "" + + // TODO Maybe clean up the panicking here... TypeError, SyntaxError, ? + + for _, chr := range flags { + switch chr { + case 'g': + if global { + panic(runtime.panicSyntaxError("newRegExpObject: %s %s", pattern, flags)) + } + global = true + case 'm': + if multiline { + panic(runtime.panicSyntaxError("newRegExpObject: %s %s", pattern, flags)) + } + multiline = true + re2flags += "m" + case 'i': + if ignoreCase { + panic(runtime.panicSyntaxError("newRegExpObject: %s %s", pattern, flags)) + } + ignoreCase = true + re2flags += "i" + } + } + + re2pattern, err := parser.TransformRegExp(pattern) + if err != nil { + panic(runtime.panicTypeError("Invalid regular expression: %s", err.Error())) + } + if len(re2flags) > 0 { + re2pattern = fmt.Sprintf("(?%s:%s)", re2flags, re2pattern) + } + + regularExpression, err := regexp.Compile(re2pattern) + if err != nil { + panic(runtime.panicSyntaxError("Invalid regular expression: %s", err.Error()[22:])) + } + + self.value = _regExpObject{ + regularExpression: regularExpression, + global: global, + ignoreCase: ignoreCase, + multiline: multiline, + source: pattern, + flags: flags, + } + self.defineProperty("global", toValue_bool(global), 0, false) + self.defineProperty("ignoreCase", toValue_bool(ignoreCase), 0, false) + self.defineProperty("multiline", toValue_bool(multiline), 0, false) + self.defineProperty("lastIndex", toValue_int(0), 0100, false) + self.defineProperty("source", toValue_string(pattern), 0, false) + return self +} + +func (self *_object) regExpValue() _regExpObject { + value, _ := self.value.(_regExpObject) + return value +} + +func execRegExp(this *_object, target string) (match bool, result []int) { + if this.class != "RegExp" { + panic(this.runtime.panicTypeError("Calling RegExp.exec on a non-RegExp object")) + } + lastIndex := this.get("lastIndex").number().int64 + index := lastIndex + global := this.get("global").bool() + if !global { + index = 0 + } + if 0 > index || index > 
int64(len(target)) { + } else { + result = this.regExpValue().regularExpression.FindStringSubmatchIndex(target[index:]) + } + if result == nil { + //this.defineProperty("lastIndex", toValue_(0), 0111, true) + this.put("lastIndex", toValue_int(0), true) + return // !match + } + match = true + startIndex := index + endIndex := int(lastIndex) + result[1] + // We do this shift here because the .FindStringSubmatchIndex above + // was done on a local subordinate slice of the string, not the whole string + for index, _ := range result { + result[index] += int(startIndex) + } + if global { + //this.defineProperty("lastIndex", toValue_(endIndex), 0111, true) + this.put("lastIndex", toValue_int(endIndex), true) + } + return // match +} + +func execResultToArray(runtime *_runtime, target string, result []int) *_object { + captureCount := len(result) / 2 + valueArray := make([]Value, captureCount) + for index := 0; index < captureCount; index++ { + offset := 2 * index + if result[offset] != -1 { + valueArray[index] = toValue_string(target[result[offset]:result[offset+1]]) + } else { + valueArray[index] = Value{} + } + } + matchIndex := result[0] + if matchIndex != 0 { + matchIndex = 0 + // Find the rune index in the string, not the byte index + for index := 0; index < result[0]; { + _, size := utf8.DecodeRuneInString(target[index:]) + matchIndex += 1 + index += size + } + } + match := runtime.newArrayOf(valueArray) + match.defineProperty("input", toValue_string(target), 0111, false) + match.defineProperty("index", toValue_int(matchIndex), 0111, false) + return match +} diff --git a/vendor/github.com/robertkrimen/otto/type_string.go b/vendor/github.com/robertkrimen/otto/type_string.go new file mode 100644 index 000000000..ef3afa42b --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/type_string.go @@ -0,0 +1,112 @@ +package otto + +import ( + "strconv" + "unicode/utf8" +) + +type _stringObject interface { + Length() int + At(int) rune + String() string +} + +type _stringASCII string + +func (str _stringASCII) Length() int { + return len(str) +} + +func (str _stringASCII) At(at int) rune { + return rune(str[at]) +} + +func (str _stringASCII) String() string { + return string(str) +} + +type _stringWide struct { + string string + length int + runes []rune +} + +func (str _stringWide) Length() int { + return str.length +} + +func (str _stringWide) At(at int) rune { + if str.runes == nil { + str.runes = []rune(str.string) + } + return str.runes[at] +} + +func (str _stringWide) String() string { + return str.string +} + +func _newStringObject(str string) _stringObject { + for i := 0; i < len(str); i++ { + if str[i] >= utf8.RuneSelf { + goto wide + } + } + + return _stringASCII(str) + +wide: + return &_stringWide{ + string: str, + length: utf8.RuneCountInString(str), + } +} + +func stringAt(str _stringObject, index int) rune { + if 0 <= index && index < str.Length() { + return str.At(index) + } + return utf8.RuneError +} + +func (runtime *_runtime) newStringObject(value Value) *_object { + str := _newStringObject(value.string()) + + self := runtime.newClassObject("String") + self.defineProperty("length", toValue_int(str.Length()), 0, false) + self.objectClass = _classString + self.value = str + return self +} + +func (self *_object) stringValue() _stringObject { + if str, ok := self.value.(_stringObject); ok { + return str + } + return nil +} + +func stringEnumerate(self *_object, all bool, each func(string) bool) { + if str := self.stringValue(); str != nil { + length := str.Length() + for index := 0; 
index < length; index++ { + if !each(strconv.FormatInt(int64(index), 10)) { + return + } + } + } + objectEnumerate(self, all, each) +} + +func stringGetOwnProperty(self *_object, name string) *_property { + if property := objectGetOwnProperty(self, name); property != nil { + return property + } + // TODO Test a string of length >= +int32 + 1? + if index := stringToArrayIndex(name); index >= 0 { + if chr := stringAt(self.stringValue(), int(index)); chr != utf8.RuneError { + return &_property{toValue_string(string(chr)), 0} + } + } + return nil +} diff --git a/vendor/github.com/robertkrimen/otto/underscore/Makefile b/vendor/github.com/robertkrimen/otto/underscore/Makefile new file mode 100644 index 000000000..fc872917f --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/underscore/Makefile @@ -0,0 +1,11 @@ +.PHONY: source + +source: source.go + +underscore.js: + curl -kL http://underscorejs.org/underscore.js > $@ + +source.go: underscore.js + go-bindata -f underscore -p underscore -u true < $< 2>/dev/null | grep -v '^//' | gofmt > $@ + head -4 $< >> $@ + mv $< .. diff --git a/vendor/github.com/robertkrimen/otto/underscore/README.markdown b/vendor/github.com/robertkrimen/otto/underscore/README.markdown new file mode 100644 index 000000000..bce37b695 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/underscore/README.markdown @@ -0,0 +1,53 @@ +# underscore +-- + import "github.com/robertkrimen/otto/underscore" + +Package underscore contains the source for the JavaScript utility-belt library. + + import ( + _ "github.com/robertkrimen/otto/underscore" + ) + // Every Otto runtime will now include underscore + +http://underscorejs.org + +https://github.com/documentcloud/underscore + +By importing this package, you'll automatically load underscore every time you +create a new Otto runtime. + +To prevent this behavior, you can do the following: + + import ( + "github.com/robertkrimen/otto/underscore" + ) + + func init() { + underscore.Disable() + } + +## Usage + +#### func Disable + +```go +func Disable() +``` +Disable underscore runtime inclusion. + +#### func Enable + +```go +func Enable() +``` +Enable underscore runtime inclusion. + +#### func Source + +```go +func Source() string +``` +Source returns the underscore source. 
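+A minimal sketch of calling the bundled underscore from Go, assuming the blank import shown above:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/robertkrimen/otto"
+	_ "github.com/robertkrimen/otto/underscore" // loads underscore into every new runtime
+)
+
+func main() {
+	vm := otto.New()
+	// _.map is provided by the embedded underscore.js source.
+	value, err := vm.Run(`_.map([1, 2, 3], function(n) { return n * 2 })`)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(value) // prints the resulting JavaScript array, e.g. 2,4,6
+}
+```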
+ +-- +**godocdown** http://github.com/robertkrimen/godocdown diff --git a/vendor/github.com/robertkrimen/otto/underscore/source.go b/vendor/github.com/robertkrimen/otto/underscore/source.go new file mode 100644 index 000000000..7c5df9714 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/underscore/source.go @@ -0,0 +1,3463 @@ +package underscore + +func underscore() []byte { + return []byte{ + 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x55, 0x6e, 0x64, 0x65, 0x72, + 0x73, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x6a, 0x73, 0x20, 0x31, 0x2e, 0x34, + 0x2e, 0x34, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x68, 0x74, + 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, + 0x6f, 0x72, 0x65, 0x6a, 0x73, 0x2e, 0x6f, 0x72, 0x67, 0x0a, 0x2f, 0x2f, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x63, 0x29, 0x20, 0x32, 0x30, 0x30, + 0x39, 0x2d, 0x32, 0x30, 0x31, 0x33, 0x20, 0x4a, 0x65, 0x72, 0x65, 0x6d, + 0x79, 0x20, 0x41, 0x73, 0x68, 0x6b, 0x65, 0x6e, 0x61, 0x73, 0x2c, 0x20, + 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x20, 0x49, 0x6e, 0x63, 0x2e, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, + 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65, 0x20, 0x66, 0x72, 0x65, 0x65, + 0x6c, 0x79, 0x20, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x64, 0x20, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x4d, 0x49, 0x54, 0x20, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, + 0x2e, 0x0a, 0x0a, 0x28, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x42, + 0x61, 0x73, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x20, 0x73, 0x65, 0x74, 0x75, + 0x70, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, + 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a, 0x0a, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, + 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x6f, 0x6f, 0x74, 0x20, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x20, 0x60, 0x77, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x60, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x62, + 0x72, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x60, + 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x60, 0x20, 0x6f, 0x6e, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x0a, 0x20, + 0x20, 0x76, 0x61, 0x72, 0x20, 0x72, 0x6f, 0x6f, 0x74, 0x20, 0x3d, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x53, 0x61, 0x76, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x72, 0x65, + 0x76, 0x69, 0x6f, 0x75, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, + 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x60, 0x5f, 0x60, 0x20, 0x76, + 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x0a, 0x20, 0x20, 0x76, + 0x61, 0x72, 0x20, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x55, + 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x20, 0x3d, 0x20, + 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x5f, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, + 0x2f, 0x20, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x74, + 0x68, 0x61, 0x74, 0x20, 0x67, 0x65, 0x74, 0x73, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x72, 0x65, + 0x61, 0x6b, 0x20, 0x6f, 0x75, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x20, + 0x6c, 0x6f, 0x6f, 0x70, 0x20, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x0a, 0x20, 0x20, 0x76, 
0x61, 0x72, 0x20, 0x62, 0x72, + 0x65, 0x61, 0x6b, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x7b, 0x7d, 0x3b, 0x0a, + 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x53, 0x61, 0x76, 0x65, 0x20, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x6d, 0x69, 0x6e, 0x69, 0x66, 0x69, 0x65, 0x64, 0x20, 0x28, 0x62, 0x75, + 0x74, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x67, 0x7a, 0x69, 0x70, 0x70, 0x65, + 0x64, 0x29, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x0a, + 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x20, 0x3d, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x2c, 0x20, + 0x4f, 0x62, 0x6a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x20, 0x3d, 0x20, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x74, + 0x79, 0x70, 0x65, 0x2c, 0x20, 0x46, 0x75, 0x6e, 0x63, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x20, 0x3d, 0x20, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x3b, + 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x20, 0x71, 0x75, 0x69, 0x63, 0x6b, 0x20, 0x72, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x73, 0x70, 0x65, 0x65, + 0x64, 0x20, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x20, 0x74, 0x6f, 0x20, + 0x63, 0x6f, 0x72, 0x65, 0x20, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x70, + 0x75, 0x73, 0x68, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3d, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x70, 0x75, 0x73, 0x68, 0x2c, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3d, 0x20, + 0x41, 0x72, 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73, + 0x6c, 0x69, 0x63, 0x65, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x63, 0x61, 0x74, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3d, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x63, 0x61, 0x74, + 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x6f, 0x53, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3d, 0x20, 0x4f, 0x62, 0x6a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x74, 0x6f, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x68, 0x61, 0x73, 0x4f, 0x77, 0x6e, 0x50, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x20, 0x20, 0x20, 0x3d, 0x20, 0x4f, + 0x62, 0x6a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x68, 0x61, 0x73, 0x4f, + 0x77, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x3b, 0x0a, + 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x41, 0x6c, 0x6c, 0x20, 0x2a, 0x2a, + 0x45, 0x43, 0x4d, 0x41, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x20, 0x35, + 0x2a, 0x2a, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x20, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x6d, 0x70, 0x6c, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x74, + 0x68, 0x61, 0x74, 0x20, 0x77, 0x65, 0x20, 0x68, 0x6f, 0x70, 0x65, 0x20, + 0x74, 0x6f, 0x20, 0x75, 0x73, 0x65, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x61, 0x72, 0x65, 0x20, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x65, 0x64, + 0x20, 0x68, 0x65, 0x72, 0x65, 0x2e, 0x0a, 
0x20, 0x20, 0x76, 0x61, 0x72, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x46, + 0x6f, 0x72, 0x45, 0x61, 0x63, 0x68, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x3d, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x66, 0x6f, 0x72, 0x45, 0x61, 0x63, 0x68, 0x2c, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x70, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3d, 0x20, 0x41, + 0x72, 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x61, + 0x70, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x52, 0x65, 0x64, 0x75, 0x63, 0x65, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x3d, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x64, 0x75, 0x63, 0x65, 0x2c, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x64, + 0x75, 0x63, 0x65, 0x52, 0x69, 0x67, 0x68, 0x74, 0x20, 0x20, 0x3d, 0x20, + 0x41, 0x72, 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, + 0x65, 0x64, 0x75, 0x63, 0x65, 0x52, 0x69, 0x67, 0x68, 0x74, 0x2c, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3d, + 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x45, 0x76, 0x65, 0x72, 0x79, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3d, 0x20, 0x41, 0x72, 0x72, + 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x65, 0x76, 0x65, 0x72, + 0x79, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x53, 0x6f, 0x6d, 0x65, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x3d, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x73, 0x6f, 0x6d, 0x65, 0x2c, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x4f, 0x66, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3d, 0x20, 0x41, 0x72, + 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x4f, 0x66, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x61, + 0x74, 0x69, 0x76, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x4f, 0x66, 0x20, 0x20, 0x3d, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6c, 0x61, 0x73, 0x74, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x4f, 0x66, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6e, + 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3d, 0x20, 0x41, 0x72, 0x72, 0x61, + 0x79, 0x2e, 0x69, 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, 0x2c, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x4b, 0x65, 0x79, + 0x73, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3d, 0x20, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2c, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x42, + 0x69, 0x6e, 0x64, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x3d, 0x20, 0x46, 0x75, 0x6e, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x62, 0x69, 0x6e, 0x64, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x20, 0x61, 0x20, 0x73, 0x61, 0x66, + 0x65, 0x20, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, + 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x55, 0x6e, 0x64, 0x65, 0x72, + 0x73, 0x63, 0x6f, 0x72, 0x65, 0x20, 0x6f, 
0x62, 0x6a, 0x65, 0x63, 0x74, + 0x20, 0x66, 0x6f, 0x72, 0x20, 0x75, 0x73, 0x65, 0x20, 0x62, 0x65, 0x6c, + 0x6f, 0x77, 0x2e, 0x0a, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x5f, 0x20, + 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6f, + 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, + 0x20, 0x28, 0x6f, 0x62, 0x6a, 0x20, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x6f, 0x66, 0x20, 0x5f, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x21, 0x28, 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x6f, 0x66, 0x20, 0x5f, 0x29, + 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6e, 0x65, 0x77, + 0x20, 0x5f, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70, + 0x65, 0x64, 0x20, 0x3d, 0x20, 0x6f, 0x62, 0x6a, 0x3b, 0x0a, 0x20, 0x20, + 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x45, 0x78, 0x70, + 0x6f, 0x72, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x55, 0x6e, 0x64, 0x65, + 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x2a, 0x2a, 0x4e, 0x6f, 0x64, 0x65, + 0x2e, 0x6a, 0x73, 0x2a, 0x2a, 0x2c, 0x20, 0x77, 0x69, 0x74, 0x68, 0x0a, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x77, 0x61, 0x72, + 0x64, 0x73, 0x2d, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x79, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x6f, 0x6c, 0x64, 0x20, 0x60, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x28, 0x29, 0x60, 0x20, 0x41, 0x50, 0x49, 0x2e, 0x20, 0x49, 0x66, + 0x20, 0x77, 0x65, 0x27, 0x72, 0x65, 0x20, 0x69, 0x6e, 0x0a, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x62, 0x72, 0x6f, 0x77, 0x73, + 0x65, 0x72, 0x2c, 0x20, 0x61, 0x64, 0x64, 0x20, 0x60, 0x5f, 0x60, 0x20, + 0x61, 0x73, 0x20, 0x61, 0x20, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x20, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x76, 0x69, 0x61, 0x20, 0x61, + 0x20, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2c, 0x0a, 0x20, 0x20, 0x2f, 0x2f, + 0x20, 0x66, 0x6f, 0x72, 0x20, 0x43, 0x6c, 0x6f, 0x73, 0x75, 0x72, 0x65, + 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x20, 0x22, 0x61, + 0x64, 0x76, 0x61, 0x6e, 0x63, 0x65, 0x64, 0x22, 0x20, 0x6d, 0x6f, 0x64, + 0x65, 0x2e, 0x0a, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x74, 0x79, 0x70, + 0x65, 0x6f, 0x66, 0x20, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x20, + 0x21, 0x3d, 0x3d, 0x20, 0x27, 0x75, 0x6e, 0x64, 0x65, 0x66, 0x69, 0x6e, + 0x65, 0x64, 0x27, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, + 0x66, 0x20, 0x28, 0x74, 0x79, 0x70, 0x65, 0x6f, 0x66, 0x20, 0x6d, 0x6f, + 0x64, 0x75, 0x6c, 0x65, 0x20, 0x21, 0x3d, 0x3d, 0x20, 0x27, 0x75, 0x6e, + 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x27, 0x20, 0x26, 0x26, 0x20, + 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x65, 0x78, 0x70, 0x6f, 0x72, + 0x74, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x20, 0x3d, 0x20, 0x6d, 0x6f, + 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, + 0x20, 0x3d, 0x20, 0x5f, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x2e, + 0x5f, 0x20, 0x3d, 0x20, 0x5f, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x20, 0x65, + 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 
0x20, 0x20, 0x20, 0x72, 0x6f, + 0x6f, 0x74, 0x2e, 0x5f, 0x20, 0x3d, 0x20, 0x5f, 0x3b, 0x0a, 0x20, 0x20, + 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x43, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x74, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, + 0x20, 0x3d, 0x20, 0x27, 0x31, 0x2e, 0x34, 0x2e, 0x34, 0x27, 0x3b, 0x0a, + 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x2d, 0x2d, 0x2d, 0x2d, + 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, + 0x2d, 0x2d, 0x2d, 0x2d, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x54, + 0x68, 0x65, 0x20, 0x63, 0x6f, 0x72, 0x6e, 0x65, 0x72, 0x73, 0x74, 0x6f, + 0x6e, 0x65, 0x2c, 0x20, 0x61, 0x6e, 0x20, 0x60, 0x65, 0x61, 0x63, 0x68, + 0x60, 0x20, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x20, 0x61, 0x6b, 0x61, 0x20, 0x60, 0x66, + 0x6f, 0x72, 0x45, 0x61, 0x63, 0x68, 0x60, 0x2e, 0x0a, 0x20, 0x20, 0x2f, + 0x2f, 0x20, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x20, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x62, 0x75, 0x69, 0x6c, 0x74, 0x2d, 0x69, 0x6e, 0x20, + 0x60, 0x66, 0x6f, 0x72, 0x45, 0x61, 0x63, 0x68, 0x60, 0x2c, 0x20, 0x61, + 0x72, 0x72, 0x61, 0x79, 0x73, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x72, + 0x61, 0x77, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2e, 0x0a, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, + 0x65, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x2a, 0x2a, 0x45, 0x43, 0x4d, 0x41, + 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x20, 0x35, 0x2a, 0x2a, 0x27, 0x73, + 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x20, 0x60, 0x66, 0x6f, 0x72, + 0x45, 0x61, 0x63, 0x68, 0x60, 0x20, 0x69, 0x66, 0x20, 0x61, 0x76, 0x61, + 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x0a, 0x20, 0x20, 0x76, 0x61, + 0x72, 0x20, 0x65, 0x61, 0x63, 0x68, 0x20, 0x3d, 0x20, 0x5f, 0x2e, 0x65, + 0x61, 0x63, 0x68, 0x20, 0x3d, 0x20, 0x5f, 0x2e, 0x66, 0x6f, 0x72, 0x45, + 0x61, 0x63, 0x68, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x28, 0x6f, 0x62, 0x6a, 0x2c, 0x20, 0x69, 0x74, 0x65, 0x72, + 0x61, 0x74, 0x6f, 0x72, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, + 0x28, 0x6f, 0x62, 0x6a, 0x20, 0x3d, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, + 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x46, 0x6f, 0x72, 0x45, 0x61, 0x63, 0x68, 0x20, 0x26, 0x26, 0x20, 0x6f, + 0x62, 0x6a, 0x2e, 0x66, 0x6f, 0x72, 0x45, 0x61, 0x63, 0x68, 0x20, 0x3d, + 0x3d, 0x3d, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x46, 0x6f, 0x72, + 0x45, 0x61, 0x63, 0x68, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x6f, 0x62, 0x6a, 0x2e, 0x66, 0x6f, 0x72, 0x45, 0x61, 0x63, + 0x68, 0x28, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2c, 0x20, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x69, 0x66, 0x20, + 0x28, 0x6f, 0x62, 0x6a, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, + 0x3d, 0x3d, 0x3d, 0x20, 0x2b, 0x6f, 0x62, 0x6a, 0x2e, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x76, 
0x61, 0x72, 0x20, 0x69, 0x20, + 0x3d, 0x20, 0x30, 0x2c, 0x20, 0x6c, 0x20, 0x3d, 0x20, 0x6f, 0x62, 0x6a, + 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x3b, 0x20, 0x69, 0x20, 0x3c, + 0x20, 0x6c, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x69, + 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x63, 0x61, 0x6c, 0x6c, + 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2c, 0x20, 0x6f, 0x62, + 0x6a, 0x5b, 0x69, 0x5d, 0x2c, 0x20, 0x69, 0x2c, 0x20, 0x6f, 0x62, 0x6a, + 0x29, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x65, + 0x72, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x76, 0x61, 0x72, 0x20, 0x6b, + 0x65, 0x79, 0x20, 0x69, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, + 0x28, 0x5f, 0x2e, 0x68, 0x61, 0x73, 0x28, 0x6f, 0x62, 0x6a, 0x2c, 0x20, + 0x6b, 0x65, 0x79, 0x29, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x69, 0x74, + 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2c, 0x20, 0x6f, 0x62, 0x6a, + 0x5b, 0x6b, 0x65, 0x79, 0x5d, 0x2c, 0x20, 0x6b, 0x65, 0x79, 0x2c, 0x20, + 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x62, 0x72, 0x65, + 0x61, 0x6b, 0x65, 0x72, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, + 0x20, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x61, + 0x70, 0x70, 0x6c, 0x79, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20, + 0x65, 0x61, 0x63, 0x68, 0x20, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x44, 0x65, 0x6c, 0x65, 0x67, + 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x2a, 0x2a, 0x45, 0x43, + 0x4d, 0x41, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x20, 0x35, 0x2a, 0x2a, + 0x27, 0x73, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x20, 0x60, 0x6d, + 0x61, 0x70, 0x60, 0x20, 0x69, 0x66, 0x20, 0x61, 0x76, 0x61, 0x69, 0x6c, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x6d, 0x61, + 0x70, 0x20, 0x3d, 0x20, 0x5f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x28, 0x6f, 0x62, 0x6a, 0x2c, 0x20, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x20, 0x3d, 0x20, 0x5b, 0x5d, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6f, 0x62, 0x6a, + 0x20, 0x3d, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x20, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6e, 0x61, + 0x74, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x70, 0x20, 0x26, 0x26, 0x20, 0x6f, + 0x62, 0x6a, 0x2e, 0x6d, 0x61, 0x70, 0x20, 
0x3d, 0x3d, 0x3d, 0x20, 0x6e, + 0x61, 0x74, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x70, 0x29, 0x20, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x2e, 0x6d, 0x61, 0x70, + 0x28, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2c, 0x20, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x65, 0x61, 0x63, 0x68, 0x28, 0x6f, 0x62, 0x6a, 0x2c, 0x20, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2c, 0x20, 0x6c, 0x69, + 0x73, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5b, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x73, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5d, 0x20, + 0x3d, 0x20, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x63, + 0x61, 0x6c, 0x6c, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2c, + 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x2c, 0x20, 0x6c, 0x69, 0x73, 0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x76, 0x61, + 0x72, 0x20, 0x72, 0x65, 0x64, 0x75, 0x63, 0x65, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x20, 0x3d, 0x20, 0x27, 0x52, 0x65, 0x64, 0x75, 0x63, 0x65, 0x20, + 0x6f, 0x66, 0x20, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x20, 0x61, 0x72, 0x72, + 0x61, 0x79, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x6e, 0x6f, 0x20, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x27, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x2a, 0x2a, 0x52, + 0x65, 0x64, 0x75, 0x63, 0x65, 0x2a, 0x2a, 0x20, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x73, 0x20, 0x75, 0x70, 0x20, 0x61, 0x20, 0x73, 0x69, 0x6e, 0x67, + 0x6c, 0x65, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x20, 0x66, 0x72, + 0x6f, 0x6d, 0x20, 0x61, 0x20, 0x6c, 0x69, 0x73, 0x74, 0x20, 0x6f, 0x66, + 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2c, 0x20, 0x61, 0x6b, 0x61, + 0x20, 0x60, 0x69, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x60, 0x2c, 0x0a, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x6f, 0x72, 0x20, 0x60, 0x66, 0x6f, 0x6c, 0x64, + 0x6c, 0x60, 0x2e, 0x20, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, + 0x73, 0x20, 0x74, 0x6f, 0x20, 0x2a, 0x2a, 0x45, 0x43, 0x4d, 0x41, 0x53, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x20, 0x35, 0x2a, 0x2a, 0x27, 0x73, 0x20, + 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x20, 0x60, 0x72, 0x65, 0x64, 0x75, + 0x63, 0x65, 0x60, 0x20, 0x69, 0x66, 0x20, 0x61, 0x76, 0x61, 0x69, 0x6c, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x72, 0x65, + 0x64, 0x75, 0x63, 0x65, 0x20, 0x3d, 0x20, 0x5f, 0x2e, 0x66, 0x6f, 0x6c, + 0x64, 0x6c, 0x20, 0x3d, 0x20, 0x5f, 0x2e, 0x69, 0x6e, 0x6a, 0x65, 0x63, + 0x74, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x28, 0x6f, 0x62, 0x6a, 0x2c, 0x20, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x2c, 0x20, 0x6d, 0x65, 0x6d, 0x6f, 0x2c, 0x20, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x76, 0x61, 0x72, 0x20, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, + 0x20, 0x3d, 0x20, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x3e, 0x20, 0x32, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6f, 0x62, 0x6a, + 0x20, 0x3d, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x20, 0x6f, 0x62, + 0x6a, 0x20, 0x3d, 0x20, 0x5b, 0x5d, 0x3b, 
0x0a, 0x20, 0x20, 0x20, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, + 0x64, 0x75, 0x63, 0x65, 0x20, 0x26, 0x26, 0x20, 0x6f, 0x62, 0x6a, 0x2e, + 0x72, 0x65, 0x64, 0x75, 0x63, 0x65, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x6e, + 0x61, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x64, 0x75, 0x63, 0x65, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, + 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x29, 0x20, 0x69, 0x74, + 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x20, 0x3d, 0x20, 0x5f, 0x2e, 0x62, + 0x69, 0x6e, 0x64, 0x28, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, + 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x29, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x20, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x20, 0x3f, 0x20, 0x6f, + 0x62, 0x6a, 0x2e, 0x72, 0x65, 0x64, 0x75, 0x63, 0x65, 0x28, 0x69, 0x74, + 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2c, 0x20, 0x6d, 0x65, 0x6d, 0x6f, + 0x29, 0x20, 0x3a, 0x20, 0x6f, 0x62, 0x6a, 0x2e, 0x72, 0x65, 0x64, 0x75, + 0x63, 0x65, 0x28, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x29, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x65, 0x61, 0x63, 0x68, 0x28, 0x6f, 0x62, 0x6a, 0x2c, 0x20, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2c, 0x20, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2c, 0x20, 0x6c, 0x69, 0x73, + 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, + 0x66, 0x20, 0x28, 0x21, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, + 0x65, 0x6d, 0x6f, 0x20, 0x3d, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x20, 0x3d, 0x20, 0x74, 0x72, 0x75, 0x65, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, + 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x6d, 0x65, 0x6d, 0x6f, 0x20, 0x3d, 0x20, 0x69, 0x74, 0x65, 0x72, 0x61, + 0x74, 0x6f, 0x72, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x2c, 0x20, 0x6d, 0x65, 0x6d, 0x6f, 0x2c, 0x20, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x2c, 0x20, 0x6c, 0x69, 0x73, 0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x61, 0x6c, 0x29, 0x20, 0x74, 0x68, 0x72, 0x6f, 0x77, + 0x20, 0x6e, 0x65, 0x77, 0x20, 0x54, 0x79, 0x70, 0x65, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x28, 0x72, 0x65, 0x64, 0x75, 0x63, 0x65, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x20, 0x6d, 0x65, 0x6d, 0x6f, 0x3b, 0x0a, 0x20, 0x20, + 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x54, 0x68, 0x65, + 0x20, 0x72, 0x69, 0x67, 0x68, 0x74, 0x2d, 0x61, 0x73, 0x73, 0x6f, 0x63, + 0x69, 0x61, 0x74, 0x69, 0x76, 0x65, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x72, 0x65, 0x64, 0x75, 0x63, 0x65, + 0x2c, 0x20, 0x61, 0x6c, 0x73, 0x6f, 0x20, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, + 0x20, 0x61, 0x73, 0x20, 0x60, 0x66, 0x6f, 0x6c, 0x64, 0x72, 0x60, 0x2e, + 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, + 0x74, 0x65, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x2a, 0x2a, 0x45, 0x43, 0x4d, + 0x41, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 
[hex byte dump continues: these `+` lines carry the embedded Underscore.js source as Go-style byte constants, 12 bytes per added line, flattened here by extraction. The decoded span covers the library's collection and array helpers — reduceRight/foldr, find/detect, filter/select, reject, every/all, some/any, contains/include, invoke, pluck, where, findWhere, max, min, shuffle, sortBy (with the internal lookupIterator), groupBy, countBy (with the internal group helper), sortedIndex, toArray, size, first/head/take, initial, last, rest/tail/drop, compact, flatten, without, and uniq/unique.]
0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x20, 0x61, + 0x6e, 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, 0x20, 0x74, 0x68, 0x61, 0x74, + 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x75, 0x6e, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x65, 0x61, 0x63, + 0x68, 0x20, 0x64, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x63, 0x74, 0x20, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, + 0x61, 0x6c, 0x6c, 0x20, 0x6f, 0x66, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x2d, 0x69, + 0x6e, 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, 0x73, 0x2e, 0x0a, 0x20, 0x20, + 0x5f, 0x2e, 0x75, 0x6e, 0x69, 0x6f, 0x6e, 0x20, 0x3d, 0x20, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x5f, 0x2e, + 0x75, 0x6e, 0x69, 0x71, 0x28, 0x63, 0x6f, 0x6e, 0x63, 0x61, 0x74, 0x2e, + 0x61, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x41, 0x72, 0x72, 0x61, 0x79, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2c, 0x20, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, + 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, + 0x65, 0x20, 0x61, 0x6e, 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, 0x20, 0x74, + 0x68, 0x61, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, + 0x20, 0x65, 0x76, 0x65, 0x72, 0x79, 0x20, 0x69, 0x74, 0x65, 0x6d, 0x20, + 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x20, 0x62, 0x65, 0x74, 0x77, 0x65, + 0x65, 0x6e, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x74, 0x68, 0x65, 0x0a, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x2d, 0x69, + 0x6e, 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, 0x73, 0x2e, 0x0a, 0x20, 0x20, + 0x5f, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x28, 0x61, 0x72, 0x72, 0x61, 0x79, 0x29, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x72, 0x65, 0x73, 0x74, 0x20, + 0x3d, 0x20, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x2e, 0x63, 0x61, 0x6c, 0x6c, + 0x28, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2c, 0x20, + 0x31, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x5f, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x28, + 0x5f, 0x2e, 0x75, 0x6e, 0x69, 0x71, 0x28, 0x61, 0x72, 0x72, 0x61, 0x79, + 0x29, 0x2c, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, + 0x69, 0x74, 0x65, 0x6d, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x5f, 0x2e, 0x65, + 0x76, 0x65, 0x72, 0x79, 0x28, 0x72, 0x65, 0x73, 0x74, 0x2c, 0x20, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6f, 0x74, 0x68, 0x65, + 0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x5f, 0x2e, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x4f, 0x66, 0x28, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x2c, + 0x20, 0x69, 0x74, 0x65, 0x6d, 0x29, 0x20, 0x3e, 0x3d, 0x20, 0x30, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, + 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x54, 0x61, 0x6b, 0x65, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x64, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x20, 0x62, 0x65, 0x74, 0x77, 0x65, 
0x65, 0x6e, 0x20, 0x6f, 0x6e, + 0x65, 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, 0x20, 0x61, 0x6e, 0x64, 0x20, + 0x61, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, + 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, 0x73, + 0x2e, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x4f, 0x6e, 0x6c, 0x79, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x20, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x20, 0x69, 0x6e, 0x20, + 0x6a, 0x75, 0x73, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, 0x69, 0x72, + 0x73, 0x74, 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, 0x20, 0x77, 0x69, 0x6c, + 0x6c, 0x20, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x0a, 0x20, 0x20, + 0x5f, 0x2e, 0x64, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, + 0x61, 0x72, 0x72, 0x61, 0x79, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x76, 0x61, 0x72, 0x20, 0x72, 0x65, 0x73, 0x74, 0x20, 0x3d, 0x20, + 0x63, 0x6f, 0x6e, 0x63, 0x61, 0x74, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x79, + 0x28, 0x41, 0x72, 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2c, + 0x20, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, + 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2c, 0x20, 0x31, + 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x5f, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x28, + 0x61, 0x72, 0x72, 0x61, 0x79, 0x2c, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x28, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x7b, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x21, 0x5f, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x28, 0x72, 0x65, 0x73, 0x74, 0x2c, + 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x3b, 0x20, 0x7d, 0x29, 0x3b, + 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x5a, 0x69, 0x70, 0x20, 0x74, 0x6f, 0x67, 0x65, 0x74, 0x68, 0x65, 0x72, + 0x20, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x20, 0x6c, 0x69, + 0x73, 0x74, 0x73, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x61, 0x20, 0x73, + 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, 0x20, + 0x2d, 0x2d, 0x20, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x20, + 0x74, 0x68, 0x61, 0x74, 0x20, 0x73, 0x68, 0x61, 0x72, 0x65, 0x0a, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x61, 0x6e, 0x20, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x20, 0x67, 0x6f, 0x20, 0x74, 0x6f, 0x67, 0x65, 0x74, 0x68, 0x65, 0x72, + 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x7a, 0x69, 0x70, 0x20, 0x3d, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x61, 0x72, 0x67, + 0x73, 0x20, 0x3d, 0x20, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x2e, 0x63, 0x61, + 0x6c, 0x6c, 0x28, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x3d, 0x20, 0x5f, 0x2e, 0x6d, 0x61, + 0x78, 0x28, 0x5f, 0x2e, 0x70, 0x6c, 0x75, 0x63, 0x6b, 0x28, 0x61, 0x72, + 0x67, 0x73, 0x2c, 0x20, 0x27, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x27, + 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x20, 0x3d, 0x20, 0x6e, 0x65, + 0x77, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x28, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x28, 0x76, 0x61, 0x72, 0x20, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, + 0x20, 0x69, 0x20, 0x3c, 0x20, 0x6c, 0x65, 
0x6e, 0x67, 0x74, 0x68, 0x3b, + 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5b, 0x69, 0x5d, + 0x20, 0x3d, 0x20, 0x5f, 0x2e, 0x70, 0x6c, 0x75, 0x63, 0x6b, 0x28, 0x61, + 0x72, 0x67, 0x73, 0x2c, 0x20, 0x22, 0x22, 0x20, 0x2b, 0x20, 0x69, 0x29, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x73, 0x20, + 0x6c, 0x69, 0x73, 0x74, 0x73, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2e, 0x20, 0x50, 0x61, 0x73, 0x73, + 0x20, 0x65, 0x69, 0x74, 0x68, 0x65, 0x72, 0x20, 0x61, 0x20, 0x73, 0x69, + 0x6e, 0x67, 0x6c, 0x65, 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, 0x20, 0x6f, + 0x66, 0x20, 0x60, 0x5b, 0x6b, 0x65, 0x79, 0x2c, 0x20, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x5d, 0x60, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x70, 0x61, + 0x69, 0x72, 0x73, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x77, 0x6f, 0x20, + 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x20, 0x61, 0x72, 0x72, + 0x61, 0x79, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, + 0x61, 0x6d, 0x65, 0x20, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x2d, + 0x2d, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x6b, 0x65, 0x79, + 0x73, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x6f, + 0x66, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, + 0x6f, 0x72, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x5f, + 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x3d, 0x20, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6c, 0x69, 0x73, 0x74, 0x2c, + 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6c, 0x69, 0x73, 0x74, 0x20, + 0x3d, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x20, 0x7b, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x76, 0x61, 0x72, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x20, 0x3d, + 0x20, 0x7b, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x28, 0x76, 0x61, 0x72, 0x20, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x2c, + 0x20, 0x6c, 0x20, 0x3d, 0x20, 0x6c, 0x69, 0x73, 0x74, 0x2e, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x6c, 0x3b, + 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5b, 0x6c, 0x69, 0x73, 0x74, 0x5b, + 0x69, 0x5d, 0x5d, 0x20, 0x3d, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, + 0x5b, 0x69, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5b, 0x6c, + 0x69, 0x73, 0x74, 0x5b, 0x69, 0x5d, 0x5b, 0x30, 0x5d, 0x5d, 0x20, 0x3d, + 0x20, 0x6c, 0x69, 0x73, 0x74, 0x5b, 0x69, 0x5d, 0x5b, 0x31, 0x5d, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x3b, 0x0a, 0x20, 0x20, + 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 
0x2f, 0x20, 0x49, 0x66, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x62, 0x72, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x20, + 0x64, 0x6f, 0x65, 0x73, 0x6e, 0x27, 0x74, 0x20, 0x73, 0x75, 0x70, 0x70, + 0x6c, 0x79, 0x20, 0x75, 0x73, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x66, 0x20, 0x28, 0x49, 0x27, 0x6d, 0x20, + 0x6c, 0x6f, 0x6f, 0x6b, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x74, 0x20, 0x79, + 0x6f, 0x75, 0x2c, 0x20, 0x2a, 0x2a, 0x4d, 0x53, 0x49, 0x45, 0x2a, 0x2a, + 0x29, 0x2c, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x77, 0x65, 0x20, 0x6e, + 0x65, 0x65, 0x64, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x20, 0x52, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, 0x69, + 0x72, 0x73, 0x74, 0x20, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x6e, 0x0a, 0x20, 0x20, 0x2f, + 0x2f, 0x20, 0x69, 0x74, 0x65, 0x6d, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x6e, + 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x2d, + 0x31, 0x20, 0x69, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x69, 0x74, 0x65, + 0x6d, 0x20, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x69, 0x6e, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, 0x2e, 0x0a, 0x20, 0x20, 0x2f, 0x2f, + 0x20, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, + 0x6f, 0x20, 0x2a, 0x2a, 0x45, 0x43, 0x4d, 0x41, 0x53, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x20, 0x35, 0x2a, 0x2a, 0x27, 0x73, 0x20, 0x6e, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x20, 0x60, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x66, + 0x60, 0x20, 0x69, 0x66, 0x20, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x49, 0x66, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, 0x20, 0x69, 0x73, + 0x20, 0x6c, 0x61, 0x72, 0x67, 0x65, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, + 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x20, 0x69, 0x6e, 0x20, 0x73, 0x6f, + 0x72, 0x74, 0x20, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x2c, 0x20, 0x70, 0x61, + 0x73, 0x73, 0x20, 0x60, 0x74, 0x72, 0x75, 0x65, 0x60, 0x0a, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x2a, 0x2a, 0x69, 0x73, 0x53, + 0x6f, 0x72, 0x74, 0x65, 0x64, 0x2a, 0x2a, 0x20, 0x74, 0x6f, 0x20, 0x75, + 0x73, 0x65, 0x20, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x20, 0x73, 0x65, + 0x61, 0x72, 0x63, 0x68, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x4f, 0x66, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x61, 0x72, 0x72, 0x61, 0x79, 0x2c, 0x20, + 0x69, 0x74, 0x65, 0x6d, 0x2c, 0x20, 0x69, 0x73, 0x53, 0x6f, 0x72, 0x74, + 0x65, 0x64, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, + 0x20, 0x28, 0x61, 0x72, 0x72, 0x61, 0x79, 0x20, 0x3d, 0x3d, 0x20, 0x6e, + 0x75, 0x6c, 0x6c, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x2d, 0x31, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, + 0x69, 0x20, 0x3d, 0x20, 0x30, 0x2c, 0x20, 0x6c, 0x20, 0x3d, 0x20, 0x61, + 0x72, 0x72, 0x61, 0x79, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x69, 0x73, 0x53, + 0x6f, 0x72, 0x74, 0x65, 0x64, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x74, 0x79, 0x70, 0x65, 0x6f, + 0x66, 0x20, 0x69, 0x73, 0x53, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x20, 0x3d, + 0x3d, 0x20, 0x27, 0x6e, 0x75, 0x6d, 0x62, 
0x65, 0x72, 0x27, 0x29, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x20, + 0x3d, 0x20, 0x28, 0x69, 0x73, 0x53, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x20, + 0x3c, 0x20, 0x30, 0x20, 0x3f, 0x20, 0x4d, 0x61, 0x74, 0x68, 0x2e, 0x6d, + 0x61, 0x78, 0x28, 0x30, 0x2c, 0x20, 0x6c, 0x20, 0x2b, 0x20, 0x69, 0x73, + 0x53, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x29, 0x20, 0x3a, 0x20, 0x69, 0x73, + 0x53, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x20, 0x3d, 0x20, + 0x5f, 0x2e, 0x73, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x28, 0x61, 0x72, 0x72, 0x61, 0x79, 0x2c, 0x20, 0x69, 0x74, 0x65, + 0x6d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, + 0x5b, 0x69, 0x5d, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x69, 0x74, 0x65, 0x6d, + 0x20, 0x3f, 0x20, 0x69, 0x20, 0x3a, 0x20, 0x2d, 0x31, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6e, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x66, 0x20, 0x26, + 0x26, 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, 0x2e, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x4f, 0x66, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x6e, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x66, 0x29, 0x20, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, 0x2e, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x66, 0x28, 0x69, 0x74, 0x65, 0x6d, + 0x2c, 0x20, 0x69, 0x73, 0x53, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x29, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x3b, 0x20, + 0x69, 0x20, 0x3c, 0x20, 0x6c, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x61, 0x72, 0x72, 0x61, 0x79, 0x5b, 0x69, 0x5d, + 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x69, 0x74, 0x65, 0x6d, 0x29, 0x20, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x69, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x2d, 0x31, 0x3b, 0x0a, + 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x44, + 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x6f, 0x20, + 0x2a, 0x2a, 0x45, 0x43, 0x4d, 0x41, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x20, 0x35, 0x2a, 0x2a, 0x27, 0x73, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x20, 0x60, 0x6c, 0x61, 0x73, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x4f, 0x66, 0x60, 0x20, 0x69, 0x66, 0x20, 0x61, 0x76, 0x61, 0x69, 0x6c, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x6c, 0x61, + 0x73, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x66, 0x20, 0x3d, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x61, 0x72, 0x72, + 0x61, 0x79, 0x2c, 0x20, 0x69, 0x74, 0x65, 0x6d, 0x2c, 0x20, 0x66, 0x72, + 0x6f, 0x6d, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, + 0x20, 0x28, 0x61, 0x72, 0x72, 0x61, 0x79, 0x20, 0x3d, 0x3d, 0x20, 0x6e, + 0x75, 0x6c, 0x6c, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x2d, 0x31, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, + 0x68, 0x61, 0x73, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x20, 0x3d, 0x20, 0x66, + 0x72, 0x6f, 0x6d, 0x20, 0x21, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6e, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x4f, 0x66, 0x20, 0x26, 0x26, 0x20, 0x61, 
0x72, 0x72, 0x61, 0x79, 0x2e, + 0x6c, 0x61, 0x73, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x66, 0x20, + 0x3d, 0x3d, 0x3d, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x4c, 0x61, + 0x73, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x66, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x20, 0x68, 0x61, 0x73, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x20, 0x3f, + 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, 0x2e, 0x6c, 0x61, 0x73, 0x74, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x66, 0x28, 0x69, 0x74, 0x65, 0x6d, 0x2c, + 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x29, 0x20, 0x3a, 0x20, 0x61, 0x72, 0x72, + 0x61, 0x79, 0x2e, 0x6c, 0x61, 0x73, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x4f, 0x66, 0x28, 0x69, 0x74, 0x65, 0x6d, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, + 0x69, 0x20, 0x3d, 0x20, 0x28, 0x68, 0x61, 0x73, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x20, 0x3f, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x3a, 0x20, 0x61, + 0x72, 0x72, 0x61, 0x79, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x29, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x77, 0x68, 0x69, 0x6c, 0x65, 0x20, + 0x28, 0x69, 0x2d, 0x2d, 0x29, 0x20, 0x69, 0x66, 0x20, 0x28, 0x61, 0x72, + 0x72, 0x61, 0x79, 0x5b, 0x69, 0x5d, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x69, + 0x74, 0x65, 0x6d, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x69, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x20, 0x2d, 0x31, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x20, 0x61, 0x6e, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, + 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x6e, 0x20, 0x61, 0x72, 0x69, + 0x74, 0x68, 0x6d, 0x65, 0x74, 0x69, 0x63, 0x20, 0x70, 0x72, 0x6f, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x20, 0x41, 0x20, 0x70, + 0x6f, 0x72, 0x74, 0x20, 0x6f, 0x66, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x20, 0x50, + 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x20, 0x60, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x28, 0x29, 0x60, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x20, 0x53, 0x65, 0x65, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x5b, + 0x74, 0x68, 0x65, 0x20, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x20, 0x64, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5d, 0x28, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x64, 0x6f, 0x63, + 0x73, 0x2e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x2e, 0x6f, 0x72, 0x67, + 0x2f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x2f, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0x23, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x29, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x73, 0x74, 0x61, 0x72, 0x74, 0x2c, 0x20, + 0x73, 0x74, 0x6f, 0x70, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x29, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x61, 0x72, + 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x20, 0x3c, 0x3d, 0x20, 0x31, 0x29, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x6f, 0x70, 0x20, 0x3d, 0x20, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x20, 0x7c, 0x7c, 0x20, 0x30, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x61, 0x72, 0x74, 0x20, + 0x3d, 0x20, 0x30, 0x3b, 0x0a, 0x20, 0x20, 
0x20, 0x20, 0x7d, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x73, 0x74, 0x65, 0x70, 0x20, 0x3d, 0x20, 0x61, 0x72, + 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x5b, 0x32, 0x5d, 0x20, 0x7c, + 0x7c, 0x20, 0x31, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, + 0x72, 0x20, 0x6c, 0x65, 0x6e, 0x20, 0x3d, 0x20, 0x4d, 0x61, 0x74, 0x68, + 0x2e, 0x6d, 0x61, 0x78, 0x28, 0x4d, 0x61, 0x74, 0x68, 0x2e, 0x63, 0x65, + 0x69, 0x6c, 0x28, 0x28, 0x73, 0x74, 0x6f, 0x70, 0x20, 0x2d, 0x20, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x29, 0x20, 0x2f, 0x20, 0x73, 0x74, 0x65, 0x70, + 0x29, 0x2c, 0x20, 0x30, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, + 0x61, 0x72, 0x20, 0x69, 0x64, 0x78, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x72, 0x61, 0x6e, 0x67, + 0x65, 0x20, 0x3d, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x41, 0x72, 0x72, 0x61, + 0x79, 0x28, 0x6c, 0x65, 0x6e, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x77, 0x68, 0x69, 0x6c, 0x65, 0x28, 0x69, 0x64, 0x78, 0x20, 0x3c, + 0x20, 0x6c, 0x65, 0x6e, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5b, 0x69, 0x64, 0x78, 0x2b, + 0x2b, 0x5d, 0x20, 0x3d, 0x20, 0x73, 0x74, 0x61, 0x72, 0x74, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x61, 0x72, 0x74, 0x20, + 0x2b, 0x3d, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x3b, 0x0a, 0x20, 0x20, + 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x46, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x28, 0x61, 0x68, 0x65, 0x6d, 0x29, + 0x20, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x0a, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, + 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a, 0x0a, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x20, + 0x61, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x62, + 0x6f, 0x75, 0x6e, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x61, 0x20, 0x67, 0x69, + 0x76, 0x65, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x28, + 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x60, 0x74, + 0x68, 0x69, 0x73, 0x60, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, 0x72, + 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2c, 0x0a, 0x20, 0x20, 0x2f, + 0x2f, 0x20, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x6c, 0x79, + 0x29, 0x2e, 0x20, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x73, + 0x20, 0x74, 0x6f, 0x20, 0x2a, 0x2a, 0x45, 0x43, 0x4d, 0x41, 0x53, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x20, 0x35, 0x2a, 0x2a, 0x27, 0x73, 0x20, 0x6e, + 0x61, 0x74, 0x69, 0x76, 0x65, 0x20, 0x60, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x69, 0x6e, 0x64, 0x60, 0x20, 0x69, 0x66, + 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x62, 0x69, 0x6e, + 0x64, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x28, 0x66, 0x75, 0x6e, 0x63, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, + 0x20, 0x28, 0x66, 0x75, 0x6e, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x64, 0x20, + 0x3d, 0x3d, 0x3d, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x42, 0x69, + 0x6e, 0x64, 0x20, 0x26, 0x26, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x42, 0x69, 0x6e, 0x64, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 
0x42, 0x69, 0x6e, 0x64, 0x2e, + 0x61, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x66, 0x75, 0x6e, 0x63, 0x2c, 0x20, + 0x73, 0x6c, 0x69, 0x63, 0x65, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x61, + 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2c, 0x20, 0x31, 0x29, + 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x61, + 0x72, 0x67, 0x73, 0x20, 0x3d, 0x20, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x2e, + 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x2c, 0x20, 0x32, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x66, 0x75, 0x6e, + 0x63, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x2c, 0x20, 0x61, 0x72, 0x67, 0x73, 0x2e, 0x63, 0x6f, + 0x6e, 0x63, 0x61, 0x74, 0x28, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x2e, 0x63, + 0x61, 0x6c, 0x6c, 0x28, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x29, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, + 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x6c, 0x79, 0x20, 0x61, 0x70, + 0x70, 0x6c, 0x79, 0x20, 0x61, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x20, 0x62, 0x79, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, + 0x6e, 0x67, 0x20, 0x61, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x68, 0x61, 0x73, 0x20, 0x68, 0x61, + 0x64, 0x20, 0x73, 0x6f, 0x6d, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x69, 0x74, + 0x73, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x61, 0x72, 0x67, 0x75, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x20, 0x70, 0x72, 0x65, 0x2d, 0x66, 0x69, 0x6c, + 0x6c, 0x65, 0x64, 0x2c, 0x20, 0x77, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, + 0x20, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x69, 0x6e, 0x67, 0x20, 0x69, 0x74, + 0x73, 0x20, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x20, 0x60, 0x74, + 0x68, 0x69, 0x73, 0x60, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, + 0x6c, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x28, 0x66, 0x75, 0x6e, 0x63, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x76, 0x61, 0x72, 0x20, 0x61, 0x72, 0x67, 0x73, 0x20, 0x3d, 0x20, + 0x73, 0x6c, 0x69, 0x63, 0x65, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x61, + 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2c, 0x20, 0x31, 0x29, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x2e, 0x61, 0x70, 0x70, 0x6c, + 0x79, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2c, 0x20, 0x61, 0x72, 0x67, 0x73, + 0x2e, 0x63, 0x6f, 0x6e, 0x63, 0x61, 0x74, 0x28, 0x73, 0x6c, 0x69, 0x63, + 0x65, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x61, 0x72, 0x67, 0x75, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x29, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x42, 0x69, 0x6e, 0x64, 0x20, 0x61, 0x6c, 0x6c, 0x20, + 0x6f, 0x66, 0x20, 0x61, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x27, 0x73, 0x20, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x20, 0x74, + 0x6f, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2e, 0x20, 0x55, 0x73, 0x65, 0x66, 
0x75, 0x6c, 0x20, 0x66, 0x6f, + 0x72, 0x20, 0x65, 0x6e, 0x73, 0x75, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x74, + 0x68, 0x61, 0x74, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x61, 0x6c, 0x6c, + 0x20, 0x63, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x73, 0x20, 0x64, + 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x20, 0x6f, 0x6e, 0x20, 0x61, 0x6e, + 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x62, 0x65, 0x6c, 0x6f, + 0x6e, 0x67, 0x20, 0x74, 0x6f, 0x20, 0x69, 0x74, 0x2e, 0x0a, 0x20, 0x20, + 0x5f, 0x2e, 0x62, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x20, 0x3d, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6f, 0x62, 0x6a, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x73, 0x20, 0x3d, 0x20, 0x73, 0x6c, 0x69, 0x63, + 0x65, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x61, 0x72, 0x67, 0x75, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x2c, 0x20, 0x31, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x66, 0x75, 0x6e, 0x63, 0x73, 0x2e, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x30, + 0x29, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x73, 0x20, 0x3d, 0x20, 0x5f, 0x2e, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x28, 0x6f, 0x62, + 0x6a, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x65, 0x61, 0x63, 0x68, + 0x28, 0x66, 0x75, 0x6e, 0x63, 0x73, 0x2c, 0x20, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x66, 0x29, 0x20, 0x7b, 0x20, 0x6f, 0x62, + 0x6a, 0x5b, 0x66, 0x5d, 0x20, 0x3d, 0x20, 0x5f, 0x2e, 0x62, 0x69, 0x6e, + 0x64, 0x28, 0x6f, 0x62, 0x6a, 0x5b, 0x66, 0x5d, 0x2c, 0x20, 0x6f, 0x62, + 0x6a, 0x29, 0x3b, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x3b, 0x0a, + 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x4d, + 0x65, 0x6d, 0x6f, 0x69, 0x7a, 0x65, 0x20, 0x61, 0x6e, 0x20, 0x65, 0x78, + 0x70, 0x65, 0x6e, 0x73, 0x69, 0x76, 0x65, 0x20, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x62, 0x79, 0x20, 0x73, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x20, 0x69, 0x74, 0x73, 0x20, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x6d, 0x65, 0x6d, + 0x6f, 0x69, 0x7a, 0x65, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x28, 0x66, 0x75, 0x6e, 0x63, 0x2c, 0x20, 0x68, 0x61, + 0x73, 0x68, 0x65, 0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x76, 0x61, 0x72, 0x20, 0x6d, 0x65, 0x6d, 0x6f, 0x20, 0x3d, 0x20, 0x7b, + 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x68, 0x61, 0x73, 0x68, 0x65, + 0x72, 0x20, 0x7c, 0x7c, 0x20, 0x28, 0x68, 0x61, 0x73, 0x68, 0x65, 0x72, + 0x20, 0x3d, 0x20, 0x5f, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, + 0x72, 0x20, 0x6b, 0x65, 0x79, 0x20, 0x3d, 0x20, 0x68, 0x61, 0x73, 0x68, + 0x65, 0x72, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x74, 0x68, 0x69, + 0x73, 0x2c, 0x20, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x20, 0x5f, 0x2e, 0x68, 0x61, 0x73, 0x28, 0x6d, 0x65, + 0x6d, 0x6f, 0x2c, 0x20, 0x6b, 0x65, 0x79, 0x29, 0x20, 0x3f, 0x20, 0x6d, + 0x65, 0x6d, 0x6f, 0x5b, 0x6b, 0x65, 0x79, 0x5d, 0x20, 0x3a, 0x20, 0x28, + 0x6d, 0x65, 0x6d, 0x6f, 0x5b, 0x6b, 0x65, 0x79, 0x5d, 0x20, 0x3d, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x2e, 0x61, 0x70, 
0x70, 0x6c, 0x79, 0x28, 0x74, + 0x68, 0x69, 0x73, 0x2c, 0x20, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, + 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x44, 0x65, 0x6c, 0x61, 0x79, 0x73, 0x20, 0x61, 0x20, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x67, 0x69, 0x76, 0x65, 0x6e, 0x20, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x73, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, + 0x74, 0x68, 0x65, 0x6e, 0x20, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x0a, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x69, 0x74, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x20, 0x73, 0x75, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x64, 0x2e, 0x0a, + 0x20, 0x20, 0x5f, 0x2e, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x20, 0x3d, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x66, 0x75, 0x6e, + 0x63, 0x2c, 0x20, 0x77, 0x61, 0x69, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x61, 0x72, 0x67, 0x73, 0x20, + 0x3d, 0x20, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x2e, 0x63, 0x61, 0x6c, 0x6c, + 0x28, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2c, 0x20, + 0x32, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x73, 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x28, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x29, + 0x7b, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x66, 0x75, 0x6e, + 0x63, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x6e, 0x75, 0x6c, 0x6c, + 0x2c, 0x20, 0x61, 0x72, 0x67, 0x73, 0x29, 0x3b, 0x20, 0x7d, 0x2c, 0x20, + 0x77, 0x61, 0x69, 0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, + 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x44, 0x65, 0x66, 0x65, 0x72, 0x73, + 0x20, 0x61, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2c, + 0x20, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x69, 0x6e, 0x67, 0x20, + 0x69, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x72, 0x75, 0x6e, 0x20, 0x61, 0x66, + 0x74, 0x65, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x74, 0x20, 0x63, 0x61, 0x6c, 0x6c, 0x20, 0x73, 0x74, 0x61, + 0x63, 0x6b, 0x20, 0x68, 0x61, 0x73, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x63, 0x6c, 0x65, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x0a, 0x20, 0x20, 0x5f, + 0x2e, 0x64, 0x65, 0x66, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x66, 0x75, 0x6e, 0x63, 0x29, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x20, 0x5f, 0x2e, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x2e, 0x61, 0x70, 0x70, + 0x6c, 0x79, 0x28, 0x5f, 0x2c, 0x20, 0x5b, 0x66, 0x75, 0x6e, 0x63, 0x2c, + 0x20, 0x31, 0x5d, 0x2e, 0x63, 0x6f, 0x6e, 0x63, 0x61, 0x74, 0x28, 0x73, + 0x6c, 0x69, 0x63, 0x65, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x61, 0x72, + 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2c, 0x20, 0x31, 0x29, 0x29, + 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, + 0x2f, 0x20, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x20, 0x61, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x20, 0x74, 0x68, + 0x61, 0x74, 0x2c, 0x20, 0x77, 0x68, 0x65, 0x6e, 0x20, 0x69, 0x6e, 0x76, + 0x6f, 0x6b, 0x65, 0x64, 0x2c, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x6f, + 0x6e, 0x6c, 0x79, 0x20, 0x62, 0x65, 0x20, 0x74, 0x72, 0x69, 0x67, 0x67, + 0x65, 0x72, 0x65, 0x64, 0x20, 0x61, 0x74, 
0x20, 0x6d, 0x6f, 0x73, 0x74, + 0x20, 0x6f, 0x6e, 0x63, 0x65, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x64, + 0x75, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x67, 0x69, 0x76, 0x65, + 0x6e, 0x20, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x20, 0x6f, 0x66, 0x20, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x74, 0x68, + 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x66, 0x75, 0x6e, 0x63, 0x2c, 0x20, + 0x77, 0x61, 0x69, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x76, 0x61, 0x72, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2c, + 0x20, 0x61, 0x72, 0x67, 0x73, 0x2c, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x2c, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x70, 0x72, 0x65, 0x76, + 0x69, 0x6f, 0x75, 0x73, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x6c, 0x61, 0x74, 0x65, 0x72, 0x20, + 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x72, 0x65, + 0x76, 0x69, 0x6f, 0x75, 0x73, 0x20, 0x3d, 0x20, 0x6e, 0x65, 0x77, 0x20, + 0x44, 0x61, 0x74, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x20, 0x3d, 0x20, 0x6e, 0x75, + 0x6c, 0x6c, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x2e, + 0x61, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x2c, 0x20, 0x61, 0x72, 0x67, 0x73, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x76, + 0x61, 0x72, 0x20, 0x6e, 0x6f, 0x77, 0x20, 0x3d, 0x20, 0x6e, 0x65, 0x77, + 0x20, 0x44, 0x61, 0x74, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x76, 0x61, 0x72, 0x20, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x20, 0x3d, 0x20, 0x77, 0x61, 0x69, 0x74, 0x20, 0x2d, 0x20, + 0x28, 0x6e, 0x6f, 0x77, 0x20, 0x2d, 0x20, 0x70, 0x72, 0x65, 0x76, 0x69, + 0x6f, 0x75, 0x73, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x20, 0x3d, 0x20, 0x74, 0x68, + 0x69, 0x73, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x61, 0x72, + 0x67, 0x73, 0x20, 0x3d, 0x20, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, + 0x20, 0x28, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, + 0x3c, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x63, 0x6c, 0x65, 0x61, 0x72, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x28, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x20, 0x3d, 0x20, 0x6e, 0x75, 0x6c, + 0x6c, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, + 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x20, 0x3d, 0x20, 0x6e, 0x6f, + 0x77, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, + 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x2c, 0x20, 0x61, 0x72, 0x67, 0x73, 0x29, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 
0x65, 0x6c, 0x73, 0x65, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x21, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x20, 0x3d, 0x20, 0x73, 0x65, + 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x28, 0x6c, 0x61, 0x74, + 0x65, 0x72, 0x2c, 0x20, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x20, 0x61, + 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x20, 0x74, + 0x68, 0x61, 0x74, 0x2c, 0x20, 0x61, 0x73, 0x20, 0x6c, 0x6f, 0x6e, 0x67, + 0x20, 0x61, 0x73, 0x20, 0x69, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x69, + 0x6e, 0x75, 0x65, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x65, 0x20, 0x69, + 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x2c, 0x20, 0x77, 0x69, 0x6c, 0x6c, + 0x20, 0x6e, 0x6f, 0x74, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x62, 0x65, + 0x20, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x65, 0x64, 0x2e, 0x20, + 0x54, 0x68, 0x65, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, 0x63, 0x61, 0x6c, + 0x6c, 0x65, 0x64, 0x20, 0x61, 0x66, 0x74, 0x65, 0x72, 0x20, 0x69, 0x74, + 0x20, 0x73, 0x74, 0x6f, 0x70, 0x73, 0x20, 0x62, 0x65, 0x69, 0x6e, 0x67, + 0x20, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x0a, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x4e, 0x20, 0x6d, 0x69, 0x6c, 0x6c, 0x69, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x2e, 0x20, 0x49, 0x66, 0x20, + 0x60, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x60, 0x20, + 0x69, 0x73, 0x20, 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x2c, 0x20, 0x74, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x6e, 0x20, 0x74, + 0x68, 0x65, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x6c, 0x65, 0x61, 0x64, + 0x69, 0x6e, 0x67, 0x20, 0x65, 0x64, 0x67, 0x65, 0x2c, 0x20, 0x69, 0x6e, + 0x73, 0x74, 0x65, 0x61, 0x64, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x2e, 0x0a, 0x20, + 0x20, 0x5f, 0x2e, 0x64, 0x65, 0x62, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x20, + 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x66, + 0x75, 0x6e, 0x63, 0x2c, 0x20, 0x77, 0x61, 0x69, 0x74, 0x2c, 0x20, 0x69, + 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x29, 0x20, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x2c, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x20, 0x3d, 0x20, 0x74, 0x68, 0x69, + 0x73, 0x2c, 0x20, 0x61, 0x72, 0x67, 0x73, 0x20, 0x3d, 0x20, 0x61, 0x72, + 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x6c, 0x61, 0x74, 0x65, 0x72, + 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 
0x20, 0x3d, 0x20, 0x6e, 0x75, + 0x6c, 0x6c, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x21, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, + 0x74, 0x65, 0x29, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x20, 0x3d, + 0x20, 0x66, 0x75, 0x6e, 0x63, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x28, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2c, 0x20, 0x61, 0x72, 0x67, + 0x73, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x63, + 0x61, 0x6c, 0x6c, 0x4e, 0x6f, 0x77, 0x20, 0x3d, 0x20, 0x69, 0x6d, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x20, 0x26, 0x26, 0x20, 0x21, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6c, 0x65, 0x61, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x28, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x29, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x20, 0x3d, 0x20, 0x73, 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x28, 0x6c, 0x61, 0x74, 0x65, 0x72, 0x2c, 0x20, 0x77, + 0x61, 0x69, 0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x63, 0x61, 0x6c, 0x6c, 0x4e, 0x6f, 0x77, 0x29, + 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x20, 0x3d, 0x20, 0x66, 0x75, + 0x6e, 0x63, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x2c, 0x20, 0x61, 0x72, 0x67, 0x73, 0x29, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x20, + 0x61, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x74, + 0x68, 0x61, 0x74, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, + 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x64, 0x20, 0x61, 0x74, 0x20, + 0x6d, 0x6f, 0x73, 0x74, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x74, 0x69, 0x6d, + 0x65, 0x2c, 0x20, 0x6e, 0x6f, 0x20, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, + 0x20, 0x68, 0x6f, 0x77, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x6f, 0x66, + 0x74, 0x65, 0x6e, 0x20, 0x79, 0x6f, 0x75, 0x20, 0x63, 0x61, 0x6c, 0x6c, + 0x20, 0x69, 0x74, 0x2e, 0x20, 0x55, 0x73, 0x65, 0x66, 0x75, 0x6c, 0x20, + 0x66, 0x6f, 0x72, 0x20, 0x6c, 0x61, 0x7a, 0x79, 0x20, 0x69, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x6f, 0x6e, 0x63, 0x65, 0x20, 0x3d, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x66, 0x75, 0x6e, + 0x63, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, + 0x20, 0x72, 0x61, 0x6e, 0x20, 0x3d, 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x2c, 0x20, 0x6d, 0x65, 0x6d, 0x6f, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x72, 0x61, 0x6e, 0x29, 0x20, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6d, 0x65, 0x6d, 0x6f, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x61, 0x6e, 0x20, 0x3d, 0x20, + 0x74, 0x72, 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x6d, 0x65, 0x6d, 0x6f, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x2e, + 0x61, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2c, 0x20, + 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 
0x74, 0x73, 0x29, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x20, 0x3d, + 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6d, 0x65, 0x6d, 0x6f, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x7d, + 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x52, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, 0x69, 0x72, 0x73, + 0x74, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x70, + 0x61, 0x73, 0x73, 0x65, 0x64, 0x20, 0x61, 0x73, 0x20, 0x61, 0x6e, 0x20, + 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x2c, 0x0a, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x69, 0x6e, + 0x67, 0x20, 0x79, 0x6f, 0x75, 0x20, 0x74, 0x6f, 0x20, 0x61, 0x64, 0x6a, + 0x75, 0x73, 0x74, 0x20, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x2c, 0x20, 0x72, 0x75, 0x6e, 0x20, 0x63, 0x6f, 0x64, 0x65, 0x20, + 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, + 0x66, 0x74, 0x65, 0x72, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x0a, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x6c, 0x79, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, + 0x6c, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, + 0x20, 0x20, 0x5f, 0x2e, 0x77, 0x72, 0x61, 0x70, 0x20, 0x3d, 0x20, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x66, 0x75, 0x6e, 0x63, + 0x2c, 0x20, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x61, + 0x72, 0x67, 0x73, 0x20, 0x3d, 0x20, 0x5b, 0x66, 0x75, 0x6e, 0x63, 0x5d, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x75, 0x73, 0x68, + 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x61, 0x72, 0x67, 0x73, 0x2c, + 0x20, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x29, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x20, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x2e, 0x61, 0x70, + 0x70, 0x6c, 0x79, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2c, 0x20, 0x61, 0x72, + 0x67, 0x73, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, + 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x52, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x20, 0x61, 0x20, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x69, + 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x20, 0x6c, + 0x69, 0x73, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x20, 0x65, 0x61, 0x63, 0x68, 0x0a, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x69, 0x6e, + 0x67, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x74, + 0x68, 0x61, 0x74, 0x20, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x73, 0x2e, + 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, + 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 
0x74, 0x69, 0x6f, 0x6e, 0x28, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x73, 0x20, 0x3d, 0x20, 0x61, 0x72, 0x67, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x76, 0x61, 0x72, 0x20, 0x61, 0x72, 0x67, 0x73, 0x20, 0x3d, 0x20, + 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x76, 0x61, + 0x72, 0x20, 0x69, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x73, 0x2e, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x2d, 0x20, 0x31, 0x3b, 0x20, + 0x69, 0x20, 0x3e, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x2d, 0x2d, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x61, + 0x72, 0x67, 0x73, 0x20, 0x3d, 0x20, 0x5b, 0x66, 0x75, 0x6e, 0x63, 0x73, + 0x5b, 0x69, 0x5d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x74, 0x68, + 0x69, 0x73, 0x2c, 0x20, 0x61, 0x72, 0x67, 0x73, 0x29, 0x5d, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x61, 0x72, 0x67, + 0x73, 0x5b, 0x30, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, + 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x20, 0x61, 0x20, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, + 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, 0x62, 0x65, + 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x64, 0x20, 0x61, 0x66, + 0x74, 0x65, 0x72, 0x20, 0x62, 0x65, 0x69, 0x6e, 0x67, 0x20, 0x63, 0x61, + 0x6c, 0x6c, 0x65, 0x64, 0x20, 0x4e, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x61, 0x66, 0x74, 0x65, 0x72, 0x20, + 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x2c, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x29, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x20, 0x3c, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x28, 0x29, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x2d, + 0x2d, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x20, 0x3c, 0x20, 0x31, 0x29, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x2e, 0x61, 0x70, + 0x70, 0x6c, 0x79, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2c, 0x20, 0x61, 0x72, + 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, + 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x2d, 0x2d, + 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, + 0x2d, 0x2d, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x52, 0x65, 0x74, + 0x72, 0x69, 0x65, 0x76, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x6e, 0x20, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x27, 0x73, 0x20, 
0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x6f, + 0x20, 0x2a, 0x2a, 0x45, 0x43, 0x4d, 0x41, 0x53, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x20, 0x35, 0x2a, 0x2a, 0x27, 0x73, 0x20, 0x6e, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x20, 0x60, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x6b, + 0x65, 0x79, 0x73, 0x60, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x6b, 0x65, 0x79, + 0x73, 0x20, 0x3d, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x4b, 0x65, + 0x79, 0x73, 0x20, 0x7c, 0x7c, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6f, 0x62, 0x6a, 0x20, 0x21, 0x3d, + 0x3d, 0x20, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x6f, 0x62, 0x6a, + 0x29, 0x29, 0x20, 0x74, 0x68, 0x72, 0x6f, 0x77, 0x20, 0x6e, 0x65, 0x77, + 0x20, 0x54, 0x79, 0x70, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x27, + 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x20, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x27, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, + 0x72, 0x20, 0x6b, 0x65, 0x79, 0x73, 0x20, 0x3d, 0x20, 0x5b, 0x5d, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x76, 0x61, + 0x72, 0x20, 0x6b, 0x65, 0x79, 0x20, 0x69, 0x6e, 0x20, 0x6f, 0x62, 0x6a, + 0x29, 0x20, 0x69, 0x66, 0x20, 0x28, 0x5f, 0x2e, 0x68, 0x61, 0x73, 0x28, + 0x6f, 0x62, 0x6a, 0x2c, 0x20, 0x6b, 0x65, 0x79, 0x29, 0x29, 0x20, 0x6b, + 0x65, 0x79, 0x73, 0x5b, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x5d, 0x20, 0x3d, 0x20, 0x6b, 0x65, 0x79, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6b, + 0x65, 0x79, 0x73, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x20, + 0x6f, 0x66, 0x20, 0x61, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x27, 0x73, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x73, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x76, 0x61, 0x72, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x20, 0x3d, + 0x20, 0x5b, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x28, 0x76, 0x61, 0x72, 0x20, 0x6b, 0x65, 0x79, 0x20, 0x69, 0x6e, + 0x20, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x69, 0x66, 0x20, 0x28, 0x5f, 0x2e, + 0x68, 0x61, 0x73, 0x28, 0x6f, 0x62, 0x6a, 0x2c, 0x20, 0x6b, 0x65, 0x79, + 0x29, 0x29, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2e, 0x70, 0x75, + 0x73, 0x68, 0x28, 0x6f, 0x62, 0x6a, 0x5b, 0x6b, 0x65, 0x79, 0x5d, 0x29, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x3b, 0x0a, 0x20, 0x20, 0x7d, + 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x43, 0x6f, 0x6e, 0x76, + 0x65, 0x72, 0x74, 0x20, 0x61, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x61, 0x20, 0x6c, 0x69, 0x73, + 0x74, 0x20, 0x6f, 0x66, 0x20, 0x60, 0x5b, 0x6b, 0x65, 0x79, 0x2c, 0x20, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5d, 0x60, 0x20, 0x70, 0x61, 0x69, 0x72, + 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x70, 0x61, 0x69, 0x72, 0x73, + 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, + 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, 
0x20, 0x20, 0x20, 0x20, 0x76, + 0x61, 0x72, 0x20, 0x70, 0x61, 0x69, 0x72, 0x73, 0x20, 0x3d, 0x20, 0x5b, + 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, + 0x76, 0x61, 0x72, 0x20, 0x6b, 0x65, 0x79, 0x20, 0x69, 0x6e, 0x20, 0x6f, + 0x62, 0x6a, 0x29, 0x20, 0x69, 0x66, 0x20, 0x28, 0x5f, 0x2e, 0x68, 0x61, + 0x73, 0x28, 0x6f, 0x62, 0x6a, 0x2c, 0x20, 0x6b, 0x65, 0x79, 0x29, 0x29, + 0x20, 0x70, 0x61, 0x69, 0x72, 0x73, 0x2e, 0x70, 0x75, 0x73, 0x68, 0x28, + 0x5b, 0x6b, 0x65, 0x79, 0x2c, 0x20, 0x6f, 0x62, 0x6a, 0x5b, 0x6b, 0x65, + 0x79, 0x5d, 0x5d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x20, 0x70, 0x61, 0x69, 0x72, 0x73, 0x3b, 0x0a, + 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x49, + 0x6e, 0x76, 0x65, 0x72, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6b, 0x65, + 0x79, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x73, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x2e, 0x20, 0x54, 0x68, 0x65, 0x20, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x73, 0x20, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x62, 0x65, 0x20, 0x73, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x69, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x20, + 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6f, + 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, + 0x72, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x20, 0x3d, 0x20, 0x7b, + 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, + 0x76, 0x61, 0x72, 0x20, 0x6b, 0x65, 0x79, 0x20, 0x69, 0x6e, 0x20, 0x6f, + 0x62, 0x6a, 0x29, 0x20, 0x69, 0x66, 0x20, 0x28, 0x5f, 0x2e, 0x68, 0x61, + 0x73, 0x28, 0x6f, 0x62, 0x6a, 0x2c, 0x20, 0x6b, 0x65, 0x79, 0x29, 0x29, + 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5b, 0x6f, 0x62, 0x6a, 0x5b, + 0x6b, 0x65, 0x79, 0x5d, 0x5d, 0x20, 0x3d, 0x20, 0x6b, 0x65, 0x79, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, + 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x52, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x20, 0x61, 0x20, 0x73, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x20, 0x6c, + 0x69, 0x73, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x20, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x20, + 0x6f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2e, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x65, 0x64, 0x20, 0x61, 0x73, 0x20, 0x60, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x73, 0x60, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x3d, 0x20, 0x5f, 0x2e, 0x6d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x20, 0x3d, 0x20, 0x5b, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x76, 0x61, 0x72, 0x20, 0x6b, 0x65, + 0x79, 0x20, 0x69, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x5f, 0x2e, + 0x69, 0x73, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6f, + 0x62, 0x6a, 0x5b, 0x6b, 0x65, 0x79, 0x5d, 0x29, 0x29, 0x20, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x2e, 0x70, 0x75, 0x73, 
0x68, 0x28, 0x6b, 0x65, 0x79, + 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x2e, 0x73, 0x6f, 0x72, 0x74, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x64, 0x20, 0x61, 0x20, 0x67, 0x69, 0x76, 0x65, 0x6e, 0x20, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, + 0x61, 0x6c, 0x6c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x20, 0x69, 0x6e, 0x20, 0x70, 0x61, + 0x73, 0x73, 0x65, 0x64, 0x2d, 0x69, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x28, 0x73, 0x29, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x65, 0x61, 0x63, 0x68, 0x28, 0x73, 0x6c, 0x69, + 0x63, 0x65, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x61, 0x72, 0x67, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2c, 0x20, 0x31, 0x29, 0x2c, 0x20, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x29, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, + 0x72, 0x20, 0x28, 0x76, 0x61, 0x72, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x20, + 0x69, 0x6e, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6f, + 0x62, 0x6a, 0x5b, 0x70, 0x72, 0x6f, 0x70, 0x5d, 0x20, 0x3d, 0x20, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5b, 0x70, 0x72, 0x6f, 0x70, 0x5d, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, + 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x20, 0x61, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x20, 0x6f, 0x66, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x6f, 0x6e, + 0x6c, 0x79, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, 0x68, 0x69, 0x74, 0x65, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x64, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x70, 0x69, + 0x63, 0x6b, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x76, 0x61, 0x72, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x20, 0x3d, 0x20, + 0x7b, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, + 0x6b, 0x65, 0x79, 0x73, 0x20, 0x3d, 0x20, 0x63, 0x6f, 0x6e, 0x63, 0x61, + 0x74, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x41, 0x72, 0x72, 0x61, + 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2c, 0x20, 0x73, 0x6c, 0x69, 0x63, + 0x65, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x61, 0x72, 0x67, 0x75, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x2c, 0x20, 0x31, 0x29, 0x29, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x65, 0x61, 0x63, 0x68, 0x28, 0x6b, 0x65, 0x79, 0x73, + 0x2c, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6b, + 0x65, 0x79, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x6b, 0x65, 0x79, 
0x20, 0x69, 0x6e, 0x20, 0x6f, + 0x62, 0x6a, 0x29, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5b, 0x6b, 0x65, 0x79, + 0x5d, 0x20, 0x3d, 0x20, 0x6f, 0x62, 0x6a, 0x5b, 0x6b, 0x65, 0x79, 0x5d, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x63, 0x6f, 0x70, + 0x79, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x61, 0x20, + 0x63, 0x6f, 0x70, 0x79, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x77, 0x69, 0x74, 0x68, 0x6f, + 0x75, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x62, 0x6c, 0x61, 0x63, 0x6b, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x64, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x6f, + 0x6d, 0x69, 0x74, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x20, 0x3d, + 0x20, 0x7b, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, + 0x20, 0x6b, 0x65, 0x79, 0x73, 0x20, 0x3d, 0x20, 0x63, 0x6f, 0x6e, 0x63, + 0x61, 0x74, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x41, 0x72, 0x72, + 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2c, 0x20, 0x73, 0x6c, 0x69, + 0x63, 0x65, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x61, 0x72, 0x67, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2c, 0x20, 0x31, 0x29, 0x29, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x76, 0x61, 0x72, + 0x20, 0x6b, 0x65, 0x79, 0x20, 0x69, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, + 0x28, 0x21, 0x5f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, + 0x28, 0x6b, 0x65, 0x79, 0x73, 0x2c, 0x20, 0x6b, 0x65, 0x79, 0x29, 0x29, + 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5b, 0x6b, 0x65, 0x79, 0x5d, 0x20, 0x3d, + 0x20, 0x6f, 0x62, 0x6a, 0x5b, 0x6b, 0x65, 0x79, 0x5d, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x3b, 0x0a, 0x20, 0x20, + 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x46, 0x69, 0x6c, + 0x6c, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x67, 0x69, 0x76, 0x65, 0x6e, + 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x77, 0x69, 0x74, 0x68, + 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x5f, + 0x2e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x20, 0x3d, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6f, 0x62, 0x6a, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x65, 0x61, 0x63, 0x68, + 0x28, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, + 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2c, 0x20, 0x31, + 0x29, 0x2c, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x76, 0x61, 0x72, 0x20, 0x70, + 0x72, 0x6f, 0x70, 0x20, 0x69, 0x6e, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6f, 0x62, 0x6a, 0x5b, 0x70, + 0x72, 0x6f, 0x70, 0x5d, 0x20, 0x3d, 0x3d, 
0x20, 0x6e, 0x75, 0x6c, 0x6c, + 0x29, 0x20, 0x6f, 0x62, 0x6a, 0x5b, 0x70, 0x72, 0x6f, 0x70, 0x5d, 0x20, + 0x3d, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5b, 0x70, 0x72, 0x6f, + 0x70, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x3b, 0x0a, 0x20, 0x20, + 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x20, 0x61, 0x20, 0x28, 0x73, 0x68, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x2d, 0x63, 0x6c, 0x6f, 0x6e, 0x65, 0x64, 0x29, 0x20, 0x64, + 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x20, 0x6f, 0x66, 0x20, + 0x61, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x0a, 0x20, + 0x20, 0x5f, 0x2e, 0x63, 0x6c, 0x6f, 0x6e, 0x65, 0x20, 0x3d, 0x20, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6f, 0x62, 0x6a, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, + 0x5f, 0x2e, 0x69, 0x73, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x6f, + 0x62, 0x6a, 0x29, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x6f, 0x62, 0x6a, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x20, 0x5f, 0x2e, 0x69, 0x73, 0x41, 0x72, 0x72, 0x61, + 0x79, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x3f, 0x20, 0x6f, 0x62, 0x6a, + 0x2e, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x28, 0x29, 0x20, 0x3a, 0x20, 0x5f, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x28, 0x7b, 0x7d, 0x2c, 0x20, + 0x6f, 0x62, 0x6a, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x73, + 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x63, 0x65, 0x70, 0x74, 0x6f, 0x72, + 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6f, 0x62, + 0x6a, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x74, 0x68, 0x65, 0x6e, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x20, 0x6f, 0x62, 0x6a, 0x2e, + 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x54, 0x68, 0x65, 0x20, 0x70, 0x72, + 0x69, 0x6d, 0x61, 0x72, 0x79, 0x20, 0x70, 0x75, 0x72, 0x70, 0x6f, 0x73, + 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x20, 0x69, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x22, + 0x74, 0x61, 0x70, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x22, 0x20, 0x61, 0x20, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x20, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x2c, 0x20, 0x69, 0x6e, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x6f, 0x72, + 0x64, 0x65, 0x72, 0x20, 0x74, 0x6f, 0x20, 0x70, 0x65, 0x72, 0x66, 0x6f, + 0x72, 0x6d, 0x20, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x20, 0x6f, 0x6e, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, + 0x64, 0x69, 0x61, 0x74, 0x65, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x20, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, + 0x74, 0x61, 0x70, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x28, 0x6f, 0x62, 0x6a, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x63, 0x65, 0x70, 0x74, 0x6f, 0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x63, 0x65, 0x70, 0x74, + 0x6f, 0x72, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x3b, + 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 
0x6c, 0x20, 0x72, 0x65, 0x63, + 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x61, + 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x60, 0x69, 0x73, 0x45, 0x71, + 0x75, 0x61, 0x6c, 0x60, 0x2e, 0x0a, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, + 0x65, 0x71, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x28, 0x61, 0x2c, 0x20, 0x62, 0x2c, 0x20, 0x61, 0x53, 0x74, 0x61, + 0x63, 0x6b, 0x2c, 0x20, 0x62, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x29, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x65, 0x71, 0x75, 0x61, 0x6c, + 0x2e, 0x20, 0x60, 0x30, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x2d, 0x30, 0x60, + 0x2c, 0x20, 0x62, 0x75, 0x74, 0x20, 0x74, 0x68, 0x65, 0x79, 0x20, 0x61, + 0x72, 0x65, 0x6e, 0x27, 0x74, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x63, 0x61, 0x6c, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x53, 0x65, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x48, 0x61, 0x72, 0x6d, + 0x6f, 0x6e, 0x79, 0x20, 0x60, 0x65, 0x67, 0x61, 0x6c, 0x60, 0x20, 0x70, + 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x3a, 0x20, 0x68, 0x74, 0x74, + 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x69, 0x6b, 0x69, 0x2e, 0x65, 0x63, 0x6d, + 0x61, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x64, 0x6f, 0x6b, 0x75, 0x2e, 0x70, 0x68, 0x70, 0x3f, 0x69, 0x64, 0x3d, + 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x3a, 0x65, 0x67, 0x61, 0x6c, + 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x61, 0x20, + 0x3d, 0x3d, 0x3d, 0x20, 0x62, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x20, 0x61, 0x20, 0x21, 0x3d, 0x3d, 0x20, 0x30, 0x20, 0x7c, 0x7c, + 0x20, 0x31, 0x20, 0x2f, 0x20, 0x61, 0x20, 0x3d, 0x3d, 0x20, 0x31, 0x20, + 0x2f, 0x20, 0x62, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x41, 0x20, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x20, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x20, 0x69, 0x73, 0x20, 0x6e, + 0x65, 0x63, 0x65, 0x73, 0x73, 0x61, 0x72, 0x79, 0x20, 0x62, 0x65, 0x63, + 0x61, 0x75, 0x73, 0x65, 0x20, 0x60, 0x6e, 0x75, 0x6c, 0x6c, 0x20, 0x3d, + 0x3d, 0x20, 0x75, 0x6e, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x60, + 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x61, 0x20, + 0x3d, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x20, 0x7c, 0x7c, 0x20, 0x62, + 0x20, 0x3d, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x20, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x20, 0x61, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x62, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x55, 0x6e, 0x77, + 0x72, 0x61, 0x70, 0x20, 0x61, 0x6e, 0x79, 0x20, 0x77, 0x72, 0x61, 0x70, + 0x70, 0x65, 0x64, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x61, 0x20, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x6f, 0x66, 0x20, 0x5f, 0x29, + 0x20, 0x61, 0x20, 0x3d, 0x20, 0x61, 0x2e, 0x5f, 0x77, 0x72, 0x61, 0x70, + 0x70, 0x65, 0x64, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, + 0x28, 0x62, 0x20, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x6f, + 0x66, 0x20, 0x5f, 0x29, 0x20, 0x62, 0x20, 0x3d, 0x20, 0x62, 0x2e, 0x5f, + 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x20, + 0x60, 0x5b, 0x5b, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x5d, 0x5d, 0x60, 0x20, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x2e, 0x0a, 
0x20, 0x20, 0x20, 0x20, 0x76, + 0x61, 0x72, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, + 0x20, 0x3d, 0x20, 0x74, 0x6f, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x61, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, + 0x6d, 0x65, 0x20, 0x21, 0x3d, 0x20, 0x74, 0x6f, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x62, 0x29, 0x29, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68, + 0x20, 0x28, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x2c, 0x20, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x73, 0x2c, 0x20, 0x64, 0x61, 0x74, 0x65, 0x73, 0x2c, + 0x20, 0x61, 0x6e, 0x64, 0x20, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, + 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, + 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x73, 0x65, 0x20, + 0x27, 0x5b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x5d, 0x27, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x74, 0x68, 0x65, + 0x69, 0x72, 0x20, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, + 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x20, 0x61, 0x72, 0x65, + 0x20, 0x65, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x3b, + 0x20, 0x74, 0x68, 0x75, 0x73, 0x2c, 0x20, 0x60, 0x22, 0x35, 0x22, 0x60, + 0x20, 0x69, 0x73, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x65, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, + 0x74, 0x20, 0x74, 0x6f, 0x20, 0x60, 0x6e, 0x65, 0x77, 0x20, 0x53, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x28, 0x22, 0x35, 0x22, 0x29, 0x60, 0x2e, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x61, 0x20, 0x3d, 0x3d, 0x20, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x28, 0x62, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x61, 0x73, 0x65, 0x20, 0x27, 0x5b, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x20, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5d, 0x27, 0x3a, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x60, 0x4e, 0x61, 0x4e, 0x60, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x65, + 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x2c, 0x20, 0x62, + 0x75, 0x74, 0x20, 0x6e, 0x6f, 0x6e, 0x2d, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x78, 0x69, 0x76, 0x65, 0x2e, 0x20, 0x41, 0x6e, 0x20, 0x60, 0x65, 0x67, + 0x61, 0x6c, 0x60, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, + 0x6f, 0x6e, 0x20, 0x69, 0x73, 0x20, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, + 0x6d, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, + 0x20, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x20, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x61, 0x20, 0x21, 0x3d, + 0x20, 0x2b, 0x61, 0x20, 0x3f, 0x20, 0x62, 0x20, 0x21, 0x3d, 0x20, 0x2b, + 0x62, 0x20, 0x3a, 0x20, 0x28, 0x61, 0x20, 
0x3d, 0x3d, 0x20, 0x30, 0x20, + 0x3f, 0x20, 0x31, 0x20, 0x2f, 0x20, 0x61, 0x20, 0x3d, 0x3d, 0x20, 0x31, + 0x20, 0x2f, 0x20, 0x62, 0x20, 0x3a, 0x20, 0x61, 0x20, 0x3d, 0x3d, 0x20, + 0x2b, 0x62, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, + 0x61, 0x73, 0x65, 0x20, 0x27, 0x5b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x20, 0x44, 0x61, 0x74, 0x65, 0x5d, 0x27, 0x3a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x61, 0x73, 0x65, 0x20, 0x27, 0x5b, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x20, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, + 0x5d, 0x27, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x43, 0x6f, 0x65, 0x72, 0x63, 0x65, 0x20, 0x64, 0x61, + 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x62, 0x6f, 0x6f, 0x6c, + 0x65, 0x61, 0x6e, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x6e, 0x75, 0x6d, 0x65, + 0x72, 0x69, 0x63, 0x20, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2e, 0x20, 0x44, 0x61, + 0x74, 0x65, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x70, + 0x61, 0x72, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65, 0x69, + 0x72, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, + 0x20, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x20, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x20, 0x4e, 0x6f, 0x74, 0x65, 0x20, 0x74, + 0x68, 0x61, 0x74, 0x20, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x20, + 0x64, 0x61, 0x74, 0x65, 0x73, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x6d, + 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, 0x72, + 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, + 0x2f, 0x20, 0x6f, 0x66, 0x20, 0x60, 0x4e, 0x61, 0x4e, 0x60, 0x20, 0x61, + 0x72, 0x65, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x65, 0x71, 0x75, 0x69, 0x76, + 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x2b, 0x61, + 0x20, 0x3d, 0x3d, 0x20, 0x2b, 0x62, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x52, 0x65, 0x67, 0x45, 0x78, 0x70, 0x73, + 0x20, 0x61, 0x72, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, + 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65, 0x69, 0x72, 0x20, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, + 0x6e, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x66, 0x6c, 0x61, 0x67, 0x73, + 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x73, 0x65, + 0x20, 0x27, 0x5b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x52, 0x65, + 0x67, 0x45, 0x78, 0x70, 0x5d, 0x27, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x61, + 0x2e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x62, + 0x2e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x26, 0x26, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x61, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x20, 0x3d, + 0x3d, 0x20, 0x62, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x20, 0x26, + 0x26, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x61, 0x2e, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x6c, 0x69, 0x6e, 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x62, 0x2e, 0x6d, 0x75, + 0x6c, 0x74, 0x69, 0x6c, 0x69, 0x6e, 0x65, 0x20, 0x26, 0x26, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 
0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x61, 0x2e, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x61, + 0x73, 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x62, 0x2e, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x43, 0x61, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x74, 0x79, + 0x70, 0x65, 0x6f, 0x66, 0x20, 0x61, 0x20, 0x21, 0x3d, 0x20, 0x27, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x27, 0x20, 0x7c, 0x7c, 0x20, 0x74, 0x79, + 0x70, 0x65, 0x6f, 0x66, 0x20, 0x62, 0x20, 0x21, 0x3d, 0x20, 0x27, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x27, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x41, 0x73, 0x73, 0x75, 0x6d, 0x65, 0x20, + 0x65, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x63, 0x79, 0x63, 0x6c, 0x69, 0x63, 0x20, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x20, 0x54, 0x68, 0x65, 0x20, + 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x20, 0x66, 0x6f, + 0x72, 0x20, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6e, 0x67, 0x20, + 0x63, 0x79, 0x63, 0x6c, 0x69, 0x63, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, + 0x2f, 0x20, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x20, 0x69, 0x73, 0x20, 0x61, 0x64, 0x61, 0x70, 0x74, 0x65, 0x64, 0x20, + 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x45, 0x53, 0x20, 0x35, 0x2e, 0x31, 0x20, + 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x31, 0x35, 0x2e, 0x31, + 0x32, 0x2e, 0x33, 0x2c, 0x20, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, + 0x74, 0x20, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, + 0x60, 0x4a, 0x4f, 0x60, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, + 0x72, 0x20, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x3d, 0x20, 0x61, + 0x53, 0x74, 0x61, 0x63, 0x6b, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x77, 0x68, 0x69, 0x6c, 0x65, 0x20, + 0x28, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x2d, 0x2d, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x4c, 0x69, + 0x6e, 0x65, 0x61, 0x72, 0x20, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, + 0x20, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, + 0x20, 0x69, 0x73, 0x20, 0x69, 0x6e, 0x76, 0x65, 0x72, 0x73, 0x65, 0x6c, + 0x79, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x20, + 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x20, 0x73, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x69, 0x66, 0x20, 0x28, 0x61, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x5b, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5d, 0x20, 0x3d, 0x3d, 0x20, 0x61, + 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x62, 0x53, 0x74, + 0x61, 0x63, 0x6b, 0x5b, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5d, 0x20, + 0x3d, 0x3d, 0x20, 0x62, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x41, 0x64, 0x64, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x66, 0x69, 0x72, 0x73, 0x74, 0x20, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, + 0x74, 0x61, 0x63, 0x6b, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x72, 0x61, 0x76, + 0x65, 0x72, 0x73, 0x65, 0x64, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 
0x61, 0x53, 0x74, 0x61, 0x63, + 0x6b, 0x2e, 0x70, 0x75, 0x73, 0x68, 0x28, 0x61, 0x29, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x62, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x2e, 0x70, 0x75, + 0x73, 0x68, 0x28, 0x62, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, + 0x61, 0x72, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x3d, 0x20, 0x30, 0x2c, + 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x20, 0x3d, 0x20, 0x74, 0x72, + 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x52, + 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x6c, 0x79, 0x20, 0x63, + 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, + 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x20, 0x3d, 0x3d, 0x20, + 0x27, 0x5b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x41, 0x72, 0x72, + 0x61, 0x79, 0x5d, 0x27, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, + 0x20, 0x61, 0x72, 0x72, 0x61, 0x79, 0x20, 0x6c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x64, 0x65, 0x74, 0x65, 0x72, 0x6d, + 0x69, 0x6e, 0x65, 0x20, 0x69, 0x66, 0x20, 0x61, 0x20, 0x64, 0x65, 0x65, + 0x70, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, + 0x20, 0x69, 0x73, 0x20, 0x6e, 0x65, 0x63, 0x65, 0x73, 0x73, 0x61, 0x72, + 0x79, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x69, 0x7a, + 0x65, 0x20, 0x3d, 0x20, 0x61, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x20, 0x3d, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x3d, 0x3d, + 0x20, 0x62, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x44, 0x65, 0x65, 0x70, 0x20, 0x63, + 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x2c, 0x20, 0x69, 0x67, 0x6e, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x6e, 0x6f, 0x6e, 0x2d, 0x6e, 0x75, + 0x6d, 0x65, 0x72, 0x69, 0x63, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x77, 0x68, 0x69, 0x6c, 0x65, 0x20, 0x28, 0x73, 0x69, 0x7a, + 0x65, 0x2d, 0x2d, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x28, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x20, 0x3d, 0x20, 0x65, 0x71, 0x28, 0x61, + 0x5b, 0x73, 0x69, 0x7a, 0x65, 0x5d, 0x2c, 0x20, 0x62, 0x5b, 0x73, 0x69, + 0x7a, 0x65, 0x5d, 0x2c, 0x20, 0x61, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x2c, + 0x20, 0x62, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x29, 0x29, 0x29, 0x20, 0x62, + 0x72, 0x65, 0x61, 0x6b, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x64, + 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x20, 0x61, 0x72, + 0x65, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x65, 0x71, 0x75, 0x69, 0x76, 0x61, + 0x6c, 0x65, 0x6e, 0x74, 0x2c, 0x20, 0x62, 
0x75, 0x74, 0x20, 0x60, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x60, 0x73, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x64, 0x69, + 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x66, 0x72, 0x61, 0x6d, + 0x65, 0x73, 0x20, 0x61, 0x72, 0x65, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x61, 0x43, 0x74, 0x6f, 0x72, 0x20, + 0x3d, 0x20, 0x61, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x6f, 0x72, 0x2c, 0x20, 0x62, 0x43, 0x74, 0x6f, 0x72, 0x20, 0x3d, + 0x20, 0x62, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x6f, 0x72, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, + 0x20, 0x28, 0x61, 0x43, 0x74, 0x6f, 0x72, 0x20, 0x21, 0x3d, 0x3d, 0x20, + 0x62, 0x43, 0x74, 0x6f, 0x72, 0x20, 0x26, 0x26, 0x20, 0x21, 0x28, 0x5f, + 0x2e, 0x69, 0x73, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, + 0x61, 0x43, 0x74, 0x6f, 0x72, 0x29, 0x20, 0x26, 0x26, 0x20, 0x28, 0x61, + 0x43, 0x74, 0x6f, 0x72, 0x20, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x6f, 0x66, 0x20, 0x61, 0x43, 0x74, 0x6f, 0x72, 0x29, 0x20, 0x26, + 0x26, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5f, 0x2e, 0x69, + 0x73, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x62, 0x43, + 0x74, 0x6f, 0x72, 0x29, 0x20, 0x26, 0x26, 0x20, 0x28, 0x62, 0x43, 0x74, + 0x6f, 0x72, 0x20, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x6f, + 0x66, 0x20, 0x62, 0x43, 0x74, 0x6f, 0x72, 0x29, 0x29, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x44, 0x65, 0x65, 0x70, 0x20, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x72, 0x65, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x28, 0x76, 0x61, 0x72, 0x20, 0x6b, 0x65, 0x79, 0x20, 0x69, 0x6e, 0x20, + 0x61, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x69, 0x66, 0x20, 0x28, 0x5f, 0x2e, 0x68, 0x61, 0x73, 0x28, 0x61, + 0x2c, 0x20, 0x6b, 0x65, 0x79, 0x29, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x70, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x20, 0x6f, 0x66, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2b, 0x2b, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x44, + 0x65, 0x65, 0x70, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x20, + 0x65, 0x61, 0x63, 0x68, 0x20, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x2e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, + 0x66, 0x20, 0x28, 0x21, 0x28, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x20, + 0x3d, 0x20, 0x5f, 0x2e, 0x68, 0x61, 0x73, 0x28, 0x62, 0x2c, 0x20, 0x6b, + 0x65, 0x79, 0x29, 0x20, 0x26, 0x26, 0x20, 0x65, 0x71, 0x28, 0x61, 0x5b, + 0x6b, 0x65, 0x79, 0x5d, 0x2c, 0x20, 0x62, 0x5b, 0x6b, 0x65, 0x79, 0x5d, + 0x2c, 0x20, 0x61, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x2c, 0x20, 0x62, 0x53, + 0x74, 0x61, 0x63, 0x6b, 0x29, 0x29, 0x29, 
0x20, 0x62, 0x72, 0x65, 0x61, + 0x6b, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x45, 0x6e, 0x73, 0x75, 0x72, 0x65, + 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x62, 0x6f, 0x74, 0x68, 0x20, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x61, 0x6d, 0x65, 0x20, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x70, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x6b, 0x65, 0x79, 0x20, 0x69, + 0x6e, 0x20, 0x62, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x5f, 0x2e, 0x68, + 0x61, 0x73, 0x28, 0x62, 0x2c, 0x20, 0x6b, 0x65, 0x79, 0x29, 0x20, 0x26, + 0x26, 0x20, 0x21, 0x28, 0x73, 0x69, 0x7a, 0x65, 0x2d, 0x2d, 0x29, 0x29, + 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x20, 0x3d, 0x20, 0x21, + 0x73, 0x69, 0x7a, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x66, 0x69, 0x72, 0x73, 0x74, 0x20, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x73, 0x74, 0x61, 0x63, 0x6b, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x72, 0x61, + 0x76, 0x65, 0x72, 0x73, 0x65, 0x64, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x61, 0x53, 0x74, 0x61, + 0x63, 0x6b, 0x2e, 0x70, 0x6f, 0x70, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x62, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x2e, 0x70, 0x6f, 0x70, + 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x3b, 0x0a, 0x20, + 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x50, 0x65, + 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x20, 0x61, 0x20, 0x64, 0x65, 0x65, 0x70, + 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x20, + 0x74, 0x6f, 0x20, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x20, 0x69, 0x66, 0x20, + 0x74, 0x77, 0x6f, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x20, + 0x61, 0x72, 0x65, 0x20, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x2e, 0x0a, 0x20, + 0x20, 0x5f, 0x2e, 0x69, 0x73, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x20, 0x3d, + 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x61, 0x2c, + 0x20, 0x62, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x20, 0x65, 0x71, 0x28, 0x61, 0x2c, 0x20, 0x62, + 0x2c, 0x20, 0x5b, 0x5d, 0x2c, 0x20, 0x5b, 0x5d, 0x29, 0x3b, 0x0a, 0x20, + 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x49, 0x73, + 0x20, 0x61, 0x20, 0x67, 0x69, 0x76, 0x65, 0x6e, 0x20, 0x61, 0x72, 0x72, + 0x61, 0x79, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x20, + 0x6f, 0x72, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x3f, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x41, 0x6e, + 0x20, 0x22, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x20, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x20, 0x68, 0x61, 0x73, 
0x20, 0x6e, 0x6f, 0x20, 0x65, + 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x6f, 0x77, + 0x6e, 0x2d, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x69, 0x73, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x6f, 0x62, 0x6a, 0x20, 0x3d, 0x3d, 0x20, 0x6e, + 0x75, 0x6c, 0x6c, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x74, 0x72, 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, + 0x20, 0x28, 0x5f, 0x2e, 0x69, 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, 0x28, + 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7c, 0x7c, 0x20, 0x5f, 0x2e, 0x69, 0x73, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x29, + 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x2e, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x30, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x76, + 0x61, 0x72, 0x20, 0x6b, 0x65, 0x79, 0x20, 0x69, 0x6e, 0x20, 0x6f, 0x62, + 0x6a, 0x29, 0x20, 0x69, 0x66, 0x20, 0x28, 0x5f, 0x2e, 0x68, 0x61, 0x73, + 0x28, 0x6f, 0x62, 0x6a, 0x2c, 0x20, 0x6b, 0x65, 0x79, 0x29, 0x29, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x20, 0x74, 0x72, 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, + 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x49, 0x73, 0x20, 0x61, 0x20, 0x67, + 0x69, 0x76, 0x65, 0x6e, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x61, + 0x20, 0x44, 0x4f, 0x4d, 0x20, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x3f, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x69, 0x73, 0x45, 0x6c, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x21, 0x21, 0x28, + 0x6f, 0x62, 0x6a, 0x20, 0x26, 0x26, 0x20, 0x6f, 0x62, 0x6a, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x54, 0x79, 0x70, 0x65, 0x20, 0x3d, 0x3d, 0x3d, 0x20, + 0x31, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x49, 0x73, 0x20, 0x61, 0x20, 0x67, 0x69, 0x76, 0x65, + 0x6e, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x61, 0x6e, 0x20, 0x61, + 0x72, 0x72, 0x61, 0x79, 0x3f, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x44, + 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x6f, 0x20, + 0x45, 0x43, 0x4d, 0x41, 0x35, 0x27, 0x73, 0x20, 0x6e, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x2e, 0x69, 0x73, 0x41, + 0x72, 0x72, 0x61, 0x79, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x69, 0x73, 0x41, + 0x72, 0x72, 0x61, 0x79, 0x20, 0x3d, 0x20, 0x6e, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x49, 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, 0x20, 0x7c, 0x7c, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6f, 0x62, 0x6a, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x74, 0x6f, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x3d, 0x3d, + 0x20, 0x27, 0x5b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x41, 0x72, + 0x72, 0x61, 0x79, 0x5d, 0x27, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, + 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x49, 0x73, 0x20, 0x61, 0x20, 0x67, + 0x69, 0x76, 0x65, 0x6e, 0x20, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x20, 0x61, 0x6e, 0x20, 0x6f, 0x62, 
0x6a, 0x65, 0x63, 0x74, 0x3f, + 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x69, 0x73, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x20, 0x3d, + 0x3d, 0x3d, 0x20, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x6f, 0x62, + 0x6a, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x41, 0x64, 0x64, 0x20, 0x73, 0x6f, 0x6d, 0x65, 0x20, + 0x69, 0x73, 0x54, 0x79, 0x70, 0x65, 0x20, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x73, 0x3a, 0x20, 0x69, 0x73, 0x41, 0x72, 0x67, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x2c, 0x20, 0x69, 0x73, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2c, 0x20, 0x69, 0x73, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x2c, 0x20, 0x69, 0x73, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x2c, + 0x20, 0x69, 0x73, 0x44, 0x61, 0x74, 0x65, 0x2c, 0x20, 0x69, 0x73, 0x52, + 0x65, 0x67, 0x45, 0x78, 0x70, 0x2e, 0x0a, 0x20, 0x20, 0x65, 0x61, 0x63, + 0x68, 0x28, 0x5b, 0x27, 0x41, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x27, 0x2c, 0x20, 0x27, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x27, 0x2c, 0x20, 0x27, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x27, + 0x2c, 0x20, 0x27, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x27, 0x2c, 0x20, + 0x27, 0x44, 0x61, 0x74, 0x65, 0x27, 0x2c, 0x20, 0x27, 0x52, 0x65, 0x67, + 0x45, 0x78, 0x70, 0x27, 0x5d, 0x2c, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x28, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x20, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x5f, 0x5b, 0x27, 0x69, 0x73, 0x27, 0x20, 0x2b, + 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x5d, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x20, 0x74, 0x6f, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x63, + 0x61, 0x6c, 0x6c, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x3d, 0x3d, 0x20, + 0x27, 0x5b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x27, 0x20, 0x2b, + 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x2b, 0x20, 0x27, 0x5d, 0x27, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x29, + 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x44, 0x65, 0x66, 0x69, + 0x6e, 0x65, 0x20, 0x61, 0x20, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, + 0x6b, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x66, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x20, + 0x69, 0x6e, 0x20, 0x62, 0x72, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x73, 0x20, + 0x28, 0x61, 0x68, 0x65, 0x6d, 0x2c, 0x20, 0x49, 0x45, 0x29, 0x2c, 0x20, + 0x77, 0x68, 0x65, 0x72, 0x65, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x74, + 0x68, 0x65, 0x72, 0x65, 0x20, 0x69, 0x73, 0x6e, 0x27, 0x74, 0x20, 0x61, + 0x6e, 0x79, 0x20, 0x69, 0x6e, 0x73, 0x70, 0x65, 0x63, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x20, 0x22, 0x41, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x22, 0x20, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x0a, 0x20, 0x20, 0x69, + 0x66, 0x20, 0x28, 0x21, 0x5f, 0x2e, 0x69, 0x73, 0x41, 0x72, 0x67, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x28, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x29, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x5f, 0x2e, 0x69, 0x73, 0x41, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 
0x6e, 0x20, 0x21, 0x21, 0x28, + 0x6f, 0x62, 0x6a, 0x20, 0x26, 0x26, 0x20, 0x5f, 0x2e, 0x68, 0x61, 0x73, + 0x28, 0x6f, 0x62, 0x6a, 0x2c, 0x20, 0x27, 0x63, 0x61, 0x6c, 0x6c, 0x65, + 0x65, 0x27, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, + 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x4f, + 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x20, 0x60, 0x69, 0x73, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x60, 0x20, 0x69, 0x66, 0x20, + 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72, 0x69, 0x61, 0x74, 0x65, 0x2e, + 0x0a, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x74, 0x79, 0x70, 0x65, 0x6f, + 0x66, 0x20, 0x28, 0x2f, 0x2e, 0x2f, 0x29, 0x20, 0x21, 0x3d, 0x3d, 0x20, + 0x27, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x27, 0x29, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x5f, 0x2e, 0x69, 0x73, 0x46, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x20, 0x74, 0x79, 0x70, 0x65, 0x6f, 0x66, 0x20, 0x6f, 0x62, 0x6a, + 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x27, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x27, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, + 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x49, 0x73, + 0x20, 0x61, 0x20, 0x67, 0x69, 0x76, 0x65, 0x6e, 0x20, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x20, 0x61, 0x20, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x65, + 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x3f, 0x0a, 0x20, 0x20, 0x5f, + 0x2e, 0x69, 0x73, 0x46, 0x69, 0x6e, 0x69, 0x74, 0x65, 0x20, 0x3d, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6f, 0x62, 0x6a, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x69, 0x73, 0x46, 0x69, 0x6e, 0x69, 0x74, 0x65, 0x28, + 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x26, 0x26, 0x20, 0x21, 0x69, 0x73, 0x4e, + 0x61, 0x4e, 0x28, 0x70, 0x61, 0x72, 0x73, 0x65, 0x46, 0x6c, 0x6f, 0x61, + 0x74, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, + 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x49, 0x73, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x67, 0x69, 0x76, 0x65, 0x6e, 0x20, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x20, 0x60, 0x4e, 0x61, 0x4e, 0x60, 0x3f, 0x20, 0x28, 0x4e, + 0x61, 0x4e, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6f, 0x6e, + 0x6c, 0x79, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x77, 0x68, + 0x69, 0x63, 0x68, 0x20, 0x64, 0x6f, 0x65, 0x73, 0x20, 0x6e, 0x6f, 0x74, + 0x20, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x20, 0x69, 0x74, 0x73, 0x65, 0x6c, + 0x66, 0x29, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x69, 0x73, 0x4e, 0x61, + 0x4e, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x5f, 0x2e, 0x69, 0x73, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x26, + 0x26, 0x20, 0x6f, 0x62, 0x6a, 0x20, 0x21, 0x3d, 0x20, 0x2b, 0x6f, 0x62, + 0x6a, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, + 0x2f, 0x20, 0x49, 0x73, 0x20, 0x61, 0x20, 0x67, 0x69, 0x76, 0x65, 0x6e, + 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x61, 0x20, 0x62, 0x6f, 0x6f, + 0x6c, 0x65, 0x61, 0x6e, 0x3f, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x69, 0x73, + 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x20, 0x3d, 0x20, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 
0x65, 0x74, 0x75, 0x72, 0x6e, + 0x20, 0x6f, 0x62, 0x6a, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x74, 0x72, 0x75, + 0x65, 0x20, 0x7c, 0x7c, 0x20, 0x6f, 0x62, 0x6a, 0x20, 0x3d, 0x3d, 0x3d, + 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x20, 0x7c, 0x7c, 0x20, 0x74, 0x6f, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, + 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x3d, 0x3d, 0x20, 0x27, 0x5b, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x20, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, + 0x5d, 0x27, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x49, 0x73, 0x20, 0x61, 0x20, 0x67, 0x69, 0x76, 0x65, + 0x6e, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x65, 0x71, 0x75, 0x61, + 0x6c, 0x20, 0x74, 0x6f, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x3f, 0x0a, 0x20, + 0x20, 0x5f, 0x2e, 0x69, 0x73, 0x4e, 0x75, 0x6c, 0x6c, 0x20, 0x3d, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6f, 0x62, 0x6a, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x6e, + 0x75, 0x6c, 0x6c, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x49, 0x73, 0x20, 0x61, 0x20, 0x67, 0x69, 0x76, + 0x65, 0x6e, 0x20, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x20, + 0x75, 0x6e, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x3f, 0x0a, 0x20, + 0x20, 0x5f, 0x2e, 0x69, 0x73, 0x55, 0x6e, 0x64, 0x65, 0x66, 0x69, 0x6e, + 0x65, 0x64, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x20, + 0x3d, 0x3d, 0x3d, 0x20, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x30, 0x3b, 0x0a, + 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x53, + 0x68, 0x6f, 0x72, 0x74, 0x63, 0x75, 0x74, 0x20, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x20, 0x69, 0x66, 0x20, 0x61, 0x6e, 0x20, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x68, 0x61, 0x73, 0x20, 0x61, + 0x20, 0x67, 0x69, 0x76, 0x65, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x79, 0x20, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6c, 0x79, + 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x6f, 0x6e, 0x20, 0x69, 0x74, 0x73, + 0x65, 0x6c, 0x66, 0x20, 0x28, 0x69, 0x6e, 0x20, 0x6f, 0x74, 0x68, 0x65, + 0x72, 0x20, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x2c, 0x20, 0x6e, 0x6f, 0x74, + 0x20, 0x6f, 0x6e, 0x20, 0x61, 0x20, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x74, + 0x79, 0x70, 0x65, 0x29, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x68, 0x61, + 0x73, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x28, 0x6f, 0x62, 0x6a, 0x2c, 0x20, 0x6b, 0x65, 0x79, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x68, 0x61, 0x73, 0x4f, 0x77, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x79, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x6f, 0x62, 0x6a, 0x2c, + 0x20, 0x6b, 0x65, 0x79, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, + 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x74, + 0x79, 0x20, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x0a, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, + 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a, 0x0a, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x52, 0x75, 0x6e, 0x20, 0x55, 0x6e, 0x64, + 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x6a, 0x73, 0x20, 0x69, + 0x6e, 0x20, 0x2a, 0x6e, 0x6f, 0x43, 0x6f, 
0x6e, 0x66, 0x6c, 0x69, 0x63, + 0x74, 0x2a, 0x20, 0x6d, 0x6f, 0x64, 0x65, 0x2c, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x68, 0x65, 0x20, 0x60, + 0x5f, 0x60, 0x20, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x20, + 0x74, 0x6f, 0x20, 0x69, 0x74, 0x73, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x20, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x2e, 0x20, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x20, + 0x61, 0x20, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, + 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x55, 0x6e, 0x64, 0x65, 0x72, + 0x73, 0x63, 0x6f, 0x72, 0x65, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x6e, 0x6f, 0x43, 0x6f, 0x6e, 0x66, + 0x6c, 0x69, 0x63, 0x74, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x5f, 0x20, 0x3d, 0x20, 0x70, 0x72, 0x65, + 0x76, 0x69, 0x6f, 0x75, 0x73, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, + 0x6f, 0x72, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x20, 0x74, 0x68, 0x69, 0x73, 0x3b, 0x0a, 0x20, 0x20, + 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x4b, 0x65, 0x65, + 0x70, 0x20, 0x74, 0x68, 0x65, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, + 0x61, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x69, 0x74, 0x65, 0x72, 0x61, + 0x74, 0x6f, 0x72, 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x7d, + 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x52, 0x75, 0x6e, 0x20, + 0x61, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x2a, + 0x2a, 0x6e, 0x2a, 0x2a, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x2e, 0x0a, + 0x20, 0x20, 0x5f, 0x2e, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x20, 0x3d, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6e, 0x2c, 0x20, + 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2c, 0x20, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x76, 0x61, 0x72, 0x20, 0x61, 0x63, 0x63, 0x75, 0x6d, 0x20, 0x3d, + 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x28, 0x6e, 0x29, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x76, 0x61, 0x72, 0x20, + 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x6e, + 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x61, 0x63, 0x63, 0x75, 0x6d, + 0x5b, 0x69, 0x5d, 0x20, 0x3d, 0x20, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x2c, 0x20, 0x69, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x61, 0x63, 0x63, 0x75, + 0x6d, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, + 0x2f, 0x20, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x61, 0x20, 0x72, + 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, + 0x72, 0x20, 0x62, 0x65, 0x74, 0x77, 0x65, 0x65, 0x6e, 0x20, 0x6d, 0x69, + 0x6e, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x6d, 0x61, 0x78, 0x20, 0x28, 0x69, + 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 
0x65, 0x29, 0x2e, 0x0a, 0x20, + 0x20, 0x5f, 0x2e, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x20, 0x3d, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6d, 0x69, 0x6e, + 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x69, 0x66, 0x20, 0x28, 0x6d, 0x61, 0x78, 0x20, 0x3d, 0x3d, 0x20, + 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x6d, 0x61, 0x78, 0x20, 0x3d, 0x20, 0x6d, 0x69, 0x6e, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x69, 0x6e, 0x20, 0x3d, + 0x20, 0x30, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6d, 0x69, 0x6e, + 0x20, 0x2b, 0x20, 0x4d, 0x61, 0x74, 0x68, 0x2e, 0x66, 0x6c, 0x6f, 0x6f, + 0x72, 0x28, 0x4d, 0x61, 0x74, 0x68, 0x2e, 0x72, 0x61, 0x6e, 0x64, 0x6f, + 0x6d, 0x28, 0x29, 0x20, 0x2a, 0x20, 0x28, 0x6d, 0x61, 0x78, 0x20, 0x2d, + 0x20, 0x6d, 0x69, 0x6e, 0x20, 0x2b, 0x20, 0x31, 0x29, 0x29, 0x3b, 0x0a, + 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x4c, + 0x69, 0x73, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x48, 0x54, 0x4d, 0x4c, 0x20, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x65, 0x73, 0x63, 0x61, 0x70, 0x69, 0x6e, 0x67, 0x2e, 0x0a, 0x20, + 0x20, 0x76, 0x61, 0x72, 0x20, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4d, + 0x61, 0x70, 0x20, 0x3d, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x65, + 0x73, 0x63, 0x61, 0x70, 0x65, 0x3a, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x27, 0x26, 0x27, 0x3a, 0x20, 0x27, 0x26, 0x61, 0x6d, + 0x70, 0x3b, 0x27, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x27, + 0x3c, 0x27, 0x3a, 0x20, 0x27, 0x26, 0x6c, 0x74, 0x3b, 0x27, 0x2c, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x27, 0x3e, 0x27, 0x3a, 0x20, 0x27, + 0x26, 0x67, 0x74, 0x3b, 0x27, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x27, 0x22, 0x27, 0x3a, 0x20, 0x27, 0x26, 0x71, 0x75, 0x6f, 0x74, + 0x3b, 0x27, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x22, 0x27, + 0x22, 0x3a, 0x20, 0x27, 0x26, 0x23, 0x78, 0x32, 0x37, 0x3b, 0x27, 0x2c, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x27, 0x2f, 0x27, 0x3a, 0x20, + 0x27, 0x26, 0x23, 0x78, 0x32, 0x46, 0x3b, 0x27, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x4d, 0x61, 0x70, 0x2e, 0x75, 0x6e, 0x65, 0x73, + 0x63, 0x61, 0x70, 0x65, 0x20, 0x3d, 0x20, 0x5f, 0x2e, 0x69, 0x6e, 0x76, + 0x65, 0x72, 0x74, 0x28, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4d, 0x61, + 0x70, 0x2e, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x29, 0x3b, 0x0a, 0x0a, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x52, 0x65, 0x67, 0x65, 0x78, 0x65, 0x73, + 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x6b, 0x65, 0x79, 0x73, 0x20, 0x61, 0x6e, 0x64, + 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x20, 0x6c, 0x69, 0x73, 0x74, + 0x65, 0x64, 0x20, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, + 0x6c, 0x79, 0x20, 0x61, 0x62, 0x6f, 0x76, 0x65, 0x2e, 0x0a, 0x20, 0x20, + 0x76, 0x61, 0x72, 0x20, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, + 0x67, 0x65, 0x78, 0x65, 0x73, 0x20, 0x3d, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x3a, 0x20, 0x20, 0x20, + 0x6e, 0x65, 0x77, 0x20, 0x52, 0x65, 0x67, 0x45, 0x78, 0x70, 0x28, 0x27, + 0x5b, 0x27, 0x20, 0x2b, 0x20, 0x5f, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x28, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4d, 0x61, 0x70, 0x2e, 0x65, 0x73, + 0x63, 0x61, 0x70, 0x65, 0x29, 0x2e, 0x6a, 
0x6f, 0x69, 0x6e, 0x28, 0x27, + 0x27, 0x29, 0x20, 0x2b, 0x20, 0x27, 0x5d, 0x27, 0x2c, 0x20, 0x27, 0x67, + 0x27, 0x29, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x75, 0x6e, 0x65, 0x73, + 0x63, 0x61, 0x70, 0x65, 0x3a, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x52, 0x65, + 0x67, 0x45, 0x78, 0x70, 0x28, 0x27, 0x28, 0x27, 0x20, 0x2b, 0x20, 0x5f, + 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x28, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x4d, 0x61, 0x70, 0x2e, 0x75, 0x6e, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, + 0x29, 0x2e, 0x6a, 0x6f, 0x69, 0x6e, 0x28, 0x27, 0x7c, 0x27, 0x29, 0x20, + 0x2b, 0x20, 0x27, 0x29, 0x27, 0x2c, 0x20, 0x27, 0x67, 0x27, 0x29, 0x0a, + 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x65, 0x73, 0x63, 0x61, 0x70, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x6e, + 0x64, 0x20, 0x75, 0x6e, 0x65, 0x73, 0x63, 0x61, 0x70, 0x69, 0x6e, 0x67, + 0x20, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x74, 0x6f, 0x2f, + 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x48, 0x54, 0x4d, 0x4c, 0x20, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x65, 0x61, 0x63, 0x68, 0x28, 0x5b, 0x27, + 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x27, 0x2c, 0x20, 0x27, 0x75, 0x6e, + 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x27, 0x5d, 0x2c, 0x20, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x5f, 0x5b, 0x6d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5d, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, + 0x20, 0x28, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x3d, 0x3d, 0x20, + 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x20, 0x27, 0x27, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x28, 0x27, 0x27, 0x20, 0x2b, 0x20, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x29, 0x2e, 0x72, 0x65, 0x70, 0x6c, + 0x61, 0x63, 0x65, 0x28, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, + 0x67, 0x65, 0x78, 0x65, 0x73, 0x5b, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x5d, 0x2c, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4d, 0x61, 0x70, 0x5b, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x5d, 0x5b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5d, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x29, 0x3b, + 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x49, 0x66, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x20, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x79, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x6e, + 0x20, 0x69, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x20, 0x69, 0x74, 0x3b, 0x0a, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x77, 0x69, + 0x73, 0x65, 0x2c, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x69, + 0x74, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x28, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 
0x2c, 0x20, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x79, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x69, 0x66, 0x20, 0x28, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, + 0x3d, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, + 0x3d, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5b, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x79, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x5f, 0x2e, 0x69, 0x73, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x29, 0x20, 0x3f, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x63, + 0x61, 0x6c, 0x6c, 0x28, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x29, 0x20, + 0x3a, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x7d, + 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x41, 0x64, 0x64, 0x20, + 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e, 0x20, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x55, 0x6e, 0x64, + 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x20, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x6d, 0x69, 0x78, 0x69, + 0x6e, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x65, 0x61, 0x63, 0x68, 0x28, 0x5f, 0x2e, 0x66, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x2c, 0x20, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6e, 0x61, 0x6d, 0x65, + 0x29, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, + 0x20, 0x66, 0x75, 0x6e, 0x63, 0x20, 0x3d, 0x20, 0x5f, 0x5b, 0x6e, 0x61, + 0x6d, 0x65, 0x5d, 0x20, 0x3d, 0x20, 0x6f, 0x62, 0x6a, 0x5b, 0x6e, 0x61, + 0x6d, 0x65, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5f, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x5b, 0x6e, + 0x61, 0x6d, 0x65, 0x5d, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x61, 0x72, 0x67, 0x73, + 0x20, 0x3d, 0x20, 0x5b, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x5f, 0x77, 0x72, + 0x61, 0x70, 0x70, 0x65, 0x64, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x70, 0x75, 0x73, 0x68, 0x2e, 0x61, 0x70, 0x70, + 0x6c, 0x79, 0x28, 0x61, 0x72, 0x67, 0x73, 0x2c, 0x20, 0x61, 0x72, 0x67, + 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, + 0x74, 0x68, 0x69, 0x73, 0x2c, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x2e, 0x61, + 0x70, 0x70, 0x6c, 0x79, 0x28, 0x5f, 0x2c, 0x20, 0x61, 0x72, 0x67, 0x73, + 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, + 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x20, 0x61, 0x20, 0x75, 0x6e, 0x69, 0x71, 0x75, + 0x65, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x20, 0x69, 0x64, + 0x20, 0x28, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x20, 0x77, 0x69, 0x74, + 0x68, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x6e, 0x74, 0x69, + 0x72, 0x65, 0x20, 0x63, 0x6c, 0x69, 0x65, 
0x6e, 0x74, 0x20, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x29, 0x2e, 0x0a, 0x20, 0x20, 0x2f, 0x2f, + 0x20, 0x55, 0x73, 0x65, 0x66, 0x75, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x20, 0x44, 0x4f, + 0x4d, 0x20, 0x69, 0x64, 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x76, 0x61, 0x72, + 0x20, 0x69, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x20, 0x3d, + 0x20, 0x30, 0x3b, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x49, 0x64, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x28, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x29, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x69, 0x64, + 0x20, 0x3d, 0x20, 0x2b, 0x2b, 0x69, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x65, 0x72, 0x20, 0x2b, 0x20, 0x27, 0x27, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x20, 0x3f, 0x20, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x20, + 0x2b, 0x20, 0x69, 0x64, 0x20, 0x3a, 0x20, 0x69, 0x64, 0x3b, 0x0a, 0x20, + 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x42, 0x79, + 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2c, 0x20, 0x55, 0x6e, + 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x20, 0x75, 0x73, 0x65, + 0x73, 0x20, 0x45, 0x52, 0x42, 0x2d, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x20, + 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x64, 0x65, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x73, 0x2c, 0x20, 0x63, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x20, 0x74, 0x68, 0x65, 0x0a, 0x20, 0x20, 0x2f, 0x2f, + 0x20, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x69, 0x6e, 0x67, 0x20, 0x74, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x73, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x75, 0x73, 0x65, 0x20, + 0x61, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x20, + 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x0a, + 0x20, 0x20, 0x5f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x3d, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, + 0x65, 0x20, 0x20, 0x20, 0x20, 0x3a, 0x20, 0x2f, 0x3c, 0x25, 0x28, 0x5b, + 0x5c, 0x73, 0x5c, 0x53, 0x5d, 0x2b, 0x3f, 0x29, 0x25, 0x3e, 0x2f, 0x67, + 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x6f, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x3a, 0x20, 0x2f, 0x3c, 0x25, 0x3d, + 0x28, 0x5b, 0x5c, 0x73, 0x5c, 0x53, 0x5d, 0x2b, 0x3f, 0x29, 0x25, 0x3e, + 0x2f, 0x67, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x65, 0x73, 0x63, 0x61, + 0x70, 0x65, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3a, 0x20, 0x2f, 0x3c, + 0x25, 0x2d, 0x28, 0x5b, 0x5c, 0x73, 0x5c, 0x53, 0x5d, 0x2b, 0x3f, 0x29, + 0x25, 0x3e, 0x2f, 0x67, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x57, 0x68, 0x65, 0x6e, 0x20, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x69, 0x7a, 0x69, 0x6e, 0x67, 0x20, 0x60, 0x74, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x60, 0x2c, 0x20, 0x69, 0x66, 0x20, 0x79, 0x6f, 0x75, 0x20, + 0x64, 0x6f, 0x6e, 0x27, 0x74, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20, 0x74, + 0x6f, 0x20, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x20, 0x61, 0x6e, 0x0a, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x6f, + 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x20, 0x65, 0x76, 0x61, 0x6c, + 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x72, 0x20, 0x65, 0x73, + 0x63, 0x61, 0x70, 0x69, 0x6e, 0x67, 0x20, 
0x72, 0x65, 0x67, 0x65, 0x78, + 0x2c, 0x20, 0x77, 0x65, 0x20, 0x6e, 0x65, 0x65, 0x64, 0x20, 0x6f, 0x6e, + 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x69, 0x73, 0x0a, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x67, 0x75, 0x61, 0x72, 0x61, 0x6e, 0x74, 0x65, 0x65, + 0x64, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x2e, 0x0a, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x6e, 0x6f, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x20, 0x3d, 0x20, 0x2f, 0x28, 0x2e, 0x29, + 0x5e, 0x2f, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x43, 0x65, + 0x72, 0x74, 0x61, 0x69, 0x6e, 0x20, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, + 0x74, 0x65, 0x72, 0x73, 0x20, 0x6e, 0x65, 0x65, 0x64, 0x20, 0x74, 0x6f, + 0x20, 0x62, 0x65, 0x20, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x20, + 0x73, 0x6f, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x74, 0x68, 0x65, 0x79, + 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, 0x20, 0x70, 0x75, 0x74, 0x20, + 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x61, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x6c, 0x69, 0x74, 0x65, 0x72, + 0x61, 0x6c, 0x2e, 0x0a, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x65, 0x73, + 0x63, 0x61, 0x70, 0x65, 0x73, 0x20, 0x3d, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x22, 0x27, 0x22, 0x3a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x22, 0x27, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x27, 0x5c, 0x5c, + 0x27, 0x3a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x27, 0x5c, 0x5c, 0x27, 0x2c, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x27, 0x5c, 0x72, 0x27, 0x3a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x27, 0x72, 0x27, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x27, 0x5c, 0x6e, 0x27, 0x3a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x27, 0x6e, + 0x27, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x27, 0x5c, 0x74, 0x27, 0x3a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x27, 0x74, 0x27, 0x2c, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x27, 0x5c, 0x75, 0x32, 0x30, 0x32, 0x38, 0x27, 0x3a, 0x20, + 0x27, 0x75, 0x32, 0x30, 0x32, 0x38, 0x27, 0x2c, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x27, 0x5c, 0x75, 0x32, 0x30, 0x32, 0x39, 0x27, 0x3a, 0x20, 0x27, + 0x75, 0x32, 0x30, 0x32, 0x39, 0x27, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, + 0x0a, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x65, 0x73, 0x63, 0x61, 0x70, + 0x65, 0x72, 0x20, 0x3d, 0x20, 0x2f, 0x5c, 0x5c, 0x7c, 0x27, 0x7c, 0x5c, + 0x72, 0x7c, 0x5c, 0x6e, 0x7c, 0x5c, 0x74, 0x7c, 0x5c, 0x75, 0x32, 0x30, + 0x32, 0x38, 0x7c, 0x5c, 0x75, 0x32, 0x30, 0x32, 0x39, 0x2f, 0x67, 0x3b, + 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x4a, 0x61, 0x76, 0x61, 0x53, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x20, 0x6d, 0x69, 0x63, 0x72, 0x6f, 0x2d, + 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x2c, 0x20, + 0x73, 0x69, 0x6d, 0x69, 0x6c, 0x61, 0x72, 0x20, 0x74, 0x6f, 0x20, 0x4a, + 0x6f, 0x68, 0x6e, 0x20, 0x52, 0x65, 0x73, 0x69, 0x67, 0x27, 0x73, 0x20, + 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x55, 0x6e, 0x64, + 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x20, 0x74, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, + 0x65, 0x73, 0x20, 0x61, 0x72, 0x62, 0x69, 0x74, 0x72, 0x61, 0x72, 0x79, + 0x20, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x73, 0x2c, + 0x20, 0x70, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x73, 0x20, 0x77, + 0x68, 0x69, 0x74, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2c, 0x0a, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x63, 0x6f, 0x72, 0x72, + 0x65, 0x63, 0x74, 0x6c, 0x79, 0x20, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, + 0x73, 0x20, 0x71, 0x75, 0x6f, 0x74, 0x65, 
0x73, 0x20, 0x77, 0x69, 0x74, + 0x68, 0x69, 0x6e, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x6f, 0x6c, + 0x61, 0x74, 0x65, 0x64, 0x20, 0x63, 0x6f, 0x64, 0x65, 0x2e, 0x0a, 0x20, + 0x20, 0x5f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, + 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x74, + 0x65, 0x78, 0x74, 0x2c, 0x20, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x20, 0x73, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, + 0x72, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x20, 0x3d, 0x20, 0x5f, 0x2e, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x28, 0x7b, 0x7d, 0x2c, 0x20, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2c, 0x20, 0x5f, 0x2e, 0x74, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x20, 0x64, 0x65, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x65, 0x72, 0x73, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, + 0x6f, 0x6e, 0x65, 0x20, 0x72, 0x65, 0x67, 0x75, 0x6c, 0x61, 0x72, 0x20, + 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x76, + 0x69, 0x61, 0x20, 0x61, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x6e, 0x65, + 0x77, 0x20, 0x52, 0x65, 0x67, 0x45, 0x78, 0x70, 0x28, 0x5b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x2e, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x20, 0x7c, 0x7c, + 0x20, 0x6e, 0x6f, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x29, 0x2e, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x28, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x6f, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x7c, 0x7c, + 0x20, 0x6e, 0x6f, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x29, 0x2e, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x28, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x65, 0x76, + 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x20, 0x7c, 0x7c, 0x20, 0x6e, 0x6f, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x29, 0x2e, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x5d, 0x2e, 0x6a, 0x6f, 0x69, 0x6e, + 0x28, 0x27, 0x7c, 0x27, 0x29, 0x20, 0x2b, 0x20, 0x27, 0x7c, 0x24, 0x27, + 0x2c, 0x20, 0x27, 0x67, 0x27, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x20, 0x65, 0x73, 0x63, + 0x61, 0x70, 0x69, 0x6e, 0x67, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x20, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x20, 0x61, 0x70, + 0x70, 0x72, 0x6f, 0x70, 0x72, 0x69, 0x61, 0x74, 0x65, 0x6c, 0x79, 0x2e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x76, 0x61, 0x72, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x3d, + 0x20, 0x22, 0x5f, 0x5f, 0x70, 0x2b, 0x3d, 0x27, 0x22, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x72, 0x65, 0x70, 0x6c, + 0x61, 0x63, 0x65, 0x28, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2c, + 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 
0x6f, 0x6e, 0x28, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x2c, 0x20, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x2c, + 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x6f, 0x6c, 0x61, 0x74, 0x65, + 0x2c, 0x20, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x2c, 0x20, + 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x2b, + 0x3d, 0x20, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x73, 0x6c, 0x69, 0x63, 0x65, + 0x28, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2c, 0x20, 0x6f, 0x66, 0x66, 0x73, + 0x65, 0x74, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x65, 0x73, 0x63, + 0x61, 0x70, 0x65, 0x72, 0x2c, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x28, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x29, 0x20, 0x7b, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x27, 0x5c, 0x5c, 0x27, 0x20, + 0x2b, 0x20, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x73, 0x5b, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x5d, 0x3b, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x65, 0x73, 0x63, + 0x61, 0x70, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x2b, 0x3d, + 0x20, 0x22, 0x27, 0x2b, 0x5c, 0x6e, 0x28, 0x28, 0x5f, 0x5f, 0x74, 0x3d, + 0x28, 0x22, 0x20, 0x2b, 0x20, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x20, + 0x2b, 0x20, 0x22, 0x29, 0x29, 0x3d, 0x3d, 0x6e, 0x75, 0x6c, 0x6c, 0x3f, + 0x27, 0x27, 0x3a, 0x5f, 0x2e, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x28, + 0x5f, 0x5f, 0x74, 0x29, 0x29, 0x2b, 0x5c, 0x6e, 0x27, 0x22, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x6f, 0x6c, 0x61, 0x74, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, + 0x2b, 0x3d, 0x20, 0x22, 0x27, 0x2b, 0x5c, 0x6e, 0x28, 0x28, 0x5f, 0x5f, + 0x74, 0x3d, 0x28, 0x22, 0x20, 0x2b, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x6f, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x2b, 0x20, 0x22, 0x29, 0x29, + 0x3d, 0x3d, 0x6e, 0x75, 0x6c, 0x6c, 0x3f, 0x27, 0x27, 0x3a, 0x5f, 0x5f, + 0x74, 0x29, 0x2b, 0x5c, 0x6e, 0x27, 0x22, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, + 0x66, 0x20, 0x28, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x2b, 0x3d, 0x20, 0x22, 0x27, 0x3b, + 0x5c, 0x6e, 0x22, 0x20, 0x2b, 0x20, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, + 0x74, 0x65, 0x20, 0x2b, 0x20, 0x22, 0x5c, 0x6e, 0x5f, 0x5f, 0x70, 0x2b, + 0x3d, 0x27, 0x22, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x20, 0x3d, 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x20, 0x2b, 0x20, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x20, 0x2b, 0x3d, 0x20, 0x22, 0x27, 0x3b, 0x5c, + 0x6e, 0x22, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x49, 0x66, 0x20, 0x61, 0x20, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x20, 0x69, 0x73, 0x20, 0x6e, 0x6f, 
0x74, 0x20, 0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2c, 0x20, 0x70, 0x6c, 0x61, 0x63, + 0x65, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x73, 0x20, 0x69, 0x6e, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x20, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, + 0x20, 0x28, 0x21, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, + 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x29, 0x20, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x20, 0x3d, 0x20, 0x27, 0x77, 0x69, 0x74, 0x68, + 0x28, 0x6f, 0x62, 0x6a, 0x7c, 0x7c, 0x7b, 0x7d, 0x29, 0x7b, 0x5c, 0x6e, + 0x27, 0x20, 0x2b, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x2b, + 0x20, 0x27, 0x7d, 0x5c, 0x6e, 0x27, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x3d, 0x20, 0x22, 0x76, + 0x61, 0x72, 0x20, 0x5f, 0x5f, 0x74, 0x2c, 0x5f, 0x5f, 0x70, 0x3d, 0x27, + 0x27, 0x2c, 0x5f, 0x5f, 0x6a, 0x3d, 0x41, 0x72, 0x72, 0x61, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6a, 0x6f, + 0x69, 0x6e, 0x2c, 0x22, 0x20, 0x2b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x22, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x3d, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x7b, 0x5f, 0x5f, 0x70, 0x2b, 0x3d, + 0x5f, 0x5f, 0x6a, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x61, 0x72, 0x67, + 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2c, 0x27, 0x27, 0x29, 0x3b, 0x7d, + 0x3b, 0x5c, 0x6e, 0x22, 0x20, 0x2b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x2b, 0x20, 0x22, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x5f, 0x5f, 0x70, 0x3b, 0x5c, 0x6e, + 0x22, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x79, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x6e, 0x64, + 0x65, 0x72, 0x20, 0x3d, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x46, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x2e, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x20, + 0x7c, 0x7c, 0x20, 0x27, 0x6f, 0x62, 0x6a, 0x27, 0x2c, 0x20, 0x27, 0x5f, + 0x27, 0x2c, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x29, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x63, 0x61, 0x74, 0x63, 0x68, 0x20, + 0x28, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x65, 0x2e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x3d, 0x20, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x74, 0x68, 0x72, 0x6f, 0x77, 0x20, 0x65, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, + 0x28, 0x64, 0x61, 0x74, 0x61, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x28, 0x64, 0x61, 0x74, + 0x61, 0x2c, 0x20, 0x5f, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, + 0x61, 0x72, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, + 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x64, + 0x61, 0x74, 0x61, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x72, 0x65, 0x6e, 0x64, + 0x65, 0x72, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x74, 0x68, 0x69, 0x73, + 0x2c, 0x20, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x20, 0x5f, 0x29, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 
0x6e, 0x20, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x20, 0x61, 0x73, 0x20, 0x61, 0x20, 0x63, 0x6f, 0x6e, + 0x76, 0x65, 0x6e, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x74, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x20, 0x3d, 0x20, 0x27, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x28, 0x27, 0x20, 0x2b, 0x20, 0x28, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x2e, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x20, + 0x7c, 0x7c, 0x20, 0x27, 0x6f, 0x62, 0x6a, 0x27, 0x29, 0x20, 0x2b, 0x20, + 0x27, 0x29, 0x7b, 0x5c, 0x6e, 0x27, 0x20, 0x2b, 0x20, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x20, 0x2b, 0x20, 0x27, 0x7d, 0x27, 0x3b, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x74, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x7d, + 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x41, 0x64, 0x64, 0x20, + 0x61, 0x20, 0x22, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x22, 0x20, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x20, 0x77, 0x68, 0x69, 0x63, + 0x68, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x64, 0x65, 0x6c, 0x65, 0x67, + 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, + 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x5f, + 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x28, + 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, + 0x2f, 0x20, 0x4f, 0x4f, 0x50, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x2d, + 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, + 0x2d, 0x2d, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x49, 0x66, 0x20, 0x55, + 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x20, 0x69, 0x73, + 0x20, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x20, 0x61, 0x73, 0x20, 0x61, + 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x20, 0x69, + 0x74, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x20, 0x61, 0x20, + 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x20, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x20, 0x74, 0x68, 0x61, 0x74, 0x0a, 0x20, 0x20, 0x2f, 0x2f, + 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, 0x20, 0x75, 0x73, 0x65, 0x64, + 0x20, 0x4f, 0x4f, 0x2d, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x2e, 0x20, 0x54, + 0x68, 0x69, 0x73, 0x20, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x20, + 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x20, 0x61, 0x6c, 0x74, 0x65, 0x72, 0x65, + 0x64, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x6f, + 0x66, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x74, 0x68, 0x65, 0x0a, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, + 0x65, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x20, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x20, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65, 0x20, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x65, 0x64, 0x2e, 0x0a, 0x0a, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x48, 0x65, 0x6c, 0x70, 0x65, 0x72, 0x20, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x74, 0x6f, 0x20, 0x63, 0x6f, + 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x20, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x69, 0x6e, 0x67, 0x20, 0x69, 0x6e, 0x74, 
0x65, 0x72, 0x6d, 0x65, 0x64, + 0x69, 0x61, 0x74, 0x65, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x2e, 0x0a, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x28, 0x6f, 0x62, 0x6a, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x74, 0x68, 0x69, 0x73, + 0x2e, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x20, 0x3f, 0x20, 0x5f, 0x28, + 0x6f, 0x62, 0x6a, 0x29, 0x2e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x28, 0x29, + 0x20, 0x3a, 0x20, 0x6f, 0x62, 0x6a, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x3b, + 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x41, 0x64, 0x64, 0x20, 0x61, + 0x6c, 0x6c, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x55, 0x6e, + 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x20, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x20, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x6d, 0x69, + 0x78, 0x69, 0x6e, 0x28, 0x5f, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, + 0x2f, 0x20, 0x41, 0x64, 0x64, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x6d, 0x75, + 0x74, 0x61, 0x74, 0x6f, 0x72, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x74, 0x6f, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, + 0x2e, 0x0a, 0x20, 0x20, 0x65, 0x61, 0x63, 0x68, 0x28, 0x5b, 0x27, 0x70, + 0x6f, 0x70, 0x27, 0x2c, 0x20, 0x27, 0x70, 0x75, 0x73, 0x68, 0x27, 0x2c, + 0x20, 0x27, 0x72, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x27, 0x2c, 0x20, + 0x27, 0x73, 0x68, 0x69, 0x66, 0x74, 0x27, 0x2c, 0x20, 0x27, 0x73, 0x6f, + 0x72, 0x74, 0x27, 0x2c, 0x20, 0x27, 0x73, 0x70, 0x6c, 0x69, 0x63, 0x65, + 0x27, 0x2c, 0x20, 0x27, 0x75, 0x6e, 0x73, 0x68, 0x69, 0x66, 0x74, 0x27, + 0x5d, 0x2c, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, + 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x76, 0x61, 0x72, 0x20, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x20, 0x3d, + 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5b, + 0x6e, 0x61, 0x6d, 0x65, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x5f, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x5b, 0x6e, + 0x61, 0x6d, 0x65, 0x5d, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x6f, 0x62, 0x6a, 0x20, 0x3d, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x2e, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x64, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x6f, 0x62, + 0x6a, 0x2c, 0x20, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, + 0x28, 0x28, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x27, 0x73, + 0x68, 0x69, 0x66, 0x74, 0x27, 0x20, 0x7c, 0x7c, 0x20, 0x6e, 0x61, 0x6d, + 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x27, 0x73, 0x70, 0x6c, 0x69, 0x63, 0x65, + 0x27, 0x29, 0x20, 0x26, 0x26, 0x20, 0x6f, 0x62, 0x6a, 0x2e, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x30, 0x29, 0x20, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x20, 0x6f, 0x62, 0x6a, 0x5b, 0x30, + 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x63, + 0x61, 0x6c, 0x6c, 0x28, 0x74, 0x68, 0x69, 
0x73, 0x2c, 0x20, 0x6f, 0x62, + 0x6a, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x20, + 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x41, + 0x64, 0x64, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6f, 0x72, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x20, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x2e, 0x0a, + 0x20, 0x20, 0x65, 0x61, 0x63, 0x68, 0x28, 0x5b, 0x27, 0x63, 0x6f, 0x6e, + 0x63, 0x61, 0x74, 0x27, 0x2c, 0x20, 0x27, 0x6a, 0x6f, 0x69, 0x6e, 0x27, + 0x2c, 0x20, 0x27, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x27, 0x5d, 0x2c, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x6e, 0x61, 0x6d, + 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, + 0x20, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x20, 0x3d, 0x20, 0x41, 0x72, + 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5b, 0x6e, 0x61, 0x6d, + 0x65, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x5f, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x5b, 0x6e, 0x61, 0x6d, 0x65, + 0x5d, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2c, 0x20, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x79, + 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70, + 0x65, 0x64, 0x2c, 0x20, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, + 0x20, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x5f, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x28, 0x5f, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x74, 0x79, 0x70, 0x65, 0x2c, 0x20, 0x7b, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x53, 0x74, 0x61, 0x72, 0x74, 0x20, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x77, 0x72, + 0x61, 0x70, 0x70, 0x65, 0x64, 0x20, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, + 0x63, 0x6f, 0x72, 0x65, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x3a, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, + 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x20, 0x3d, 0x20, 0x74, 0x72, 0x75, + 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x20, 0x74, 0x68, 0x69, 0x73, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x2c, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, + 0x20, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x73, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x20, 0x66, 0x72, 0x6f, + 0x6d, 0x20, 0x61, 0x20, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x20, + 0x61, 0x6e, 0x64, 0x20, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x65, 0x64, 0x20, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x74, 0x68, 0x69, + 0x73, 0x2e, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x7d, 0x29, 0x3b, + 0x0a, 0x0a, 0x7d, 0x29, 0x2e, 0x63, 0x61, 
0x6c, 0x6c, 0x28, 0x74, 0x68, + 0x69, 0x73, 0x29, 0x3b, 0x0a, + } +} + +// Underscore.js 1.4.4 +// http://underscorejs.org +// (c) 2009-2013 Jeremy Ashkenas, DocumentCloud Inc. +// Underscore may be freely distributed under the MIT license. diff --git a/vendor/github.com/robertkrimen/otto/underscore/testify b/vendor/github.com/robertkrimen/otto/underscore/testify new file mode 100755 index 000000000..7f6e0f7c1 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/underscore/testify @@ -0,0 +1,84 @@ +#!/usr/bin/env perl + +use strict; +use warnings; + +my $underscore_test = shift @ARGV || ""; +if (!-d $underscore_test) { + print <<_END_; +Usage: + + testify ./underscore/test + + # Should look something like: + arrays.js + chaining.js + collections.js + functions.js + index.html + objects.js + speed.js + utility.js + vendor + +_END_ + if ($underscore_test) { + die "!: Not a directory: $underscore_test\n" + } + exit; +} + +chdir $underscore_test or die "!: $!"; + +my @js = <*.js>; + +for my $file (@js) { + open my $fh, '<', $file or die "!: $!"; + my $tests = join "", <$fh>; + my @tests = $tests =~ m/ + ^(\s{2}test\(.*? + ^\s{2}}\);)$ + /mgxs; + close $fh; + next unless @tests; + print "$file: ", scalar(@tests), "\n"; + my $underscore_name = "underscore_$file"; + $underscore_name =~ s/.js$//; + my $go_file = "${underscore_name}_test.go"; + $go_file =~ s/.js$/.go/; + open $fh, '>', $go_file or die "!: $!"; + + $fh->print(<<_END_); +package otto + +import ( + "testing" +) + +_END_ + + my $count = 0; + for my $test (@tests) { + $test =~ s/`([^`]+)`/<$1>/g; + my ($name) = $test =~ m/^\s*test\(['"]([^'"]+)['"]/; + $fh->print(<<_END_); +// $name +func Test_${underscore_name}_$count(t *testing.T) { + tt(t, func(){ + test := underscoreTest() + + test(` +$test + `) + }) +} + +_END_ + $count++; + } +} + +# test('#779 - delimeters are applied to unescaped text.', 1, function() { +# var template = _.template('<<\nx\n>>', null, {evaluate: /<<(.*?)>>/g}); +# strictEqual(template(), '<<\nx\n>>'); +# }); diff --git a/vendor/github.com/robertkrimen/otto/underscore/underscore.go b/vendor/github.com/robertkrimen/otto/underscore/underscore.go new file mode 100644 index 000000000..714b8f3cf --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/underscore/underscore.go @@ -0,0 +1,49 @@ +/* +Package underscore contains the source for the JavaScript utility-belt library. + + import ( + _ "github.com/robertkrimen/otto/underscore" + ) + // Every Otto runtime will now include underscore + +http://underscorejs.org + +https://github.com/documentcloud/underscore + +By importing this package, you'll automatically load underscore every time you create a new Otto runtime. + +To prevent this behavior, you can do the following: + + import ( + "github.com/robertkrimen/otto/underscore" + ) + + func init() { + underscore.Disable() + } + +*/ +package underscore + +import ( + "github.com/robertkrimen/otto/registry" +) + +var entry *registry.Entry = registry.Register(func() string { + return Source() +}) + +// Enable underscore runtime inclusion. +func Enable() { + entry.Enable() +} + +// Disable underscore runtime inclusion. +func Disable() { + entry.Disable() +} + +// Source returns the underscore source. 
+func Source() string { + return string(underscore()) +} diff --git a/vendor/github.com/robertkrimen/otto/value.go b/vendor/github.com/robertkrimen/otto/value.go new file mode 100644 index 000000000..90c140604 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/value.go @@ -0,0 +1,1033 @@ +package otto + +import ( + "fmt" + "math" + "reflect" + "strconv" + "unicode/utf16" +) + +type _valueKind int + +const ( + valueUndefined _valueKind = iota + valueNull + valueNumber + valueString + valueBoolean + valueObject + + // These are invalid outside of the runtime + valueEmpty + valueResult + valueReference +) + +// Value is the representation of a JavaScript value. +type Value struct { + kind _valueKind + value interface{} +} + +func (value Value) safe() bool { + return value.kind < valueEmpty +} + +var ( + emptyValue = Value{kind: valueEmpty} + nullValue = Value{kind: valueNull} + falseValue = Value{kind: valueBoolean, value: false} + trueValue = Value{kind: valueBoolean, value: true} +) + +// ToValue will convert an interface{} value to a value digestible by otto/JavaScript +// +// This function will not work for advanced types (struct, map, slice/array, etc.) and +// you should use Otto.ToValue instead. +func ToValue(value interface{}) (Value, error) { + result := Value{} + err := catchPanic(func() { + result = toValue(value) + }) + return result, err +} + +func (value Value) isEmpty() bool { + return value.kind == valueEmpty +} + +// Undefined + +// UndefinedValue will return a Value representing undefined. +func UndefinedValue() Value { + return Value{} +} + +// IsDefined will return false if the value is undefined, and true otherwise. +func (value Value) IsDefined() bool { + return value.kind != valueUndefined +} + +// IsUndefined will return true if the value is undefined, and false otherwise. +func (value Value) IsUndefined() bool { + return value.kind == valueUndefined +} + +// NullValue will return a Value representing null. +func NullValue() Value { + return Value{kind: valueNull} +} + +// IsNull will return true if the value is null, and false otherwise. +func (value Value) IsNull() bool { + return value.kind == valueNull +} + +// --- + +func (value Value) isCallable() bool { + switch value := value.value.(type) { + case *_object: + return value.isCall() + } + return false +} + +// Call the value as a function with the given this value and argument list and +// return the result of invocation. It is essentially equivalent to: +// +// value.apply(thisValue, argumentList) +// +// An undefined value and an error will result if: +// +// 1. There is an error during conversion of the argument list +// 2. The value is not actually a function +// 3. An (uncaught) exception is thrown +// +func (value Value) Call(this Value, argumentList ...interface{}) (Value, error) { + result := Value{} + err := catchPanic(func() { + // FIXME + result = value.call(nil, this, argumentList...) + }) + if !value.safe() { + value = Value{} + } + return result, err +} + +func (value Value) call(rt *_runtime, this Value, argumentList ...interface{}) Value { + switch function := value.value.(type) { + case *_object: + return function.call(this, function.runtime.toValueArray(argumentList...), false, nativeFrame) + } + if rt == nil { + panic("FIXME TypeError") + } + panic(rt.panicTypeError()) +} + +func (value Value) constructSafe(rt *_runtime, this Value, argumentList ...interface{}) (Value, error) { + result := Value{} + err := catchPanic(func() { + result = value.construct(rt, this, argumentList...) 
+ }) + return result, err +} + +func (value Value) construct(rt *_runtime, this Value, argumentList ...interface{}) Value { + switch fn := value.value.(type) { + case *_object: + return fn.construct(fn.runtime.toValueArray(argumentList...)) + } + if rt == nil { + panic("FIXME TypeError") + } + panic(rt.panicTypeError()) +} + +// IsPrimitive will return true if value is a primitive (any kind of primitive). +func (value Value) IsPrimitive() bool { + return !value.IsObject() +} + +// IsBoolean will return true if value is a boolean (primitive). +func (value Value) IsBoolean() bool { + return value.kind == valueBoolean +} + +// IsNumber will return true if value is a number (primitive). +func (value Value) IsNumber() bool { + return value.kind == valueNumber +} + +// IsNaN will return true if value is NaN (or would convert to NaN). +func (value Value) IsNaN() bool { + switch value := value.value.(type) { + case float64: + return math.IsNaN(value) + case float32: + return math.IsNaN(float64(value)) + case int, int8, int32, int64: + return false + case uint, uint8, uint32, uint64: + return false + } + + return math.IsNaN(value.float64()) +} + +// IsString will return true if value is a string (primitive). +func (value Value) IsString() bool { + return value.kind == valueString +} + +// IsObject will return true if value is an object. +func (value Value) IsObject() bool { + return value.kind == valueObject +} + +// IsFunction will return true if value is a function. +func (value Value) IsFunction() bool { + if value.kind != valueObject { + return false + } + return value.value.(*_object).class == "Function" +} + +// Class will return the class string of the value or the empty string if value is not an object. +// +// The return value will (generally) be one of: +// +// Object +// Function +// Array +// String +// Number +// Boolean +// Date +// RegExp +// +func (value Value) Class() string { + if value.kind != valueObject { + return "" + } + return value.value.(*_object).class +} + +func (value Value) isArray() bool { + if value.kind != valueObject { + return false + } + return isArray(value.value.(*_object)) +} + +func (value Value) isStringObject() bool { + if value.kind != valueObject { + return false + } + return value.value.(*_object).class == "String" +} + +func (value Value) isBooleanObject() bool { + if value.kind != valueObject { + return false + } + return value.value.(*_object).class == "Boolean" +} + +func (value Value) isNumberObject() bool { + if value.kind != valueObject { + return false + } + return value.value.(*_object).class == "Number" +} + +func (value Value) isDate() bool { + if value.kind != valueObject { + return false + } + return value.value.(*_object).class == "Date" +} + +func (value Value) isRegExp() bool { + if value.kind != valueObject { + return false + } + return value.value.(*_object).class == "RegExp" +} + +func (value Value) isError() bool { + if value.kind != valueObject { + return false + } + return value.value.(*_object).class == "Error" +} + +// --- + +func toValue_reflectValuePanic(value interface{}, kind reflect.Kind) { + // FIXME? 
+ switch kind { + case reflect.Struct: + panic(newError(nil, "TypeError", 0, "invalid value (struct): missing runtime: %v (%T)", value, value)) + case reflect.Map: + panic(newError(nil, "TypeError", 0, "invalid value (map): missing runtime: %v (%T)", value, value)) + case reflect.Slice: + panic(newError(nil, "TypeError", 0, "invalid value (slice): missing runtime: %v (%T)", value, value)) + } +} + +func toValue(value interface{}) Value { + switch value := value.(type) { + case Value: + return value + case bool: + return Value{valueBoolean, value} + case int: + return Value{valueNumber, value} + case int8: + return Value{valueNumber, value} + case int16: + return Value{valueNumber, value} + case int32: + return Value{valueNumber, value} + case int64: + return Value{valueNumber, value} + case uint: + return Value{valueNumber, value} + case uint8: + return Value{valueNumber, value} + case uint16: + return Value{valueNumber, value} + case uint32: + return Value{valueNumber, value} + case uint64: + return Value{valueNumber, value} + case float32: + return Value{valueNumber, float64(value)} + case float64: + return Value{valueNumber, value} + case []uint16: + return Value{valueString, value} + case string: + return Value{valueString, value} + // A rune is actually an int32, which is handled above + case *_object: + return Value{valueObject, value} + case *Object: + return Value{valueObject, value.object} + case Object: + return Value{valueObject, value.object} + case _reference: // reference is an interface (already a pointer) + return Value{valueReference, value} + case _result: + return Value{valueResult, value} + case nil: + // TODO Ugh. + return Value{} + case reflect.Value: + for value.Kind() == reflect.Ptr { + // We were given a pointer, so we'll drill down until we get a non-pointer + // + // These semantics might change if we want to start supporting pointers to values transparently + // (It would be best not to depend on this behavior) + // FIXME: UNDEFINED + if value.IsNil() { + return Value{} + } + value = value.Elem() + } + switch value.Kind() { + case reflect.Bool: + return Value{valueBoolean, bool(value.Bool())} + case reflect.Int: + return Value{valueNumber, int(value.Int())} + case reflect.Int8: + return Value{valueNumber, int8(value.Int())} + case reflect.Int16: + return Value{valueNumber, int16(value.Int())} + case reflect.Int32: + return Value{valueNumber, int32(value.Int())} + case reflect.Int64: + return Value{valueNumber, int64(value.Int())} + case reflect.Uint: + return Value{valueNumber, uint(value.Uint())} + case reflect.Uint8: + return Value{valueNumber, uint8(value.Uint())} + case reflect.Uint16: + return Value{valueNumber, uint16(value.Uint())} + case reflect.Uint32: + return Value{valueNumber, uint32(value.Uint())} + case reflect.Uint64: + return Value{valueNumber, uint64(value.Uint())} + case reflect.Float32: + return Value{valueNumber, float32(value.Float())} + case reflect.Float64: + return Value{valueNumber, float64(value.Float())} + case reflect.String: + return Value{valueString, string(value.String())} + default: + toValue_reflectValuePanic(value.Interface(), value.Kind()) + } + default: + return toValue(reflect.ValueOf(value)) + } + // FIXME? + panic(newError(nil, "TypeError", 0, "invalid value: %v (%T)", value, value)) +} + +// String will return the value as a string. +// +// This method will make return the empty string if there is an error. 
+func (value Value) String() string { + result := "" + catchPanic(func() { + result = value.string() + }) + return result +} + +// ToBoolean will convert the value to a boolean (bool). +// +// ToValue(0).ToBoolean() => false +// ToValue("").ToBoolean() => false +// ToValue(true).ToBoolean() => true +// ToValue(1).ToBoolean() => true +// ToValue("Nothing happens").ToBoolean() => true +// +// If there is an error during the conversion process (like an uncaught exception), then the result will be false and an error. +func (value Value) ToBoolean() (bool, error) { + result := false + err := catchPanic(func() { + result = value.bool() + }) + return result, err +} + +func (value Value) numberValue() Value { + if value.kind == valueNumber { + return value + } + return Value{valueNumber, value.float64()} +} + +// ToFloat will convert the value to a number (float64). +// +// ToValue(0).ToFloat() => 0. +// ToValue(1.1).ToFloat() => 1.1 +// ToValue("11").ToFloat() => 11. +// +// If there is an error during the conversion process (like an uncaught exception), then the result will be 0 and an error. +func (value Value) ToFloat() (float64, error) { + result := float64(0) + err := catchPanic(func() { + result = value.float64() + }) + return result, err +} + +// ToInteger will convert the value to a number (int64). +// +// ToValue(0).ToInteger() => 0 +// ToValue(1.1).ToInteger() => 1 +// ToValue("11").ToInteger() => 11 +// +// If there is an error during the conversion process (like an uncaught exception), then the result will be 0 and an error. +func (value Value) ToInteger() (int64, error) { + result := int64(0) + err := catchPanic(func() { + result = value.number().int64 + }) + return result, err +} + +// ToString will convert the value to a string (string). +// +// ToValue(0).ToString() => "0" +// ToValue(false).ToString() => "false" +// ToValue(1.1).ToString() => "1.1" +// ToValue("11").ToString() => "11" +// ToValue('Nothing happens.').ToString() => "Nothing happens." +// +// If there is an error during the conversion process (like an uncaught exception), then the result will be the empty string ("") and an error. +func (value Value) ToString() (string, error) { + result := "" + err := catchPanic(func() { + result = value.string() + }) + return result, err +} + +func (value Value) _object() *_object { + switch value := value.value.(type) { + case *_object: + return value + } + return nil +} + +// Object will return the object of the value, or nil if value is not an object. +// +// This method will not do any implicit conversion. For example, calling this method on a string primitive value will not return a String object. 
+func (value Value) Object() *Object { + switch object := value.value.(type) { + case *_object: + return _newObject(object, value) + } + return nil +} + +func (value Value) reference() _reference { + switch value := value.value.(type) { + case _reference: + return value + } + return nil +} + +func (value Value) resolve() Value { + switch value := value.value.(type) { + case _reference: + return value.getValue() + } + return value +} + +var ( + __NaN__ float64 = math.NaN() + __PositiveInfinity__ float64 = math.Inf(+1) + __NegativeInfinity__ float64 = math.Inf(-1) + __PositiveZero__ float64 = 0 + __NegativeZero__ float64 = math.Float64frombits(0 | (1 << 63)) +) + +func positiveInfinity() float64 { + return __PositiveInfinity__ +} + +func negativeInfinity() float64 { + return __NegativeInfinity__ +} + +func positiveZero() float64 { + return __PositiveZero__ +} + +func negativeZero() float64 { + return __NegativeZero__ +} + +// NaNValue will return a value representing NaN. +// +// It is equivalent to: +// +// ToValue(math.NaN()) +// +func NaNValue() Value { + return Value{valueNumber, __NaN__} +} + +func positiveInfinityValue() Value { + return Value{valueNumber, __PositiveInfinity__} +} + +func negativeInfinityValue() Value { + return Value{valueNumber, __NegativeInfinity__} +} + +func positiveZeroValue() Value { + return Value{valueNumber, __PositiveZero__} +} + +func negativeZeroValue() Value { + return Value{valueNumber, __NegativeZero__} +} + +// TrueValue will return a value representing true. +// +// It is equivalent to: +// +// ToValue(true) +// +func TrueValue() Value { + return Value{valueBoolean, true} +} + +// FalseValue will return a value representing false. +// +// It is equivalent to: +// +// ToValue(false) +// +func FalseValue() Value { + return Value{valueBoolean, false} +} + +func sameValue(x Value, y Value) bool { + if x.kind != y.kind { + return false + } + result := false + switch x.kind { + case valueUndefined, valueNull: + result = true + case valueNumber: + x := x.float64() + y := y.float64() + if math.IsNaN(x) && math.IsNaN(y) { + result = true + } else { + result = x == y + if result && x == 0 { + // Since +0 != -0 + result = math.Signbit(x) == math.Signbit(y) + } + } + case valueString: + result = x.string() == y.string() + case valueBoolean: + result = x.bool() == y.bool() + case valueObject: + result = x._object() == y._object() + default: + panic(hereBeDragons()) + } + + return result +} + +func strictEqualityComparison(x Value, y Value) bool { + if x.kind != y.kind { + return false + } + result := false + switch x.kind { + case valueUndefined, valueNull: + result = true + case valueNumber: + x := x.float64() + y := y.float64() + if math.IsNaN(x) && math.IsNaN(y) { + result = false + } else { + result = x == y + } + case valueString: + result = x.string() == y.string() + case valueBoolean: + result = x.bool() == y.bool() + case valueObject: + result = x._object() == y._object() + default: + panic(hereBeDragons()) + } + + return result +} + +// Export will attempt to convert the value to a Go representation +// and return it via an interface{} kind. +// +// Export returns an error, but it will always be nil. It is present +// for backwards compatibility. +// +// If a reasonable conversion is not possible, then the original +// value is returned. +// +// undefined -> nil (FIXME?: Should be Value{}) +// null -> nil +// boolean -> bool +// number -> A number type (int, float32, uint64, ...) 
+// string -> string +// Array -> []interface{} +// Object -> map[string]interface{} +// +func (self Value) Export() (interface{}, error) { + return self.export(), nil +} + +func (self Value) export() interface{} { + + switch self.kind { + case valueUndefined: + return nil + case valueNull: + return nil + case valueNumber, valueBoolean: + return self.value + case valueString: + switch value := self.value.(type) { + case string: + return value + case []uint16: + return string(utf16.Decode(value)) + } + case valueObject: + object := self._object() + switch value := object.value.(type) { + case *_goStructObject: + return value.value.Interface() + case *_goMapObject: + return value.value.Interface() + case *_goArrayObject: + return value.value.Interface() + case *_goSliceObject: + return value.value.Interface() + } + if object.class == "Array" { + result := make([]interface{}, 0) + lengthValue := object.get("length") + length := lengthValue.value.(uint32) + kind := reflect.Invalid + state := 0 + var t reflect.Type + for index := uint32(0); index < length; index += 1 { + name := strconv.FormatInt(int64(index), 10) + if !object.hasProperty(name) { + continue + } + value := object.get(name).export() + + t = reflect.TypeOf(value) + + var k reflect.Kind + if t != nil { + k = t.Kind() + } + + if state == 0 { + kind = k + state = 1 + } else if state == 1 && kind != k { + state = 2 + } + + result = append(result, value) + } + + if state != 1 || kind == reflect.Interface || t == nil { + // No common type + return result + } + + // Convert to the common type + val := reflect.MakeSlice(reflect.SliceOf(t), len(result), len(result)) + for i, v := range result { + val.Index(i).Set(reflect.ValueOf(v)) + } + return val.Interface() + } else { + result := make(map[string]interface{}) + // TODO Should we export everything? Or just what is enumerable? + object.enumerate(false, func(name string) bool { + value := object.get(name) + if value.IsDefined() { + result[name] = value.export() + } + return true + }) + return result + } + } + + if self.safe() { + return self + } + + return Value{} +} + +func (self Value) evaluateBreakContinue(labels []string) _resultKind { + result := self.value.(_result) + if result.kind == resultBreak || result.kind == resultContinue { + for _, label := range labels { + if label == result.target { + return result.kind + } + } + } + return resultReturn +} + +func (self Value) evaluateBreak(labels []string) _resultKind { + result := self.value.(_result) + if result.kind == resultBreak { + for _, label := range labels { + if label == result.target { + return result.kind + } + } + } + return resultReturn +} + +func (self Value) exportNative() interface{} { + + switch self.kind { + case valueUndefined: + return self + case valueNull: + return nil + case valueNumber, valueBoolean: + return self.value + case valueString: + switch value := self.value.(type) { + case string: + return value + case []uint16: + return string(utf16.Decode(value)) + } + case valueObject: + object := self._object() + switch value := object.value.(type) { + case *_goStructObject: + return value.value.Interface() + case *_goMapObject: + return value.value.Interface() + case *_goArrayObject: + return value.value.Interface() + case *_goSliceObject: + return value.value.Interface() + } + } + + return self +} + +// Make a best effort to return a reflect.Value corresponding to reflect.Kind, but +// fallback to just returning the Go value we have handy. 
+func (value Value) toReflectValue(kind reflect.Kind) (reflect.Value, error) { + if kind != reflect.Float32 && kind != reflect.Float64 && kind != reflect.Interface { + switch value := value.value.(type) { + case float32: + _, frac := math.Modf(float64(value)) + if frac > 0 { + return reflect.Value{}, fmt.Errorf("RangeError: %v to reflect.Kind: %v", value, kind) + } + case float64: + _, frac := math.Modf(value) + if frac > 0 { + return reflect.Value{}, fmt.Errorf("RangeError: %v to reflect.Kind: %v", value, kind) + } + } + } + + switch kind { + case reflect.Bool: // Bool + return reflect.ValueOf(value.bool()), nil + case reflect.Int: // Int + // We convert to float64 here because converting to int64 will not tell us + // if a value is outside the range of int64 + tmp := toIntegerFloat(value) + if tmp < float_minInt || tmp > float_maxInt { + return reflect.Value{}, fmt.Errorf("RangeError: %f (%v) to int", tmp, value) + } else { + return reflect.ValueOf(int(tmp)), nil + } + case reflect.Int8: // Int8 + tmp := value.number().int64 + if tmp < int64_minInt8 || tmp > int64_maxInt8 { + return reflect.Value{}, fmt.Errorf("RangeError: %d (%v) to int8", tmp, value) + } else { + return reflect.ValueOf(int8(tmp)), nil + } + case reflect.Int16: // Int16 + tmp := value.number().int64 + if tmp < int64_minInt16 || tmp > int64_maxInt16 { + return reflect.Value{}, fmt.Errorf("RangeError: %d (%v) to int16", tmp, value) + } else { + return reflect.ValueOf(int16(tmp)), nil + } + case reflect.Int32: // Int32 + tmp := value.number().int64 + if tmp < int64_minInt32 || tmp > int64_maxInt32 { + return reflect.Value{}, fmt.Errorf("RangeError: %d (%v) to int32", tmp, value) + } else { + return reflect.ValueOf(int32(tmp)), nil + } + case reflect.Int64: // Int64 + // We convert to float64 here because converting to int64 will not tell us + // if a value is outside the range of int64 + tmp := toIntegerFloat(value) + if tmp < float_minInt64 || tmp > float_maxInt64 { + return reflect.Value{}, fmt.Errorf("RangeError: %f (%v) to int", tmp, value) + } else { + return reflect.ValueOf(int64(tmp)), nil + } + case reflect.Uint: // Uint + // We convert to float64 here because converting to int64 will not tell us + // if a value is outside the range of uint + tmp := toIntegerFloat(value) + if tmp < 0 || tmp > float_maxUint { + return reflect.Value{}, fmt.Errorf("RangeError: %f (%v) to uint", tmp, value) + } else { + return reflect.ValueOf(uint(tmp)), nil + } + case reflect.Uint8: // Uint8 + tmp := value.number().int64 + if tmp < 0 || tmp > int64_maxUint8 { + return reflect.Value{}, fmt.Errorf("RangeError: %d (%v) to uint8", tmp, value) + } else { + return reflect.ValueOf(uint8(tmp)), nil + } + case reflect.Uint16: // Uint16 + tmp := value.number().int64 + if tmp < 0 || tmp > int64_maxUint16 { + return reflect.Value{}, fmt.Errorf("RangeError: %d (%v) to uint16", tmp, value) + } else { + return reflect.ValueOf(uint16(tmp)), nil + } + case reflect.Uint32: // Uint32 + tmp := value.number().int64 + if tmp < 0 || tmp > int64_maxUint32 { + return reflect.Value{}, fmt.Errorf("RangeError: %d (%v) to uint32", tmp, value) + } else { + return reflect.ValueOf(uint32(tmp)), nil + } + case reflect.Uint64: // Uint64 + // We convert to float64 here because converting to int64 will not tell us + // if a value is outside the range of uint64 + tmp := toIntegerFloat(value) + if tmp < 0 || tmp > float_maxUint64 { + return reflect.Value{}, fmt.Errorf("RangeError: %f (%v) to uint64", tmp, value) + } else { + return reflect.ValueOf(uint64(tmp)), nil + } + 
case reflect.Float32: // Float32 + tmp := value.float64() + tmp1 := tmp + if 0 > tmp1 { + tmp1 = -tmp1 + } + if tmp1 > 0 && (tmp1 < math.SmallestNonzeroFloat32 || tmp1 > math.MaxFloat32) { + return reflect.Value{}, fmt.Errorf("RangeError: %f (%v) to float32", tmp, value) + } else { + return reflect.ValueOf(float32(tmp)), nil + } + case reflect.Float64: // Float64 + value := value.float64() + return reflect.ValueOf(float64(value)), nil + case reflect.String: // String + return reflect.ValueOf(value.string()), nil + case reflect.Invalid: // Invalid + case reflect.Complex64: // FIXME? Complex64 + case reflect.Complex128: // FIXME? Complex128 + case reflect.Chan: // FIXME? Chan + case reflect.Func: // FIXME? Func + case reflect.Ptr: // FIXME? Ptr + case reflect.UnsafePointer: // FIXME? UnsafePointer + default: + switch value.kind { + case valueObject: + object := value._object() + switch vl := object.value.(type) { + case *_goStructObject: // Struct + return reflect.ValueOf(vl.value.Interface()), nil + case *_goMapObject: // Map + return reflect.ValueOf(vl.value.Interface()), nil + case *_goArrayObject: // Array + return reflect.ValueOf(vl.value.Interface()), nil + case *_goSliceObject: // Slice + return reflect.ValueOf(vl.value.Interface()), nil + } + return reflect.ValueOf(value.exportNative()), nil + case valueEmpty, valueResult, valueReference: + // These are invalid, and should panic + default: + return reflect.ValueOf(value.value), nil + } + } + + // FIXME Should this end up as a TypeError? + panic(fmt.Errorf("invalid conversion of %v (%v) to reflect.Kind: %v", value.kind, value, kind)) +} + +func stringToReflectValue(value string, kind reflect.Kind) (reflect.Value, error) { + switch kind { + case reflect.Bool: + value, err := strconv.ParseBool(value) + if err != nil { + return reflect.Value{}, err + } + return reflect.ValueOf(value), nil + case reflect.Int: + value, err := strconv.ParseInt(value, 0, 0) + if err != nil { + return reflect.Value{}, err + } + return reflect.ValueOf(int(value)), nil + case reflect.Int8: + value, err := strconv.ParseInt(value, 0, 8) + if err != nil { + return reflect.Value{}, err + } + return reflect.ValueOf(int8(value)), nil + case reflect.Int16: + value, err := strconv.ParseInt(value, 0, 16) + if err != nil { + return reflect.Value{}, err + } + return reflect.ValueOf(int16(value)), nil + case reflect.Int32: + value, err := strconv.ParseInt(value, 0, 32) + if err != nil { + return reflect.Value{}, err + } + return reflect.ValueOf(int32(value)), nil + case reflect.Int64: + value, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return reflect.Value{}, err + } + return reflect.ValueOf(int64(value)), nil + case reflect.Uint: + value, err := strconv.ParseUint(value, 0, 0) + if err != nil { + return reflect.Value{}, err + } + return reflect.ValueOf(uint(value)), nil + case reflect.Uint8: + value, err := strconv.ParseUint(value, 0, 8) + if err != nil { + return reflect.Value{}, err + } + return reflect.ValueOf(uint8(value)), nil + case reflect.Uint16: + value, err := strconv.ParseUint(value, 0, 16) + if err != nil { + return reflect.Value{}, err + } + return reflect.ValueOf(uint16(value)), nil + case reflect.Uint32: + value, err := strconv.ParseUint(value, 0, 32) + if err != nil { + return reflect.Value{}, err + } + return reflect.ValueOf(uint32(value)), nil + case reflect.Uint64: + value, err := strconv.ParseUint(value, 0, 64) + if err != nil { + return reflect.Value{}, err + } + return reflect.ValueOf(uint64(value)), nil + case reflect.Float32: + value, 
err := strconv.ParseFloat(value, 32) + if err != nil { + return reflect.Value{}, err + } + return reflect.ValueOf(float32(value)), nil + case reflect.Float64: + value, err := strconv.ParseFloat(value, 64) + if err != nil { + return reflect.Value{}, err + } + return reflect.ValueOf(float64(value)), nil + case reflect.String: + return reflect.ValueOf(value), nil + } + + // FIXME This should end up as a TypeError? + panic(fmt.Errorf("invalid conversion of %q to reflect.Kind: %v", value, kind)) +} diff --git a/vendor/github.com/robertkrimen/otto/value_boolean.go b/vendor/github.com/robertkrimen/otto/value_boolean.go new file mode 100644 index 000000000..3040f4163 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/value_boolean.go @@ -0,0 +1,40 @@ +package otto + +import ( + "fmt" + "math" + "reflect" +) + +func (value Value) bool() bool { + if value.kind == valueBoolean { + return value.value.(bool) + } + if value.IsUndefined() { + return false + } + if value.IsNull() { + return false + } + switch value := value.value.(type) { + case bool: + return value + case int, int8, int16, int32, int64: + return 0 != reflect.ValueOf(value).Int() + case uint, uint8, uint16, uint32, uint64: + return 0 != reflect.ValueOf(value).Uint() + case float32: + return 0 != value + case float64: + if math.IsNaN(value) || value == 0 { + return false + } + return true + case string: + return 0 != len(value) + } + if value.IsObject() { + return true + } + panic(fmt.Errorf("toBoolean(%T)", value.value)) +} diff --git a/vendor/github.com/robertkrimen/otto/value_number.go b/vendor/github.com/robertkrimen/otto/value_number.go new file mode 100644 index 000000000..870bf115b --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/value_number.go @@ -0,0 +1,324 @@ +package otto + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" +) + +var stringToNumberParseInteger = regexp.MustCompile(`^(?:0[xX])`) + +func parseNumber(value string) float64 { + value = strings.TrimSpace(value) + + if value == "" { + return 0 + } + + parseFloat := false + if strings.IndexRune(value, '.') != -1 { + parseFloat = true + } else if stringToNumberParseInteger.MatchString(value) { + parseFloat = false + } else { + parseFloat = true + } + + if parseFloat { + number, err := strconv.ParseFloat(value, 64) + if err != nil && err.(*strconv.NumError).Err != strconv.ErrRange { + return math.NaN() + } + return number + } + + number, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return math.NaN() + } + return float64(number) +} + +func (value Value) float64() float64 { + switch value.kind { + case valueUndefined: + return math.NaN() + case valueNull: + return 0 + } + switch value := value.value.(type) { + case bool: + if value { + return 1 + } + return 0 + case int: + return float64(value) + case int8: + return float64(value) + case int16: + return float64(value) + case int32: + return float64(value) + case int64: + return float64(value) + case uint: + return float64(value) + case uint8: + return float64(value) + case uint16: + return float64(value) + case uint32: + return float64(value) + case uint64: + return float64(value) + case float64: + return value + case string: + return parseNumber(value) + case *_object: + return value.DefaultValue(defaultValueHintNumber).float64() + } + panic(fmt.Errorf("toFloat(%T)", value.value)) +} + +const ( + float_2_64 float64 = 18446744073709551616.0 + float_2_63 float64 = 9223372036854775808.0 + float_2_32 float64 = 4294967296.0 + float_2_31 float64 = 2147483648.0 + float_2_16 float64 = 65536.0 + 
integer_2_32 int64 = 4294967296 + integer_2_31 int64 = 2146483648 + sqrt1_2 float64 = math.Sqrt2 / 2 +) + +const ( + maxInt8 = math.MaxInt8 + minInt8 = math.MinInt8 + maxInt16 = math.MaxInt16 + minInt16 = math.MinInt16 + maxInt32 = math.MaxInt32 + minInt32 = math.MinInt32 + maxInt64 = math.MaxInt64 + minInt64 = math.MinInt64 + maxUint8 = math.MaxUint8 + maxUint16 = math.MaxUint16 + maxUint32 = math.MaxUint32 + maxUint64 = math.MaxUint64 + maxUint = ^uint(0) + minUint = 0 + maxInt = int(^uint(0) >> 1) + minInt = -maxInt - 1 + + // int64 + int64_maxInt int64 = int64(maxInt) + int64_minInt int64 = int64(minInt) + int64_maxInt8 int64 = math.MaxInt8 + int64_minInt8 int64 = math.MinInt8 + int64_maxInt16 int64 = math.MaxInt16 + int64_minInt16 int64 = math.MinInt16 + int64_maxInt32 int64 = math.MaxInt32 + int64_minInt32 int64 = math.MinInt32 + int64_maxUint8 int64 = math.MaxUint8 + int64_maxUint16 int64 = math.MaxUint16 + int64_maxUint32 int64 = math.MaxUint32 + + // float64 + float_maxInt float64 = float64(int(^uint(0) >> 1)) + float_minInt float64 = float64(int(-maxInt - 1)) + float_minUint float64 = float64(0) + float_maxUint float64 = float64(uint(^uint(0))) + float_minUint64 float64 = float64(0) + float_maxUint64 float64 = math.MaxUint64 + float_maxInt64 float64 = math.MaxInt64 + float_minInt64 float64 = math.MinInt64 +) + +func toIntegerFloat(value Value) float64 { + float := value.float64() + if math.IsInf(float, 0) { + } else if math.IsNaN(float) { + float = 0 + } else if float > 0 { + float = math.Floor(float) + } else { + float = math.Ceil(float) + } + return float +} + +type _numberKind int + +const ( + numberInteger _numberKind = iota // 3.0 => 3.0 + numberFloat // 3.14159 => 3.0, 1+2**63 > 2**63-1 + numberInfinity // Infinity => 2**63-1 + numberNaN // NaN => 0 +) + +type _number struct { + kind _numberKind + int64 int64 + float64 float64 +} + +// FIXME +// http://www.goinggo.net/2013/08/gustavos-ieee-754-brain-teaser.html +// http://bazaar.launchpad.net/~niemeyer/strepr/trunk/view/6/strepr.go#L160 +func (value Value) number() (number _number) { + switch value := value.value.(type) { + case int8: + number.int64 = int64(value) + return + case int16: + number.int64 = int64(value) + return + case uint8: + number.int64 = int64(value) + return + case uint16: + number.int64 = int64(value) + return + case uint32: + number.int64 = int64(value) + return + case int: + number.int64 = int64(value) + return + case int64: + number.int64 = value + return + } + + float := value.float64() + if float == 0 { + return + } + + number.kind = numberFloat + number.float64 = float + + if math.IsNaN(float) { + number.kind = numberNaN + return + } + + if math.IsInf(float, 0) { + number.kind = numberInfinity + } + + if float >= float_maxInt64 { + number.int64 = math.MaxInt64 + return + } + + if float <= float_minInt64 { + number.int64 = math.MinInt64 + return + } + + integer := float64(0) + if float > 0 { + integer = math.Floor(float) + } else { + integer = math.Ceil(float) + } + + if float == integer { + number.kind = numberInteger + } + number.int64 = int64(float) + return +} + +// ECMA 262: 9.5 +func toInt32(value Value) int32 { + { + switch value := value.value.(type) { + case int8: + return int32(value) + case int16: + return int32(value) + case int32: + return value + } + } + floatValue := value.float64() + if math.IsNaN(floatValue) || math.IsInf(floatValue, 0) { + return 0 + } + if floatValue == 0 { // This will work for +0 & -0 + return 0 + } + remainder := math.Mod(floatValue, float_2_32) + if remainder 
> 0 { + remainder = math.Floor(remainder) + } else { + remainder = math.Ceil(remainder) + float_2_32 + } + if remainder > float_2_31 { + return int32(remainder - float_2_32) + } + return int32(remainder) +} + +func toUint32(value Value) uint32 { + { + switch value := value.value.(type) { + case int8: + return uint32(value) + case int16: + return uint32(value) + case uint8: + return uint32(value) + case uint16: + return uint32(value) + case uint32: + return value + } + } + floatValue := value.float64() + if math.IsNaN(floatValue) || math.IsInf(floatValue, 0) { + return 0 + } + if floatValue == 0 { + return 0 + } + remainder := math.Mod(floatValue, float_2_32) + if remainder > 0 { + remainder = math.Floor(remainder) + } else { + remainder = math.Ceil(remainder) + float_2_32 + } + return uint32(remainder) +} + +func toUint16(value Value) uint16 { + { + switch value := value.value.(type) { + case int8: + return uint16(value) + case uint8: + return uint16(value) + case uint16: + return value + } + } + floatValue := value.float64() + if math.IsNaN(floatValue) || math.IsInf(floatValue, 0) { + return 0 + } + if floatValue == 0 { + return 0 + } + remainder := math.Mod(floatValue, float_2_16) + if remainder > 0 { + remainder = math.Floor(remainder) + } else { + remainder = math.Ceil(remainder) + float_2_16 + } + return uint16(remainder) +} diff --git a/vendor/github.com/robertkrimen/otto/value_primitive.go b/vendor/github.com/robertkrimen/otto/value_primitive.go new file mode 100644 index 000000000..11ed329d1 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/value_primitive.go @@ -0,0 +1,23 @@ +package otto + +func toStringPrimitive(value Value) Value { + return _toPrimitive(value, defaultValueHintString) +} + +func toNumberPrimitive(value Value) Value { + return _toPrimitive(value, defaultValueHintNumber) +} + +func toPrimitive(value Value) Value { + return _toPrimitive(value, defaultValueNoHint) +} + +func _toPrimitive(value Value, hint _defaultValueHint) Value { + switch value.kind { + case valueNull, valueUndefined, valueNumber, valueString, valueBoolean: + return value + case valueObject: + return value._object().DefaultValue(hint) + } + panic(hereBeDragons(value.kind, value)) +} diff --git a/vendor/github.com/robertkrimen/otto/value_string.go b/vendor/github.com/robertkrimen/otto/value_string.go new file mode 100644 index 000000000..0fbfd6b25 --- /dev/null +++ b/vendor/github.com/robertkrimen/otto/value_string.go @@ -0,0 +1,103 @@ +package otto + +import ( + "fmt" + "math" + "regexp" + "strconv" + "unicode/utf16" +) + +var matchLeading0Exponent = regexp.MustCompile(`([eE][\+\-])0+([1-9])`) // 1e-07 => 1e-7 + +// FIXME +// https://code.google.com/p/v8/source/browse/branches/bleeding_edge/src/conversions.cc?spec=svn18082&r=18082 +func floatToString(value float64, bitsize int) string { + // TODO Fit to ECMA-262 9.8.1 specification + if math.IsNaN(value) { + return "NaN" + } else if math.IsInf(value, 0) { + if math.Signbit(value) { + return "-Infinity" + } + return "Infinity" + } + exponent := math.Log10(math.Abs(value)) + if exponent >= 21 || exponent < -6 { + return matchLeading0Exponent.ReplaceAllString(strconv.FormatFloat(value, 'g', -1, bitsize), "$1$2") + } + return strconv.FormatFloat(value, 'f', -1, bitsize) +} + +func numberToStringRadix(value Value, radix int) string { + float := value.float64() + if math.IsNaN(float) { + return "NaN" + } else if math.IsInf(float, 1) { + return "Infinity" + } else if math.IsInf(float, -1) { + return "-Infinity" + } + // FIXME This is very broken + 
// Need to do proper radix conversion for floats, ... + // This truncates large floats (so bad). + return strconv.FormatInt(int64(float), radix) +} + +func (value Value) string() string { + if value.kind == valueString { + switch value := value.value.(type) { + case string: + return value + case []uint16: + return string(utf16.Decode(value)) + } + } + if value.IsUndefined() { + return "undefined" + } + if value.IsNull() { + return "null" + } + switch value := value.value.(type) { + case bool: + return strconv.FormatBool(value) + case int: + return strconv.FormatInt(int64(value), 10) + case int8: + return strconv.FormatInt(int64(value), 10) + case int16: + return strconv.FormatInt(int64(value), 10) + case int32: + return strconv.FormatInt(int64(value), 10) + case int64: + return strconv.FormatInt(value, 10) + case uint: + return strconv.FormatUint(uint64(value), 10) + case uint8: + return strconv.FormatUint(uint64(value), 10) + case uint16: + return strconv.FormatUint(uint64(value), 10) + case uint32: + return strconv.FormatUint(uint64(value), 10) + case uint64: + return strconv.FormatUint(value, 10) + case float32: + if value == 0 { + return "0" // Take care not to return -0 + } + return floatToString(float64(value), 32) + case float64: + if value == 0 { + return "0" // Take care not to return -0 + } + return floatToString(value, 64) + case []uint16: + return string(utf16.Decode(value)) + case string: + return value + case *_object: + return value.DefaultValue(defaultValueHintString).string() + } + panic(fmt.Errorf("%v.string( %T)", value.value, value.value)) +} diff --git a/vendor/golang.org/x/text/LICENSE b/vendor/golang.org/x/text/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/text/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
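The value conversion helpers vendored above (ToBoolean, ToFloat, ToInteger, ToString, Export and the internal bool/float64/string methods backing them) are the public surface otto exposes for moving data between JavaScript and Go. A minimal sketch of how a caller might exercise them, assuming otto's usual New/Run entry points; the script and variable names are illustrative only and are not part of this patch:

package main

import (
	"fmt"

	"github.com/robertkrimen/otto"
)

func main() {
	// Evaluate a small script; Run returns an otto.Value.
	vm := otto.New()
	value, err := vm.Run(`[1, 2, 3].length * 7`)
	if err != nil {
		panic(err)
	}

	// The conversion helpers follow the ECMAScript rules implemented above and
	// report an error if an uncaught exception occurs during conversion.
	asInt, _ := value.ToInteger() // 21
	asStr, _ := value.ToString()  // "21"
	fmt.Println(asInt, asStr)

	// Export maps a JavaScript value onto Go types
	// (Array -> []interface{}, Object -> map[string]interface{}).
	obj, _ := vm.Run(`({name: "example", ttl: 300})`)
	exported, _ := obj.Export()
	fmt.Printf("%#v\n", exported)
}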
diff --git a/vendor/golang.org/x/text/PATENTS b/vendor/golang.org/x/text/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/text/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/text/encoding/encoding.go b/vendor/golang.org/x/text/encoding/encoding.go new file mode 100644 index 000000000..2a7d9529a --- /dev/null +++ b/vendor/golang.org/x/text/encoding/encoding.go @@ -0,0 +1,335 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package encoding defines an interface for character encodings, such as Shift +// JIS and Windows 1252, that can convert to and from UTF-8. +// +// Encoding implementations are provided in other packages, such as +// golang.org/x/text/encoding/charmap and +// golang.org/x/text/encoding/japanese. +package encoding // import "golang.org/x/text/encoding" + +import ( + "errors" + "io" + "strconv" + "unicode/utf8" + + "golang.org/x/text/encoding/internal/identifier" + "golang.org/x/text/transform" +) + +// TODO: +// - There seems to be some inconsistency in when decoders return errors +// and when not. Also documentation seems to suggest they shouldn't return +// errors at all (except for UTF-16). +// - Encoders seem to rely on or at least benefit from the input being in NFC +// normal form. Perhaps add an example how users could prepare their output. + +// Encoding is a character set encoding that can be transformed to and from +// UTF-8. +type Encoding interface { + // NewDecoder returns a Decoder. + NewDecoder() *Decoder + + // NewEncoder returns an Encoder. + NewEncoder() *Encoder +} + +// A Decoder converts bytes to UTF-8. It implements transform.Transformer. +// +// Transforming source bytes that are not of that encoding will not result in an +// error per se. Each byte that cannot be transcoded will be represented in the +// output by the UTF-8 encoding of '\uFFFD', the replacement rune. +type Decoder struct { + transform.Transformer + + // This forces external creators of Decoders to use names in struct + // initializers, allowing for future extendibility without having to break + // code. 
+ _ struct{} +} + +// Bytes converts the given encoded bytes to UTF-8. It returns the converted +// bytes or 0, err if any error occurred. +func (d *Decoder) Bytes(b []byte) ([]byte, error) { + b, _, err := transform.Bytes(d, b) + if err != nil { + return nil, err + } + return b, nil +} + +// String converts the given encoded string to UTF-8. It returns the converted +// string or 0, err if any error occurred. +func (d *Decoder) String(s string) (string, error) { + s, _, err := transform.String(d, s) + if err != nil { + return "", err + } + return s, nil +} + +// Reader wraps another Reader to decode its bytes. +// +// The Decoder may not be used for any other operation as long as the returned +// Reader is in use. +func (d *Decoder) Reader(r io.Reader) io.Reader { + return transform.NewReader(r, d) +} + +// An Encoder converts bytes from UTF-8. It implements transform.Transformer. +// +// Each rune that cannot be transcoded will result in an error. In this case, +// the transform will consume all source byte up to, not including the offending +// rune. Transforming source bytes that are not valid UTF-8 will be replaced by +// `\uFFFD`. To return early with an error instead, use transform.Chain to +// preprocess the data with a UTF8Validator. +type Encoder struct { + transform.Transformer + + // This forces external creators of Encoders to use names in struct + // initializers, allowing for future extendibility without having to break + // code. + _ struct{} +} + +// Bytes converts bytes from UTF-8. It returns the converted bytes or 0, err if +// any error occurred. +func (e *Encoder) Bytes(b []byte) ([]byte, error) { + b, _, err := transform.Bytes(e, b) + if err != nil { + return nil, err + } + return b, nil +} + +// String converts a string from UTF-8. It returns the converted string or +// 0, err if any error occurred. +func (e *Encoder) String(s string) (string, error) { + s, _, err := transform.String(e, s) + if err != nil { + return "", err + } + return s, nil +} + +// Writer wraps another Writer to encode its UTF-8 output. +// +// The Encoder may not be used for any other operation as long as the returned +// Writer is in use. +func (e *Encoder) Writer(w io.Writer) io.Writer { + return transform.NewWriter(w, e) +} + +// ASCIISub is the ASCII substitute character, as recommended by +// http://unicode.org/reports/tr36/#Text_Comparison +const ASCIISub = '\x1a' + +// Nop is the nop encoding. Its transformed bytes are the same as the source +// bytes; it does not replace invalid UTF-8 sequences. +var Nop Encoding = nop{} + +type nop struct{} + +func (nop) NewDecoder() *Decoder { + return &Decoder{Transformer: transform.Nop} +} +func (nop) NewEncoder() *Encoder { + return &Encoder{Transformer: transform.Nop} +} + +// Replacement is the replacement encoding. Decoding from the replacement +// encoding yields a single '\uFFFD' replacement rune. Encoding from UTF-8 to +// the replacement encoding yields the same as the source bytes except that +// invalid UTF-8 is converted to '\uFFFD'. 
+// +// It is defined at http://encoding.spec.whatwg.org/#replacement +var Replacement Encoding = replacement{} + +type replacement struct{} + +func (replacement) NewDecoder() *Decoder { + return &Decoder{Transformer: replacementDecoder{}} +} + +func (replacement) NewEncoder() *Encoder { + return &Encoder{Transformer: replacementEncoder{}} +} + +func (replacement) ID() (mib identifier.MIB, other string) { + return identifier.Replacement, "" +} + +type replacementDecoder struct{ transform.NopResetter } + +func (replacementDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + if len(dst) < 3 { + return 0, 0, transform.ErrShortDst + } + if atEOF { + const fffd = "\ufffd" + dst[0] = fffd[0] + dst[1] = fffd[1] + dst[2] = fffd[2] + nDst = 3 + } + return nDst, len(src), nil +} + +type replacementEncoder struct{ transform.NopResetter } + +func (replacementEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + r, size := rune(0), 0 + + for ; nSrc < len(src); nSrc += size { + r = rune(src[nSrc]) + + // Decode a 1-byte rune. + if r < utf8.RuneSelf { + size = 1 + + } else { + // Decode a multi-byte rune. + r, size = utf8.DecodeRune(src[nSrc:]) + if size == 1 { + // All valid runes of size 1 (those below utf8.RuneSelf) were + // handled above. We have invalid UTF-8 or we haven't seen the + // full character yet. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + r = '\ufffd' + } + } + + if nDst+utf8.RuneLen(r) > len(dst) { + err = transform.ErrShortDst + break + } + nDst += utf8.EncodeRune(dst[nDst:], r) + } + return nDst, nSrc, err +} + +// HTMLEscapeUnsupported wraps encoders to replace source runes outside the +// repertoire of the destination encoding with HTML escape sequences. +// +// This wrapper exists to comply to URL and HTML forms requiring a +// non-terminating legacy encoder. The produced sequences may lead to data +// loss as they are indistinguishable from legitimate input. To avoid this +// issue, use UTF-8 encodings whenever possible. +func HTMLEscapeUnsupported(e *Encoder) *Encoder { + return &Encoder{Transformer: &errorHandler{e, errorToHTML}} +} + +// ReplaceUnsupported wraps encoders to replace source runes outside the +// repertoire of the destination encoding with an encoding-specific +// replacement. +// +// This wrapper is only provided for backwards compatibility and legacy +// handling. Its use is strongly discouraged. Use UTF-8 whenever possible. +func ReplaceUnsupported(e *Encoder) *Encoder { + return &Encoder{Transformer: &errorHandler{e, errorToReplacement}} +} + +type errorHandler struct { + *Encoder + handler func(dst []byte, r rune, err repertoireError) (n int, ok bool) +} + +// TODO: consider making this error public in some form. 
+type repertoireError interface { + Replacement() byte +} + +func (h errorHandler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + nDst, nSrc, err = h.Transformer.Transform(dst, src, atEOF) + for err != nil { + rerr, ok := err.(repertoireError) + if !ok { + return nDst, nSrc, err + } + r, sz := utf8.DecodeRune(src[nSrc:]) + n, ok := h.handler(dst[nDst:], r, rerr) + if !ok { + return nDst, nSrc, transform.ErrShortDst + } + err = nil + nDst += n + if nSrc += sz; nSrc < len(src) { + var dn, sn int + dn, sn, err = h.Transformer.Transform(dst[nDst:], src[nSrc:], atEOF) + nDst += dn + nSrc += sn + } + } + return nDst, nSrc, err +} + +func errorToHTML(dst []byte, r rune, err repertoireError) (n int, ok bool) { + buf := [8]byte{} + b := strconv.AppendUint(buf[:0], uint64(r), 10) + if n = len(b) + len("&#;"); n >= len(dst) { + return 0, false + } + dst[0] = '&' + dst[1] = '#' + dst[copy(dst[2:], b)+2] = ';' + return n, true +} + +func errorToReplacement(dst []byte, r rune, err repertoireError) (n int, ok bool) { + if len(dst) == 0 { + return 0, false + } + dst[0] = err.Replacement() + return 1, true +} + +// ErrInvalidUTF8 means that a transformer encountered invalid UTF-8. +var ErrInvalidUTF8 = errors.New("encoding: invalid UTF-8") + +// UTF8Validator is a transformer that returns ErrInvalidUTF8 on the first +// input byte that is not valid UTF-8. +var UTF8Validator transform.Transformer = utf8Validator{} + +type utf8Validator struct{ transform.NopResetter } + +func (utf8Validator) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + n := len(src) + if n > len(dst) { + n = len(dst) + } + for i := 0; i < n; { + if c := src[i]; c < utf8.RuneSelf { + dst[i] = c + i++ + continue + } + _, size := utf8.DecodeRune(src[i:]) + if size == 1 { + // All valid runes of size 1 (those below utf8.RuneSelf) were + // handled above. We have invalid UTF-8 or we haven't seen the + // full character yet. + err = ErrInvalidUTF8 + if !atEOF && !utf8.FullRune(src[i:]) { + err = transform.ErrShortSrc + } + return i, i, err + } + if i+size > len(dst) { + return i, i, transform.ErrShortDst + } + for ; size > 0; size-- { + dst[i] = src[i] + i++ + } + } + if len(src) > len(dst) { + err = transform.ErrShortDst + } + return n, n, err +} diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go new file mode 100644 index 000000000..0c8eba7e5 --- /dev/null +++ b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go @@ -0,0 +1,137 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "log" + "strings" + + "golang.org/x/text/internal/gen" +) + +type registry struct { + XMLName xml.Name `xml:"registry"` + Updated string `xml:"updated"` + Registry []struct { + ID string `xml:"id,attr"` + Record []struct { + Name string `xml:"name"` + Xref []struct { + Type string `xml:"type,attr"` + Data string `xml:"data,attr"` + } `xml:"xref"` + Desc struct { + Data string `xml:",innerxml"` + // Any []struct { + // Data string `xml:",chardata"` + // } `xml:",any"` + // Data string `xml:",chardata"` + } `xml:"description,"` + MIB string `xml:"value"` + Alias []string `xml:"alias"` + MIME string `xml:"preferred_alias"` + } `xml:"record"` + } `xml:"registry"` +} + +func main() { + r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml") + reg := ®istry{} + if err := xml.NewDecoder(r).Decode(®); err != nil && err != io.EOF { + log.Fatalf("Error decoding charset registry: %v", err) + } + if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" { + log.Fatalf("Unexpected ID %s", reg.Registry[0].ID) + } + + w := &bytes.Buffer{} + fmt.Fprintf(w, "const (\n") + for _, rec := range reg.Registry[0].Record { + constName := "" + for _, a := range rec.Alias { + if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 { + // Some of the constant definitions have comments in them. Strip those. + constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0]) + } + } + if constName == "" { + switch rec.MIB { + case "2085": + constName = "HZGB2312" // Not listed as alias for some reason. + default: + log.Fatalf("No cs alias defined for %s.", rec.MIB) + } + } + if rec.MIME != "" { + rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME) + } + fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME) + if len(rec.Desc.Data) > 0 { + fmt.Fprint(w, "// ") + d := xml.NewDecoder(strings.NewReader(rec.Desc.Data)) + inElem := true + attr := "" + for { + t, err := d.Token() + if err != nil { + if err != io.EOF { + log.Fatal(err) + } + break + } + switch x := t.(type) { + case xml.CharData: + attr = "" // Don't need attribute info. + a := bytes.Split([]byte(x), []byte("\n")) + for i, b := range a { + if b = bytes.TrimSpace(b); len(b) != 0 { + if !inElem && i > 0 { + fmt.Fprint(w, "\n// ") + } + inElem = false + fmt.Fprintf(w, "%s ", string(b)) + } + } + case xml.StartElement: + if x.Name.Local == "xref" { + inElem = true + use := false + for _, a := range x.Attr { + if a.Name.Local == "type" { + use = use || a.Value != "person" + } + if a.Name.Local == "data" && use { + attr = a.Value + " " + } + } + } + case xml.EndElement: + inElem = false + fmt.Fprint(w, attr) + } + } + fmt.Fprint(w, "\n") + } + for _, x := range rec.Xref { + switch x.Type { + case "rfc": + fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data)) + case "uri": + fmt.Fprintf(w, "// Reference: %s\n", x.Data) + } + } + fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB) + fmt.Fprintln(w) + } + fmt.Fprintln(w, ")") + + gen.WriteGoFile("mib.go", "identifier", w.Bytes()) +} diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go b/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go new file mode 100644 index 000000000..2a2da0ef2 --- /dev/null +++ b/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go @@ -0,0 +1,81 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package identifier defines the contract between implementations of Encoding +// and Index by defining identifiers that uniquely identify standardized coded +// character sets (CCS) and character encoding schemes (CES), which we will +// together refer to as encodings, for which Encoding implementations provide +// converters to and from UTF-8. This package is typically only of concern to +// implementers of Indexes and Encodings. +// +// One part of the identifier is the MIB code, which is defined by IANA and +// uniquely identifies a CCS or CES. Each code is associated with data that +// references authorities, official documentation as well as aliases and MIME +// names. +// +// Not all CESs are covered by the IANA registry. The "other" string that is +// returned by ID can be used to identify other character sets or versions of +// existing ones. +// +// It is recommended that each package that provides a set of Encodings provide +// the All and Common variables to reference all supported encodings and +// commonly used subset. This allows Index implementations to include all +// available encodings without explicitly referencing or knowing about them. +package identifier + +// Note: this package is internal, but could be made public if there is a need +// for writing third-party Indexes and Encodings. + +// References: +// - http://source.icu-project.org/repos/icu/icu/trunk/source/data/mappings/convrtrs.txt +// - http://www.iana.org/assignments/character-sets/character-sets.xhtml +// - http://www.iana.org/assignments/ianacharset-mib/ianacharset-mib +// - http://www.ietf.org/rfc/rfc2978.txt +// - http://www.unicode.org/reports/tr22/ +// - http://www.w3.org/TR/encoding/ +// - http://www.w3.org/TR/encoding/indexes/encodings.json +// - https://encoding.spec.whatwg.org/ +// - https://tools.ietf.org/html/rfc6657#section-5 + +// Interface can be implemented by Encodings to define the CCS or CES for which +// it implements conversions. +type Interface interface { + // ID returns an encoding identifier. Exactly one of the mib and other + // values should be non-zero. + // + // In the usual case it is only necessary to indicate the MIB code. The + // other string can be used to specify encodings for which there is no MIB, + // such as "x-mac-dingbat". + // + // The other string may only contain the characters a-z, A-Z, 0-9, - and _. + ID() (mib MIB, other string) + + // NOTE: the restrictions on the encoding are to allow extending the syntax + // with additional information such as versions, vendors and other variants. +} + +// A MIB identifies an encoding. It is derived from the IANA MIB codes and adds +// some identifiers for some encodings that are not covered by the IANA +// standard. +// +// See http://www.iana.org/assignments/ianacharset-mib. +type MIB uint16 + +// These additional MIB types are not defined in IANA. They are added because +// they are common and defined within the text repo. +const ( + // Unofficial marks the start of encodings not registered by IANA. + Unofficial MIB = 10000 + iota + + // Replacement is the WhatWG replacement encoding. + Replacement + + // XUserDefined is the code for x-user-defined. + XUserDefined + + // MacintoshCyrillic is the code for x-mac-cyrillic. 
+ MacintoshCyrillic +) diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/mib.go b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go new file mode 100644 index 000000000..915abfa29 --- /dev/null +++ b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go @@ -0,0 +1,1621 @@ +// This file was generated by go generate; DO NOT EDIT + +package identifier + +const ( + // ASCII is the MIB identifier with IANA name US-ASCII (MIME: US-ASCII). + // + // ANSI X3.4-1986 + // Reference: RFC2046 + ASCII MIB = 3 + + // ISOLatin1 is the MIB identifier with IANA name ISO_8859-1:1987 (MIME: ISO-8859-1). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin1 MIB = 4 + + // ISOLatin2 is the MIB identifier with IANA name ISO_8859-2:1987 (MIME: ISO-8859-2). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin2 MIB = 5 + + // ISOLatin3 is the MIB identifier with IANA name ISO_8859-3:1988 (MIME: ISO-8859-3). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin3 MIB = 6 + + // ISOLatin4 is the MIB identifier with IANA name ISO_8859-4:1988 (MIME: ISO-8859-4). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin4 MIB = 7 + + // ISOLatinCyrillic is the MIB identifier with IANA name ISO_8859-5:1988 (MIME: ISO-8859-5). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatinCyrillic MIB = 8 + + // ISOLatinArabic is the MIB identifier with IANA name ISO_8859-6:1987 (MIME: ISO-8859-6). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatinArabic MIB = 9 + + // ISOLatinGreek is the MIB identifier with IANA name ISO_8859-7:1987 (MIME: ISO-8859-7). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1947 + // Reference: RFC1345 + ISOLatinGreek MIB = 10 + + // ISOLatinHebrew is the MIB identifier with IANA name ISO_8859-8:1988 (MIME: ISO-8859-8). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatinHebrew MIB = 11 + + // ISOLatin5 is the MIB identifier with IANA name ISO_8859-9:1989 (MIME: ISO-8859-9). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin5 MIB = 12 + + // ISOLatin6 is the MIB identifier with IANA name ISO-8859-10 (MIME: ISO-8859-10). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin6 MIB = 13 + + // ISOTextComm is the MIB identifier with IANA name ISO_6937-2-add. + // + // ISO-IR: International Register of Escape Sequences and ISO 6937-2:1983 + // Note: The current registration authority is IPSJ/ITSCJ, Japan. 
+ // Reference: RFC1345 + ISOTextComm MIB = 14 + + // HalfWidthKatakana is the MIB identifier with IANA name JIS_X0201. + // + // JIS X 0201-1976. One byte only, this is equivalent to + // JIS/Roman (similar to ASCII) plus eight-bit half-width + // Katakana + // Reference: RFC1345 + HalfWidthKatakana MIB = 15 + + // JISEncoding is the MIB identifier with IANA name JIS_Encoding. + // + // JIS X 0202-1991. Uses ISO 2022 escape sequences to + // shift code sets as documented in JIS X 0202-1991. + JISEncoding MIB = 16 + + // ShiftJIS is the MIB identifier with IANA name Shift_JIS (MIME: Shift_JIS). + // + // This charset is an extension of csHalfWidthKatakana by + // adding graphic characters in JIS X 0208. The CCS's are + // JIS X0201:1997 and JIS X0208:1997. The + // complete definition is shown in Appendix 1 of JIS + // X0208:1997. + // This charset can be used for the top-level media type "text". + ShiftJIS MIB = 17 + + // EUCPkdFmtJapanese is the MIB identifier with IANA name Extended_UNIX_Code_Packed_Format_for_Japanese (MIME: EUC-JP). + // + // Standardized by OSF, UNIX International, and UNIX Systems + // Laboratories Pacific. Uses ISO 2022 rules to select + // code set 0: US-ASCII (a single 7-bit byte set) + // code set 1: JIS X0208-1990 (a double 8-bit byte set) + // restricted to A0-FF in both bytes + // code set 2: Half Width Katakana (a single 7-bit byte set) + // requiring SS2 as the character prefix + // code set 3: JIS X0212-1990 (a double 7-bit byte set) + // restricted to A0-FF in both bytes + // requiring SS3 as the character prefix + EUCPkdFmtJapanese MIB = 18 + + // EUCFixWidJapanese is the MIB identifier with IANA name Extended_UNIX_Code_Fixed_Width_for_Japanese. + // + // Used in Japan. Each character is 2 octets. + // code set 0: US-ASCII (a single 7-bit byte set) + // 1st byte = 00 + // 2nd byte = 20-7E + // code set 1: JIS X0208-1990 (a double 7-bit byte set) + // restricted to A0-FF in both bytes + // code set 2: Half Width Katakana (a single 7-bit byte set) + // 1st byte = 00 + // 2nd byte = A0-FF + // code set 3: JIS X0212-1990 (a double 7-bit byte set) + // restricted to A0-FF in + // the first byte + // and 21-7E in the second byte + EUCFixWidJapanese MIB = 19 + + // ISO4UnitedKingdom is the MIB identifier with IANA name BS_4730. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO4UnitedKingdom MIB = 20 + + // ISO11SwedishForNames is the MIB identifier with IANA name SEN_850200_C. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO11SwedishForNames MIB = 21 + + // ISO15Italian is the MIB identifier with IANA name IT. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO15Italian MIB = 22 + + // ISO17Spanish is the MIB identifier with IANA name ES. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO17Spanish MIB = 23 + + // ISO21German is the MIB identifier with IANA name DIN_66003. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO21German MIB = 24 + + // ISO60Norwegian1 is the MIB identifier with IANA name NS_4551-1. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO60Norwegian1 MIB = 25 + + // ISO69French is the MIB identifier with IANA name NF_Z_62-010. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO69French MIB = 26 + + // ISO10646UTF1 is the MIB identifier with IANA name ISO-10646-UTF-1. + // + // Universal Transfer Format (1), this is the multibyte + // encoding, that subsets ASCII-7. It does not have byte + // ordering issues. + ISO10646UTF1 MIB = 27 + + // ISO646basic1983 is the MIB identifier with IANA name ISO_646.basic:1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO646basic1983 MIB = 28 + + // INVARIANT is the MIB identifier with IANA name INVARIANT. + // + // Reference: RFC1345 + INVARIANT MIB = 29 + + // ISO2IntlRefVersion is the MIB identifier with IANA name ISO_646.irv:1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO2IntlRefVersion MIB = 30 + + // NATSSEFI is the MIB identifier with IANA name NATS-SEFI. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + NATSSEFI MIB = 31 + + // NATSSEFIADD is the MIB identifier with IANA name NATS-SEFI-ADD. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + NATSSEFIADD MIB = 32 + + // NATSDANO is the MIB identifier with IANA name NATS-DANO. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + NATSDANO MIB = 33 + + // NATSDANOADD is the MIB identifier with IANA name NATS-DANO-ADD. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + NATSDANOADD MIB = 34 + + // ISO10Swedish is the MIB identifier with IANA name SEN_850200_B. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO10Swedish MIB = 35 + + // KSC56011987 is the MIB identifier with IANA name KS_C_5601-1987. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + KSC56011987 MIB = 36 + + // ISO2022KR is the MIB identifier with IANA name ISO-2022-KR (MIME: ISO-2022-KR). + // + // rfc1557 (see also KS_C_5601-1987) + // Reference: RFC1557 + ISO2022KR MIB = 37 + + // EUCKR is the MIB identifier with IANA name EUC-KR (MIME: EUC-KR). + // + // rfc1557 (see also KS_C_5861-1992) + // Reference: RFC1557 + EUCKR MIB = 38 + + // ISO2022JP is the MIB identifier with IANA name ISO-2022-JP (MIME: ISO-2022-JP). + // + // rfc1468 (see also rfc2237 ) + // Reference: RFC1468 + ISO2022JP MIB = 39 + + // ISO2022JP2 is the MIB identifier with IANA name ISO-2022-JP-2 (MIME: ISO-2022-JP-2). + // + // rfc1554 + // Reference: RFC1554 + ISO2022JP2 MIB = 40 + + // ISO13JISC6220jp is the MIB identifier with IANA name JIS_C6220-1969-jp. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO13JISC6220jp MIB = 41 + + // ISO14JISC6220ro is the MIB identifier with IANA name JIS_C6220-1969-ro. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO14JISC6220ro MIB = 42 + + // ISO16Portuguese is the MIB identifier with IANA name PT. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO16Portuguese MIB = 43 + + // ISO18Greek7Old is the MIB identifier with IANA name greek7-old. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO18Greek7Old MIB = 44 + + // ISO19LatinGreek is the MIB identifier with IANA name latin-greek. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO19LatinGreek MIB = 45 + + // ISO25French is the MIB identifier with IANA name NF_Z_62-010_(1973). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO25French MIB = 46 + + // ISO27LatinGreek1 is the MIB identifier with IANA name Latin-greek-1. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO27LatinGreek1 MIB = 47 + + // ISO5427Cyrillic is the MIB identifier with IANA name ISO_5427. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO5427Cyrillic MIB = 48 + + // ISO42JISC62261978 is the MIB identifier with IANA name JIS_C6226-1978. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO42JISC62261978 MIB = 49 + + // ISO47BSViewdata is the MIB identifier with IANA name BS_viewdata. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO47BSViewdata MIB = 50 + + // ISO49INIS is the MIB identifier with IANA name INIS. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO49INIS MIB = 51 + + // ISO50INIS8 is the MIB identifier with IANA name INIS-8. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO50INIS8 MIB = 52 + + // ISO51INISCyrillic is the MIB identifier with IANA name INIS-cyrillic. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO51INISCyrillic MIB = 53 + + // ISO54271981 is the MIB identifier with IANA name ISO_5427:1981. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO54271981 MIB = 54 + + // ISO5428Greek is the MIB identifier with IANA name ISO_5428:1980. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO5428Greek MIB = 55 + + // ISO57GB1988 is the MIB identifier with IANA name GB_1988-80. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO57GB1988 MIB = 56 + + // ISO58GB231280 is the MIB identifier with IANA name GB_2312-80. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO58GB231280 MIB = 57 + + // ISO61Norwegian2 is the MIB identifier with IANA name NS_4551-2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO61Norwegian2 MIB = 58 + + // ISO70VideotexSupp1 is the MIB identifier with IANA name videotex-suppl. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO70VideotexSupp1 MIB = 59 + + // ISO84Portuguese2 is the MIB identifier with IANA name PT2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO84Portuguese2 MIB = 60 + + // ISO85Spanish2 is the MIB identifier with IANA name ES2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO85Spanish2 MIB = 61 + + // ISO86Hungarian is the MIB identifier with IANA name MSZ_7795.3. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO86Hungarian MIB = 62 + + // ISO87JISX0208 is the MIB identifier with IANA name JIS_C6226-1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO87JISX0208 MIB = 63 + + // ISO88Greek7 is the MIB identifier with IANA name greek7. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO88Greek7 MIB = 64 + + // ISO89ASMO449 is the MIB identifier with IANA name ASMO_449. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO89ASMO449 MIB = 65 + + // ISO90 is the MIB identifier with IANA name iso-ir-90. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO90 MIB = 66 + + // ISO91JISC62291984a is the MIB identifier with IANA name JIS_C6229-1984-a. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO91JISC62291984a MIB = 67 + + // ISO92JISC62991984b is the MIB identifier with IANA name JIS_C6229-1984-b. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO92JISC62991984b MIB = 68 + + // ISO93JIS62291984badd is the MIB identifier with IANA name JIS_C6229-1984-b-add. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO93JIS62291984badd MIB = 69 + + // ISO94JIS62291984hand is the MIB identifier with IANA name JIS_C6229-1984-hand. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO94JIS62291984hand MIB = 70 + + // ISO95JIS62291984handadd is the MIB identifier with IANA name JIS_C6229-1984-hand-add. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO95JIS62291984handadd MIB = 71 + + // ISO96JISC62291984kana is the MIB identifier with IANA name JIS_C6229-1984-kana. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO96JISC62291984kana MIB = 72 + + // ISO2033 is the MIB identifier with IANA name ISO_2033-1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO2033 MIB = 73 + + // ISO99NAPLPS is the MIB identifier with IANA name ANSI_X3.110-1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO99NAPLPS MIB = 74 + + // ISO102T617bit is the MIB identifier with IANA name T.61-7bit. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO102T617bit MIB = 75 + + // ISO103T618bit is the MIB identifier with IANA name T.61-8bit. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO103T618bit MIB = 76 + + // ISO111ECMACyrillic is the MIB identifier with IANA name ECMA-cyrillic. + // + // ISO registry + // (formerly ECMA + // registry ) + ISO111ECMACyrillic MIB = 77 + + // ISO121Canadian1 is the MIB identifier with IANA name CSA_Z243.4-1985-1. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO121Canadian1 MIB = 78 + + // ISO122Canadian2 is the MIB identifier with IANA name CSA_Z243.4-1985-2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO122Canadian2 MIB = 79 + + // ISO123CSAZ24341985gr is the MIB identifier with IANA name CSA_Z243.4-1985-gr. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO123CSAZ24341985gr MIB = 80 + + // ISO88596E is the MIB identifier with IANA name ISO_8859-6-E (MIME: ISO-8859-6-E). + // + // rfc1556 + // Reference: RFC1556 + ISO88596E MIB = 81 + + // ISO88596I is the MIB identifier with IANA name ISO_8859-6-I (MIME: ISO-8859-6-I). + // + // rfc1556 + // Reference: RFC1556 + ISO88596I MIB = 82 + + // ISO128T101G2 is the MIB identifier with IANA name T.101-G2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. 
+ // Reference: RFC1345 + ISO128T101G2 MIB = 83 + + // ISO88598E is the MIB identifier with IANA name ISO_8859-8-E (MIME: ISO-8859-8-E). + // + // rfc1556 + // Reference: RFC1556 + ISO88598E MIB = 84 + + // ISO88598I is the MIB identifier with IANA name ISO_8859-8-I (MIME: ISO-8859-8-I). + // + // rfc1556 + // Reference: RFC1556 + ISO88598I MIB = 85 + + // ISO139CSN369103 is the MIB identifier with IANA name CSN_369103. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO139CSN369103 MIB = 86 + + // ISO141JUSIB1002 is the MIB identifier with IANA name JUS_I.B1.002. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO141JUSIB1002 MIB = 87 + + // ISO143IECP271 is the MIB identifier with IANA name IEC_P27-1. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO143IECP271 MIB = 88 + + // ISO146Serbian is the MIB identifier with IANA name JUS_I.B1.003-serb. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO146Serbian MIB = 89 + + // ISO147Macedonian is the MIB identifier with IANA name JUS_I.B1.003-mac. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO147Macedonian MIB = 90 + + // ISO150GreekCCITT is the MIB identifier with IANA name greek-ccitt. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO150GreekCCITT MIB = 91 + + // ISO151Cuba is the MIB identifier with IANA name NC_NC00-10:81. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO151Cuba MIB = 92 + + // ISO6937Add is the MIB identifier with IANA name ISO_6937-2-25. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO6937Add MIB = 93 + + // ISO153GOST1976874 is the MIB identifier with IANA name GOST_19768-74. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO153GOST1976874 MIB = 94 + + // ISO8859Supp is the MIB identifier with IANA name ISO_8859-supp. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO8859Supp MIB = 95 + + // ISO10367Box is the MIB identifier with IANA name ISO_10367-box. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO10367Box MIB = 96 + + // ISO158Lap is the MIB identifier with IANA name latin-lap. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO158Lap MIB = 97 + + // ISO159JISX02121990 is the MIB identifier with IANA name JIS_X0212-1990. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO159JISX02121990 MIB = 98 + + // ISO646Danish is the MIB identifier with IANA name DS_2089. + // + // Danish Standard, DS 2089, February 1974 + // Reference: RFC1345 + ISO646Danish MIB = 99 + + // USDK is the MIB identifier with IANA name us-dk. + // + // Reference: RFC1345 + USDK MIB = 100 + + // DKUS is the MIB identifier with IANA name dk-us. + // + // Reference: RFC1345 + DKUS MIB = 101 + + // KSC5636 is the MIB identifier with IANA name KSC5636. + // + // Reference: RFC1345 + KSC5636 MIB = 102 + + // Unicode11UTF7 is the MIB identifier with IANA name UNICODE-1-1-UTF-7. + // + // rfc1642 + // Reference: RFC1642 + Unicode11UTF7 MIB = 103 + + // ISO2022CN is the MIB identifier with IANA name ISO-2022-CN. + // + // rfc1922 + // Reference: RFC1922 + ISO2022CN MIB = 104 + + // ISO2022CNEXT is the MIB identifier with IANA name ISO-2022-CN-EXT. + // + // rfc1922 + // Reference: RFC1922 + ISO2022CNEXT MIB = 105 + + // UTF8 is the MIB identifier with IANA name UTF-8. + // + // rfc3629 + // Reference: RFC3629 + UTF8 MIB = 106 + + // ISO885913 is the MIB identifier with IANA name ISO-8859-13. + // + // ISO See http://www.iana.org/assignments/charset-reg/ISO-8859-13 http://www.iana.org/assignments/charset-reg/ISO-8859-13 + ISO885913 MIB = 109 + + // ISO885914 is the MIB identifier with IANA name ISO-8859-14. + // + // ISO See http://www.iana.org/assignments/charset-reg/ISO-8859-14 + ISO885914 MIB = 110 + + // ISO885915 is the MIB identifier with IANA name ISO-8859-15. + // + // ISO + // Please see: http://www.iana.org/assignments/charset-reg/ISO-8859-15 + ISO885915 MIB = 111 + + // ISO885916 is the MIB identifier with IANA name ISO-8859-16. + // + // ISO + ISO885916 MIB = 112 + + // GBK is the MIB identifier with IANA name GBK. + // + // Chinese IT Standardization Technical Committee + // Please see: http://www.iana.org/assignments/charset-reg/GBK + GBK MIB = 113 + + // GB18030 is the MIB identifier with IANA name GB18030. + // + // Chinese IT Standardization Technical Committee + // Please see: http://www.iana.org/assignments/charset-reg/GB18030 + GB18030 MIB = 114 + + // OSDEBCDICDF0415 is the MIB identifier with IANA name OSD_EBCDIC_DF04_15. + // + // Fujitsu-Siemens standard mainframe EBCDIC encoding + // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-15 + OSDEBCDICDF0415 MIB = 115 + + // OSDEBCDICDF03IRV is the MIB identifier with IANA name OSD_EBCDIC_DF03_IRV. + // + // Fujitsu-Siemens standard mainframe EBCDIC encoding + // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF03-IRV + OSDEBCDICDF03IRV MIB = 116 + + // OSDEBCDICDF041 is the MIB identifier with IANA name OSD_EBCDIC_DF04_1. + // + // Fujitsu-Siemens standard mainframe EBCDIC encoding + // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-1 + OSDEBCDICDF041 MIB = 117 + + // ISO115481 is the MIB identifier with IANA name ISO-11548-1. + // + // See http://www.iana.org/assignments/charset-reg/ISO-11548-1 + ISO115481 MIB = 118 + + // KZ1048 is the MIB identifier with IANA name KZ-1048. + // + // See http://www.iana.org/assignments/charset-reg/KZ-1048 + KZ1048 MIB = 119 + + // Unicode is the MIB identifier with IANA name ISO-10646-UCS-2. 
+ // + // the 2-octet Basic Multilingual Plane, aka Unicode + // this needs to specify network byte order: the standard + // does not specify (it is a 16-bit integer space) + Unicode MIB = 1000 + + // UCS4 is the MIB identifier with IANA name ISO-10646-UCS-4. + // + // the full code space. (same comment about byte order, + // these are 31-bit numbers. + UCS4 MIB = 1001 + + // UnicodeASCII is the MIB identifier with IANA name ISO-10646-UCS-Basic. + // + // ASCII subset of Unicode. Basic Latin = collection 1 + // See ISO 10646, Appendix A + UnicodeASCII MIB = 1002 + + // UnicodeLatin1 is the MIB identifier with IANA name ISO-10646-Unicode-Latin1. + // + // ISO Latin-1 subset of Unicode. Basic Latin and Latin-1 + // Supplement = collections 1 and 2. See ISO 10646, + // Appendix A. See rfc1815 . + UnicodeLatin1 MIB = 1003 + + // UnicodeJapanese is the MIB identifier with IANA name ISO-10646-J-1. + // + // ISO 10646 Japanese, see rfc1815 . + UnicodeJapanese MIB = 1004 + + // UnicodeIBM1261 is the MIB identifier with IANA name ISO-Unicode-IBM-1261. + // + // IBM Latin-2, -3, -5, Extended Presentation Set, GCSGID: 1261 + UnicodeIBM1261 MIB = 1005 + + // UnicodeIBM1268 is the MIB identifier with IANA name ISO-Unicode-IBM-1268. + // + // IBM Latin-4 Extended Presentation Set, GCSGID: 1268 + UnicodeIBM1268 MIB = 1006 + + // UnicodeIBM1276 is the MIB identifier with IANA name ISO-Unicode-IBM-1276. + // + // IBM Cyrillic Greek Extended Presentation Set, GCSGID: 1276 + UnicodeIBM1276 MIB = 1007 + + // UnicodeIBM1264 is the MIB identifier with IANA name ISO-Unicode-IBM-1264. + // + // IBM Arabic Presentation Set, GCSGID: 1264 + UnicodeIBM1264 MIB = 1008 + + // UnicodeIBM1265 is the MIB identifier with IANA name ISO-Unicode-IBM-1265. + // + // IBM Hebrew Presentation Set, GCSGID: 1265 + UnicodeIBM1265 MIB = 1009 + + // Unicode11 is the MIB identifier with IANA name UNICODE-1-1. + // + // rfc1641 + // Reference: RFC1641 + Unicode11 MIB = 1010 + + // SCSU is the MIB identifier with IANA name SCSU. + // + // SCSU See http://www.iana.org/assignments/charset-reg/SCSU + SCSU MIB = 1011 + + // UTF7 is the MIB identifier with IANA name UTF-7. + // + // rfc2152 + // Reference: RFC2152 + UTF7 MIB = 1012 + + // UTF16BE is the MIB identifier with IANA name UTF-16BE. + // + // rfc2781 + // Reference: RFC2781 + UTF16BE MIB = 1013 + + // UTF16LE is the MIB identifier with IANA name UTF-16LE. + // + // rfc2781 + // Reference: RFC2781 + UTF16LE MIB = 1014 + + // UTF16 is the MIB identifier with IANA name UTF-16. + // + // rfc2781 + // Reference: RFC2781 + UTF16 MIB = 1015 + + // CESU8 is the MIB identifier with IANA name CESU-8. + // + // http://www.unicode.org/unicode/reports/tr26 + CESU8 MIB = 1016 + + // UTF32 is the MIB identifier with IANA name UTF-32. + // + // http://www.unicode.org/unicode/reports/tr19/ + UTF32 MIB = 1017 + + // UTF32BE is the MIB identifier with IANA name UTF-32BE. + // + // http://www.unicode.org/unicode/reports/tr19/ + UTF32BE MIB = 1018 + + // UTF32LE is the MIB identifier with IANA name UTF-32LE. + // + // http://www.unicode.org/unicode/reports/tr19/ + UTF32LE MIB = 1019 + + // BOCU1 is the MIB identifier with IANA name BOCU-1. + // + // http://www.unicode.org/notes/tn6/ + BOCU1 MIB = 1020 + + // Windows30Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.0-Latin-1. + // + // Extended ISO 8859-1 Latin-1 for Windows 3.0. + // PCL Symbol Set id: 9U + Windows30Latin1 MIB = 2000 + + // Windows31Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.1-Latin-1. 
+ // + // Extended ISO 8859-1 Latin-1 for Windows 3.1. + // PCL Symbol Set id: 19U + Windows31Latin1 MIB = 2001 + + // Windows31Latin2 is the MIB identifier with IANA name ISO-8859-2-Windows-Latin-2. + // + // Extended ISO 8859-2. Latin-2 for Windows 3.1. + // PCL Symbol Set id: 9E + Windows31Latin2 MIB = 2002 + + // Windows31Latin5 is the MIB identifier with IANA name ISO-8859-9-Windows-Latin-5. + // + // Extended ISO 8859-9. Latin-5 for Windows 3.1 + // PCL Symbol Set id: 5T + Windows31Latin5 MIB = 2003 + + // HPRoman8 is the MIB identifier with IANA name hp-roman8. + // + // LaserJet IIP Printer User's Manual, + // HP part no 33471-90901, Hewlet-Packard, June 1989. + // Reference: RFC1345 + HPRoman8 MIB = 2004 + + // AdobeStandardEncoding is the MIB identifier with IANA name Adobe-Standard-Encoding. + // + // PostScript Language Reference Manual + // PCL Symbol Set id: 10J + AdobeStandardEncoding MIB = 2005 + + // VenturaUS is the MIB identifier with IANA name Ventura-US. + // + // Ventura US. ASCII plus characters typically used in + // publishing, like pilcrow, copyright, registered, trade mark, + // section, dagger, and double dagger in the range A0 (hex) + // to FF (hex). + // PCL Symbol Set id: 14J + VenturaUS MIB = 2006 + + // VenturaInternational is the MIB identifier with IANA name Ventura-International. + // + // Ventura International. ASCII plus coded characters similar + // to Roman8. + // PCL Symbol Set id: 13J + VenturaInternational MIB = 2007 + + // DECMCS is the MIB identifier with IANA name DEC-MCS. + // + // VAX/VMS User's Manual, + // Order Number: AI-Y517A-TE, April 1986. + // Reference: RFC1345 + DECMCS MIB = 2008 + + // PC850Multilingual is the MIB identifier with IANA name IBM850. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + PC850Multilingual MIB = 2009 + + // PC8DanishNorwegian is the MIB identifier with IANA name PC8-Danish-Norwegian. + // + // PC Danish Norwegian + // 8-bit PC set for Danish Norwegian + // PCL Symbol Set id: 11U + PC8DanishNorwegian MIB = 2012 + + // PC862LatinHebrew is the MIB identifier with IANA name IBM862. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + PC862LatinHebrew MIB = 2013 + + // PC8Turkish is the MIB identifier with IANA name PC8-Turkish. + // + // PC Latin Turkish. PCL Symbol Set id: 9T + PC8Turkish MIB = 2014 + + // IBMSymbols is the MIB identifier with IANA name IBM-Symbols. + // + // Presentation Set, CPGID: 259 + IBMSymbols MIB = 2015 + + // IBMThai is the MIB identifier with IANA name IBM-Thai. + // + // Presentation Set, CPGID: 838 + IBMThai MIB = 2016 + + // HPLegal is the MIB identifier with IANA name HP-Legal. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 1U + HPLegal MIB = 2017 + + // HPPiFont is the MIB identifier with IANA name HP-Pi-font. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 15U + HPPiFont MIB = 2018 + + // HPMath8 is the MIB identifier with IANA name HP-Math8. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 8M + HPMath8 MIB = 2019 + + // HPPSMath is the MIB identifier with IANA name Adobe-Symbol-Encoding. + // + // PostScript Language Reference Manual + // PCL Symbol Set id: 5M + HPPSMath MIB = 2020 + + // HPDesktop is the MIB identifier with IANA name HP-DeskTop. 
+ // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 7J + HPDesktop MIB = 2021 + + // VenturaMath is the MIB identifier with IANA name Ventura-Math. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 6M + VenturaMath MIB = 2022 + + // MicrosoftPublishing is the MIB identifier with IANA name Microsoft-Publishing. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 6J + MicrosoftPublishing MIB = 2023 + + // Windows31J is the MIB identifier with IANA name Windows-31J. + // + // Windows Japanese. A further extension of Shift_JIS + // to include NEC special characters (Row 13), NEC + // selection of IBM extensions (Rows 89 to 92), and IBM + // extensions (Rows 115 to 119). The CCS's are + // JIS X0201:1997, JIS X0208:1997, and these extensions. + // This charset can be used for the top-level media type "text", + // but it is of limited or specialized use (see rfc2278 ). + // PCL Symbol Set id: 19K + Windows31J MIB = 2024 + + // GB2312 is the MIB identifier with IANA name GB2312 (MIME: GB2312). + // + // Chinese for People's Republic of China (PRC) mixed one byte, + // two byte set: + // 20-7E = one byte ASCII + // A1-FE = two byte PRC Kanji + // See GB 2312-80 + // PCL Symbol Set Id: 18C + GB2312 MIB = 2025 + + // Big5 is the MIB identifier with IANA name Big5 (MIME: Big5). + // + // Chinese for Taiwan Multi-byte set. + // PCL Symbol Set Id: 18T + Big5 MIB = 2026 + + // Macintosh is the MIB identifier with IANA name macintosh. + // + // The Unicode Standard ver1.0, ISBN 0-201-56788-1, Oct 1991 + // Reference: RFC1345 + Macintosh MIB = 2027 + + // IBM037 is the MIB identifier with IANA name IBM037. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM037 MIB = 2028 + + // IBM038 is the MIB identifier with IANA name IBM038. + // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM038 MIB = 2029 + + // IBM273 is the MIB identifier with IANA name IBM273. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM273 MIB = 2030 + + // IBM274 is the MIB identifier with IANA name IBM274. + // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM274 MIB = 2031 + + // IBM275 is the MIB identifier with IANA name IBM275. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM275 MIB = 2032 + + // IBM277 is the MIB identifier with IANA name IBM277. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM277 MIB = 2033 + + // IBM278 is the MIB identifier with IANA name IBM278. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM278 MIB = 2034 + + // IBM280 is the MIB identifier with IANA name IBM280. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM280 MIB = 2035 + + // IBM281 is the MIB identifier with IANA name IBM281. + // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM281 MIB = 2036 + + // IBM284 is the MIB identifier with IANA name IBM284. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM284 MIB = 2037 + + // IBM285 is the MIB identifier with IANA name IBM285. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM285 MIB = 2038 + + // IBM290 is the MIB identifier with IANA name IBM290. 
+ // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM290 MIB = 2039 + + // IBM297 is the MIB identifier with IANA name IBM297. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM297 MIB = 2040 + + // IBM420 is the MIB identifier with IANA name IBM420. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990, + // IBM NLS RM p 11-11 + // Reference: RFC1345 + IBM420 MIB = 2041 + + // IBM423 is the MIB identifier with IANA name IBM423. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM423 MIB = 2042 + + // IBM424 is the MIB identifier with IANA name IBM424. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM424 MIB = 2043 + + // PC8CodePage437 is the MIB identifier with IANA name IBM437. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + PC8CodePage437 MIB = 2011 + + // IBM500 is the MIB identifier with IANA name IBM500. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM500 MIB = 2044 + + // IBM851 is the MIB identifier with IANA name IBM851. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM851 MIB = 2045 + + // PCp852 is the MIB identifier with IANA name IBM852. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + PCp852 MIB = 2010 + + // IBM855 is the MIB identifier with IANA name IBM855. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM855 MIB = 2046 + + // IBM857 is the MIB identifier with IANA name IBM857. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM857 MIB = 2047 + + // IBM860 is the MIB identifier with IANA name IBM860. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM860 MIB = 2048 + + // IBM861 is the MIB identifier with IANA name IBM861. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM861 MIB = 2049 + + // IBM863 is the MIB identifier with IANA name IBM863. + // + // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 + // Reference: RFC1345 + IBM863 MIB = 2050 + + // IBM864 is the MIB identifier with IANA name IBM864. + // + // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 + // Reference: RFC1345 + IBM864 MIB = 2051 + + // IBM865 is the MIB identifier with IANA name IBM865. + // + // IBM DOS 3.3 Ref (Abridged), 94X9575 (Feb 1987) + // Reference: RFC1345 + IBM865 MIB = 2052 + + // IBM868 is the MIB identifier with IANA name IBM868. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM868 MIB = 2053 + + // IBM869 is the MIB identifier with IANA name IBM869. + // + // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 + // Reference: RFC1345 + IBM869 MIB = 2054 + + // IBM870 is the MIB identifier with IANA name IBM870. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM870 MIB = 2055 + + // IBM871 is the MIB identifier with IANA name IBM871. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM871 MIB = 2056 + + // IBM880 is the MIB identifier with IANA name IBM880. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM880 MIB = 2057 + + // IBM891 is the MIB identifier with IANA name IBM891. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM891 MIB = 2058 + + // IBM903 is the MIB identifier with IANA name IBM903. 
+ // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM903 MIB = 2059 + + // IBBM904 is the MIB identifier with IANA name IBM904. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBBM904 MIB = 2060 + + // IBM905 is the MIB identifier with IANA name IBM905. + // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM905 MIB = 2061 + + // IBM918 is the MIB identifier with IANA name IBM918. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM918 MIB = 2062 + + // IBM1026 is the MIB identifier with IANA name IBM1026. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM1026 MIB = 2063 + + // IBMEBCDICATDE is the MIB identifier with IANA name EBCDIC-AT-DE. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + IBMEBCDICATDE MIB = 2064 + + // EBCDICATDEA is the MIB identifier with IANA name EBCDIC-AT-DE-A. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICATDEA MIB = 2065 + + // EBCDICCAFR is the MIB identifier with IANA name EBCDIC-CA-FR. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICCAFR MIB = 2066 + + // EBCDICDKNO is the MIB identifier with IANA name EBCDIC-DK-NO. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICDKNO MIB = 2067 + + // EBCDICDKNOA is the MIB identifier with IANA name EBCDIC-DK-NO-A. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICDKNOA MIB = 2068 + + // EBCDICFISE is the MIB identifier with IANA name EBCDIC-FI-SE. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICFISE MIB = 2069 + + // EBCDICFISEA is the MIB identifier with IANA name EBCDIC-FI-SE-A. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICFISEA MIB = 2070 + + // EBCDICFR is the MIB identifier with IANA name EBCDIC-FR. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICFR MIB = 2071 + + // EBCDICIT is the MIB identifier with IANA name EBCDIC-IT. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICIT MIB = 2072 + + // EBCDICPT is the MIB identifier with IANA name EBCDIC-PT. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICPT MIB = 2073 + + // EBCDICES is the MIB identifier with IANA name EBCDIC-ES. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICES MIB = 2074 + + // EBCDICESA is the MIB identifier with IANA name EBCDIC-ES-A. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICESA MIB = 2075 + + // EBCDICESS is the MIB identifier with IANA name EBCDIC-ES-S. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICESS MIB = 2076 + + // EBCDICUK is the MIB identifier with IANA name EBCDIC-UK. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICUK MIB = 2077 + + // EBCDICUS is the MIB identifier with IANA name EBCDIC-US. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICUS MIB = 2078 + + // Unknown8BiT is the MIB identifier with IANA name UNKNOWN-8BIT. 
+ // + // Reference: RFC1428 + Unknown8BiT MIB = 2079 + + // Mnemonic is the MIB identifier with IANA name MNEMONIC. + // + // rfc1345 , also known as "mnemonic+ascii+38" + // Reference: RFC1345 + Mnemonic MIB = 2080 + + // Mnem is the MIB identifier with IANA name MNEM. + // + // rfc1345 , also known as "mnemonic+ascii+8200" + // Reference: RFC1345 + Mnem MIB = 2081 + + // VISCII is the MIB identifier with IANA name VISCII. + // + // rfc1456 + // Reference: RFC1456 + VISCII MIB = 2082 + + // VIQR is the MIB identifier with IANA name VIQR. + // + // rfc1456 + // Reference: RFC1456 + VIQR MIB = 2083 + + // KOI8R is the MIB identifier with IANA name KOI8-R (MIME: KOI8-R). + // + // rfc1489 , based on GOST-19768-74, ISO-6937/8, + // INIS-Cyrillic, ISO-5427. + // Reference: RFC1489 + KOI8R MIB = 2084 + + // HZGB2312 is the MIB identifier with IANA name HZ-GB-2312. + // + // rfc1842 , rfc1843 rfc1843 rfc1842 + HZGB2312 MIB = 2085 + + // IBM866 is the MIB identifier with IANA name IBM866. + // + // IBM NLDG Volume 2 (SE09-8002-03) August 1994 + IBM866 MIB = 2086 + + // PC775Baltic is the MIB identifier with IANA name IBM775. + // + // HP PCL 5 Comparison Guide (P/N 5021-0329) pp B-13, 1996 + PC775Baltic MIB = 2087 + + // KOI8U is the MIB identifier with IANA name KOI8-U. + // + // rfc2319 + // Reference: RFC2319 + KOI8U MIB = 2088 + + // IBM00858 is the MIB identifier with IANA name IBM00858. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM00858 + IBM00858 MIB = 2089 + + // IBM00924 is the MIB identifier with IANA name IBM00924. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM00924 + IBM00924 MIB = 2090 + + // IBM01140 is the MIB identifier with IANA name IBM01140. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01140 + IBM01140 MIB = 2091 + + // IBM01141 is the MIB identifier with IANA name IBM01141. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01141 + IBM01141 MIB = 2092 + + // IBM01142 is the MIB identifier with IANA name IBM01142. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01142 + IBM01142 MIB = 2093 + + // IBM01143 is the MIB identifier with IANA name IBM01143. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01143 + IBM01143 MIB = 2094 + + // IBM01144 is the MIB identifier with IANA name IBM01144. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01144 + IBM01144 MIB = 2095 + + // IBM01145 is the MIB identifier with IANA name IBM01145. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01145 + IBM01145 MIB = 2096 + + // IBM01146 is the MIB identifier with IANA name IBM01146. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01146 + IBM01146 MIB = 2097 + + // IBM01147 is the MIB identifier with IANA name IBM01147. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01147 + IBM01147 MIB = 2098 + + // IBM01148 is the MIB identifier with IANA name IBM01148. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01148 + IBM01148 MIB = 2099 + + // IBM01149 is the MIB identifier with IANA name IBM01149. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01149 + IBM01149 MIB = 2100 + + // Big5HKSCS is the MIB identifier with IANA name Big5-HKSCS. + // + // See http://www.iana.org/assignments/charset-reg/Big5-HKSCS + Big5HKSCS MIB = 2101 + + // IBM1047 is the MIB identifier with IANA name IBM1047. 
+ // + // IBM1047 (EBCDIC Latin 1/Open Systems) http://www-1.ibm.com/servers/eserver/iseries/software/globalization/pdf/cp01047z.pdf + IBM1047 MIB = 2102 + + // PTCP154 is the MIB identifier with IANA name PTCP154. + // + // See http://www.iana.org/assignments/charset-reg/PTCP154 + PTCP154 MIB = 2103 + + // Amiga1251 is the MIB identifier with IANA name Amiga-1251. + // + // See http://www.amiga.ultranet.ru/Amiga-1251.html + Amiga1251 MIB = 2104 + + // KOI7switched is the MIB identifier with IANA name KOI7-switched. + // + // See http://www.iana.org/assignments/charset-reg/KOI7-switched + KOI7switched MIB = 2105 + + // BRF is the MIB identifier with IANA name BRF. + // + // See http://www.iana.org/assignments/charset-reg/BRF + BRF MIB = 2106 + + // TSCII is the MIB identifier with IANA name TSCII. + // + // See http://www.iana.org/assignments/charset-reg/TSCII + TSCII MIB = 2107 + + // CP51932 is the MIB identifier with IANA name CP51932. + // + // See http://www.iana.org/assignments/charset-reg/CP51932 + CP51932 MIB = 2108 + + // Windows874 is the MIB identifier with IANA name windows-874. + // + // See http://www.iana.org/assignments/charset-reg/windows-874 + Windows874 MIB = 2109 + + // Windows1250 is the MIB identifier with IANA name windows-1250. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1250 + Windows1250 MIB = 2250 + + // Windows1251 is the MIB identifier with IANA name windows-1251. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1251 + Windows1251 MIB = 2251 + + // Windows1252 is the MIB identifier with IANA name windows-1252. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1252 + Windows1252 MIB = 2252 + + // Windows1253 is the MIB identifier with IANA name windows-1253. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1253 + Windows1253 MIB = 2253 + + // Windows1254 is the MIB identifier with IANA name windows-1254. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1254 + Windows1254 MIB = 2254 + + // Windows1255 is the MIB identifier with IANA name windows-1255. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1255 + Windows1255 MIB = 2255 + + // Windows1256 is the MIB identifier with IANA name windows-1256. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1256 + Windows1256 MIB = 2256 + + // Windows1257 is the MIB identifier with IANA name windows-1257. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1257 + Windows1257 MIB = 2257 + + // Windows1258 is the MIB identifier with IANA name windows-1258. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1258 + Windows1258 MIB = 2258 + + // TIS620 is the MIB identifier with IANA name TIS-620. + // + // Thai Industrial Standards Institute (TISI) + TIS620 MIB = 2259 + + // CP50220 is the MIB identifier with IANA name CP50220. + // + // See http://www.iana.org/assignments/charset-reg/CP50220 + CP50220 MIB = 2260 +) diff --git a/vendor/golang.org/x/text/encoding/internal/internal.go b/vendor/golang.org/x/text/encoding/internal/internal.go new file mode 100644 index 000000000..75a5fd165 --- /dev/null +++ b/vendor/golang.org/x/text/encoding/internal/internal.go @@ -0,0 +1,75 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains code that is shared among encoding implementations. 
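+//
+// A typical use, sketched here for illustration only (the wrapped value "base"
+// is hypothetical and stands for some underlying transform-based
+// implementation), is to give an existing encoding.Encoding a name and a MIB
+// identifier:
+//
+//	var windows1252 encoding.Encoding = &internal.Encoding{
+//		Encoding: base, // hypothetical underlying encoding
+//		Name:     "windows-1252",
+//		MIB:      identifier.Windows1252,
+//	}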
+package internal + +import ( + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/internal/identifier" + "golang.org/x/text/transform" +) + +// Encoding is an implementation of the Encoding interface that adds the String +// and ID methods to an existing encoding. +type Encoding struct { + encoding.Encoding + Name string + MIB identifier.MIB +} + +// _ verifies that Encoding implements identifier.Interface. +var _ identifier.Interface = (*Encoding)(nil) + +func (e *Encoding) String() string { + return e.Name +} + +func (e *Encoding) ID() (mib identifier.MIB, other string) { + return e.MIB, "" +} + +// SimpleEncoding is an Encoding that combines two Transformers. +type SimpleEncoding struct { + Decoder transform.Transformer + Encoder transform.Transformer +} + +func (e *SimpleEncoding) NewDecoder() *encoding.Decoder { + return &encoding.Decoder{Transformer: e.Decoder} +} + +func (e *SimpleEncoding) NewEncoder() *encoding.Encoder { + return &encoding.Encoder{Transformer: e.Encoder} +} + +// FuncEncoding is an Encoding that combines two functions returning a new +// Transformer. +type FuncEncoding struct { + Decoder func() transform.Transformer + Encoder func() transform.Transformer +} + +func (e FuncEncoding) NewDecoder() *encoding.Decoder { + return &encoding.Decoder{Transformer: e.Decoder()} +} + +func (e FuncEncoding) NewEncoder() *encoding.Encoder { + return &encoding.Encoder{Transformer: e.Encoder()} +} + +// A RepertoireError indicates a rune is not in the repertoire of a destination +// encoding. It is associated with an encoding-specific suggested replacement +// byte. +type RepertoireError byte + +// Error implements the error interface. +func (r RepertoireError) Error() string { + return "encoding: rune not supported by encoding." +} + +// Replacement returns the replacement string associated with this error. +func (r RepertoireError) Replacement() byte { return byte(r) } + +var ErrASCIIReplacement = RepertoireError(encoding.ASCIISub) diff --git a/vendor/golang.org/x/text/encoding/unicode/override.go b/vendor/golang.org/x/text/encoding/unicode/override.go new file mode 100644 index 000000000..35d62fcc9 --- /dev/null +++ b/vendor/golang.org/x/text/encoding/unicode/override.go @@ -0,0 +1,82 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unicode + +import ( + "golang.org/x/text/transform" +) + +// BOMOverride returns a new decoder transformer that is identical to fallback, +// except that the presence of a Byte Order Mark at the start of the input +// causes it to switch to the corresponding Unicode decoding. It will only +// consider BOMs for UTF-8, UTF-16BE, and UTF-16LE. +// +// This differs from using ExpectBOM by allowing a BOM to switch to UTF-8, not +// just UTF-16 variants, and allowing falling back to any encoding scheme. +// +// This technique is recommended by the W3C for use in HTML 5: "For +// compatibility with deployed content, the byte order mark (also known as BOM) +// is considered more authoritative than anything else." +// http://www.w3.org/TR/encoding/#specification-hooks +// +// Using BOMOverride is mostly intended for use cases where the first characters +// of a fallback encoding are known to not be a BOM, for example, for valid HTML +// and most encodings.
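+//
+// A minimal usage sketch (the variable "raw" stands for arbitrary input bytes
+// and is illustrative only): wrap a fallback decoder so that input tagged with
+// a UTF-8 or UTF-16 BOM is still decoded correctly:
+//
+//	t := BOMOverride(UTF8.NewDecoder())
+//	decoded, _, err := transform.Bytes(t, raw)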
+func BOMOverride(fallback transform.Transformer) transform.Transformer { + // TODO: possibly allow a variadic argument of unicode encodings to allow + // specifying details of which fallbacks are supported as well as + // specifying the details of the implementations. This would also allow for + // support for UTF-32, which should not be supported by default. + return &bomOverride{fallback: fallback} +} + +type bomOverride struct { + fallback transform.Transformer + current transform.Transformer +} + +func (d *bomOverride) Reset() { + d.current = nil + d.fallback.Reset() +} + +var ( + // TODO: we could use decode functions here, instead of allocating a new + // decoder on every NewDecoder as IgnoreBOM decoders can be stateless. + utf16le = UTF16(LittleEndian, IgnoreBOM) + utf16be = UTF16(BigEndian, IgnoreBOM) +) + +const utf8BOM = "\ufeff" + +func (d *bomOverride) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + if d.current != nil { + return d.current.Transform(dst, src, atEOF) + } + if len(src) < 3 && !atEOF { + return 0, 0, transform.ErrShortSrc + } + d.current = d.fallback + bomSize := 0 + if len(src) >= 2 { + if src[0] == 0xFF && src[1] == 0xFE { + d.current = utf16le.NewDecoder() + bomSize = 2 + } else if src[0] == 0xFE && src[1] == 0xFF { + d.current = utf16be.NewDecoder() + bomSize = 2 + } else if len(src) >= 3 && + src[0] == utf8BOM[0] && + src[1] == utf8BOM[1] && + src[2] == utf8BOM[2] { + d.current = transform.Nop + bomSize = 3 + } + } + if bomSize < len(src) { + nDst, nSrc, err = d.current.Transform(dst, src[bomSize:], atEOF) + } + return nDst, nSrc + bomSize, err +} diff --git a/vendor/golang.org/x/text/encoding/unicode/unicode.go b/vendor/golang.org/x/text/encoding/unicode/unicode.go new file mode 100644 index 000000000..579cadfb1 --- /dev/null +++ b/vendor/golang.org/x/text/encoding/unicode/unicode.go @@ -0,0 +1,434 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unicode provides Unicode encodings such as UTF-16. +package unicode // import "golang.org/x/text/encoding/unicode" + +import ( + "errors" + "unicode/utf16" + "unicode/utf8" + + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/internal" + "golang.org/x/text/encoding/internal/identifier" + "golang.org/x/text/internal/utf8internal" + "golang.org/x/text/runes" + "golang.org/x/text/transform" +) + +// TODO: I think the Transformers really should return errors on unmatched +// surrogate pairs and odd numbers of bytes. This is not required by RFC 2781, +// which leaves it open, but is suggested by WhatWG. It will allow for all error +// modes as defined by WhatWG: fatal, HTML and Replacement. This would require +// the introduction of some kind of error type for conveying the erroneous code +// point. + +// UTF8 is the UTF-8 encoding. +var UTF8 encoding.Encoding = utf8enc + +var utf8enc = &internal.Encoding{ + &internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()}, + "UTF-8", + identifier.UTF8, +} + +type utf8Decoder struct{ transform.NopResetter } + +func (utf8Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + var pSrc int // point from which to start copy in src + var accept utf8internal.AcceptRange + + // The decoder can only make the input larger, not smaller. 
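+	// (Well-formed bytes are copied through unchanged, and each ill-formed
+	// subsequence of one to three bytes is replaced by the three-byte encoding
+	// of utf8.RuneError, so the output is never shorter than the consumed input.)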
+ n := len(src) + if len(dst) < n { + err = transform.ErrShortDst + n = len(dst) + atEOF = false + } + for nSrc < n { + c := src[nSrc] + if c < utf8.RuneSelf { + nSrc++ + continue + } + first := utf8internal.First[c] + size := int(first & utf8internal.SizeMask) + if first == utf8internal.FirstInvalid { + goto handleInvalid // invalid starter byte + } + accept = utf8internal.AcceptRanges[first>>utf8internal.AcceptShift] + if nSrc+size > n { + if !atEOF { + // We may stop earlier than necessary here if the short sequence + // has invalid bytes. Not checking for this simplifies the code + // and may avoid duplicate computations in certain conditions. + if err == nil { + err = transform.ErrShortSrc + } + break + } + // Determine the maximal subpart of an ill-formed subsequence. + switch { + case nSrc+1 >= n || src[nSrc+1] < accept.Lo || accept.Hi < src[nSrc+1]: + size = 1 + case nSrc+2 >= n || src[nSrc+2] < utf8internal.LoCB || utf8internal.HiCB < src[nSrc+2]: + size = 2 + default: + size = 3 // As we are short, the maximum is 3. + } + goto handleInvalid + } + if c = src[nSrc+1]; c < accept.Lo || accept.Hi < c { + size = 1 + goto handleInvalid // invalid continuation byte + } else if size == 2 { + } else if c = src[nSrc+2]; c < utf8internal.LoCB || utf8internal.HiCB < c { + size = 2 + goto handleInvalid // invalid continuation byte + } else if size == 3 { + } else if c = src[nSrc+3]; c < utf8internal.LoCB || utf8internal.HiCB < c { + size = 3 + goto handleInvalid // invalid continuation byte + } + nSrc += size + continue + + handleInvalid: + // Copy the scanned input so far. + nDst += copy(dst[nDst:], src[pSrc:nSrc]) + + // Append RuneError to the destination. + const runeError = "\ufffd" + if nDst+len(runeError) > len(dst) { + return nDst, nSrc, transform.ErrShortDst + } + nDst += copy(dst[nDst:], runeError) + + // Skip the maximal subpart of an ill-formed subsequence according to + // the W3C standard way instead of the Go way. This Transform is + // probably the only place in the text repo where it is warranted. + nSrc += size + pSrc = nSrc + + // Recompute the maximum source length. + if sz := len(dst) - nDst; sz < len(src)-nSrc { + err = transform.ErrShortDst + n = nSrc + sz + atEOF = false + } + } + return nDst + copy(dst[nDst:], src[pSrc:nSrc]), nSrc, err +} + +// UTF16 returns a UTF-16 Encoding for the given default endianness and byte +// order mark (BOM) policy. +// +// When decoding from UTF-16 to UTF-8, if the BOMPolicy is IgnoreBOM then +// neither BOMs U+FEFF nor noncharacters U+FFFE in the input stream will affect +// the endianness used for decoding, and will instead be output as their +// standard UTF-8 encodings: "\xef\xbb\xbf" and "\xef\xbf\xbe". If the BOMPolicy +// is UseBOM or ExpectBOM a staring BOM is not written to the UTF-8 output. +// Instead, it overrides the default endianness e for the remainder of the +// transformation. Any subsequent BOMs U+FEFF or noncharacters U+FFFE will not +// affect the endianness used, and will instead be output as their standard +// UTF-8 encodings. For UseBOM, if there is no starting BOM, it will proceed +// with the default Endianness. For ExpectBOM, in that case, the transformation +// will return early with an ErrMissingBOM error. +// +// When encoding from UTF-8 to UTF-16, a BOM will be inserted at the start of +// the output if the BOMPolicy is UseBOM or ExpectBOM. Otherwise, a BOM will not +// be inserted. The UTF-8 input does not need to contain a BOM. +// +// There is no concept of a 'native' endianness. 
If the UTF-16 data is produced +// and consumed in a greater context that implies a certain endianness, use +// IgnoreBOM. Otherwise, use ExpectBOM and always produce and consume a BOM. +// +// In the language of http://www.unicode.org/faq/utf_bom.html#bom10, IgnoreBOM +// corresponds to "Where the precise type of the data stream is known... the +// BOM should not be used" and ExpectBOM corresponds to "A particular +// protocol... may require use of the BOM". +func UTF16(e Endianness, b BOMPolicy) encoding.Encoding { + return utf16Encoding{config{e, b}, mibValue[e][b&bomMask]} +} + +// mibValue maps Endianness and BOMPolicy settings to MIB constants. Note that +// some configurations map to the same MIB identifier. RFC 2781 has requirements +// and recommendations. Some of the "configurations" are merely recommendations, +// so multiple configurations could match. +var mibValue = map[Endianness][numBOMValues]identifier.MIB{ + BigEndian: [numBOMValues]identifier.MIB{ + IgnoreBOM: identifier.UTF16BE, + UseBOM: identifier.UTF16, // BigEndian default is preferred by RFC 2781. + // TODO: acceptBOM | strictBOM would map to UTF16BE as well. + }, + LittleEndian: [numBOMValues]identifier.MIB{ + IgnoreBOM: identifier.UTF16LE, + UseBOM: identifier.UTF16, // LittleEndian default is allowed and preferred on Windows. + // TODO: acceptBOM | strictBOM would map to UTF16LE as well. + }, + // ExpectBOM is not widely used and has no valid MIB identifier. +} + +// All lists a configuration for each IANA-defined UTF-16 variant. +var All = []encoding.Encoding{ + UTF8, + UTF16(BigEndian, UseBOM), + UTF16(BigEndian, IgnoreBOM), + UTF16(LittleEndian, IgnoreBOM), +} + +// BOMPolicy is a UTF-16 encoding's byte order mark policy. +type BOMPolicy uint8 + +const ( + writeBOM BOMPolicy = 0x01 + acceptBOM BOMPolicy = 0x02 + requireBOM BOMPolicy = 0x04 + bomMask BOMPolicy = 0x07 + + // HACK: numBOMValues == 8 triggers a bug in the 1.4 compiler (cannot have a + // map of an array of length 8 of a type that is also used as a key or value + // in another map). See golang.org/issue/11354. + // TODO: consider changing this value back to 8 if the use of 1.4.* has + // been minimized. + numBOMValues = 8 + 1 + + // IgnoreBOM means to ignore any byte order marks. + IgnoreBOM BOMPolicy = 0 + // Common and RFC 2781-compliant interpretation for UTF-16BE/LE. + + // UseBOM means that the UTF-16 form may start with a byte order mark, which + // will be used to override the default encoding. + UseBOM BOMPolicy = writeBOM | acceptBOM + // Common and RFC 2781-compliant interpretation for UTF-16. + + // ExpectBOM means that the UTF-16 form must start with a byte order mark, + // which will be used to override the default encoding. + ExpectBOM BOMPolicy = writeBOM | acceptBOM | requireBOM + // Used in Java as Unicode (not to be confused with Java's UTF-16) and + // ICU's UTF-16,version=1. Not compliant with RFC 2781. + + // TODO (maybe): strictBOM: BOM must match Endianness. This would allow: + // - UTF-16(B|L)E,version=1: writeBOM | acceptBOM | requireBOM | strictBOM + // (UnicodeBig and UnicodeLittle in Java) + // - RFC 2781-compliant, but less common interpretation for UTF-16(B|L)E: + // acceptBOM | strictBOM (e.g. assigned to CheckBOM). + // This addition would be consistent with supporting ExpectBOM. +) + +// Endianness is a UTF-16 encoding's default endianness. +type Endianness bool + +const ( + // BigEndian is UTF-16BE. + BigEndian Endianness = false + // LittleEndian is UTF-16LE.
+ LittleEndian Endianness = true +) + +// ErrMissingBOM means that decoding UTF-16 input with ExpectBOM did not find a +// starting byte order mark. +var ErrMissingBOM = errors.New("encoding: missing byte order mark") + +type utf16Encoding struct { + config + mib identifier.MIB +} + +type config struct { + endianness Endianness + bomPolicy BOMPolicy +} + +func (u utf16Encoding) NewDecoder() *encoding.Decoder { + return &encoding.Decoder{Transformer: &utf16Decoder{ + initial: u.config, + current: u.config, + }} +} + +func (u utf16Encoding) NewEncoder() *encoding.Encoder { + return &encoding.Encoder{Transformer: &utf16Encoder{ + endianness: u.endianness, + initialBOMPolicy: u.bomPolicy, + currentBOMPolicy: u.bomPolicy, + }} +} + +func (u utf16Encoding) ID() (mib identifier.MIB, other string) { + return u.mib, "" +} + +func (u utf16Encoding) String() string { + e, b := "B", "" + if u.endianness == LittleEndian { + e = "L" + } + switch u.bomPolicy { + case ExpectBOM: + b = "Expect" + case UseBOM: + b = "Use" + case IgnoreBOM: + b = "Ignore" + } + return "UTF-16" + e + "E (" + b + " BOM)" +} + +type utf16Decoder struct { + initial config + current config +} + +func (u *utf16Decoder) Reset() { + u.current = u.initial +} + +func (u *utf16Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + if len(src) == 0 { + if atEOF && u.current.bomPolicy&requireBOM != 0 { + return 0, 0, ErrMissingBOM + } + return 0, 0, nil + } + if u.current.bomPolicy&acceptBOM != 0 { + if len(src) < 2 { + return 0, 0, transform.ErrShortSrc + } + switch { + case src[0] == 0xfe && src[1] == 0xff: + u.current.endianness = BigEndian + nSrc = 2 + case src[0] == 0xff && src[1] == 0xfe: + u.current.endianness = LittleEndian + nSrc = 2 + default: + if u.current.bomPolicy&requireBOM != 0 { + return 0, 0, ErrMissingBOM + } + } + u.current.bomPolicy = IgnoreBOM + } + + var r rune + var dSize, sSize int + for nSrc < len(src) { + if nSrc+1 < len(src) { + x := uint16(src[nSrc+0])<<8 | uint16(src[nSrc+1]) + if u.current.endianness == LittleEndian { + x = x>>8 | x<<8 + } + r, sSize = rune(x), 2 + if utf16.IsSurrogate(r) { + if nSrc+3 < len(src) { + x = uint16(src[nSrc+2])<<8 | uint16(src[nSrc+3]) + if u.current.endianness == LittleEndian { + x = x>>8 | x<<8 + } + // Save for next iteration if it is not a high surrogate. + if isHighSurrogate(rune(x)) { + r, sSize = utf16.DecodeRune(r, rune(x)), 4 + } + } else if !atEOF { + err = transform.ErrShortSrc + break + } + } + if dSize = utf8.RuneLen(r); dSize < 0 { + r, dSize = utf8.RuneError, 3 + } + } else if atEOF { + // Single trailing byte. + r, dSize, sSize = utf8.RuneError, 3, 1 + } else { + err = transform.ErrShortSrc + break + } + if nDst+dSize > len(dst) { + err = transform.ErrShortDst + break + } + nDst += utf8.EncodeRune(dst[nDst:], r) + nSrc += sSize + } + return nDst, nSrc, err +} + +func isHighSurrogate(r rune) bool { + return 0xDC00 <= r && r <= 0xDFFF +} + +type utf16Encoder struct { + endianness Endianness + initialBOMPolicy BOMPolicy + currentBOMPolicy BOMPolicy +} + +func (u *utf16Encoder) Reset() { + u.currentBOMPolicy = u.initialBOMPolicy +} + +func (u *utf16Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + if u.currentBOMPolicy&writeBOM != 0 { + if len(dst) < 2 { + return 0, 0, transform.ErrShortDst + } + dst[0], dst[1] = 0xfe, 0xff + u.currentBOMPolicy = IgnoreBOM + nDst = 2 + } + + r, size := rune(0), 0 + for nSrc < len(src) { + r = rune(src[nSrc]) + + // Decode a 1-byte rune. 
+ if r < utf8.RuneSelf { + size = 1 + + } else { + // Decode a multi-byte rune. + r, size = utf8.DecodeRune(src[nSrc:]) + if size == 1 { + // All valid runes of size 1 (those below utf8.RuneSelf) were + // handled above. We have invalid UTF-8 or we haven't seen the + // full character yet. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + } + } + + if r <= 0xffff { + if nDst+2 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = uint8(r >> 8) + dst[nDst+1] = uint8(r) + nDst += 2 + } else { + if nDst+4 > len(dst) { + err = transform.ErrShortDst + break + } + r1, r2 := utf16.EncodeRune(r) + dst[nDst+0] = uint8(r1 >> 8) + dst[nDst+1] = uint8(r1) + dst[nDst+2] = uint8(r2 >> 8) + dst[nDst+3] = uint8(r2) + nDst += 4 + } + nSrc += size + } + + if u.endianness == LittleEndian { + for i := 0; i < nDst; i += 2 { + dst[i], dst[i+1] = dst[i+1], dst[i] + } + } + return nDst, nSrc, err +} diff --git a/vendor/golang.org/x/text/internal/gen/code.go b/vendor/golang.org/x/text/internal/gen/code.go new file mode 100644 index 000000000..ca917d281 --- /dev/null +++ b/vendor/golang.org/x/text/internal/gen/code.go @@ -0,0 +1,338 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gen + +import ( + "bytes" + "encoding/gob" + "fmt" + "hash" + "hash/fnv" + "io" + "log" + "os" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// This file contains utilities for generating code. + +// TODO: other write methods like: +// - slices, maps, types, etc. + +// CodeWriter is a utility for writing structured code. It computes the content +// hash and size of written content. It ensures there are newlines between +// written code blocks. +type CodeWriter struct { + buf bytes.Buffer + Size int + Hash hash.Hash32 // content hash + gob *gob.Encoder + // For comments we skip the usual one-line separator if they are followed by + // a code block. + skipSep bool +} + +func (w *CodeWriter) Write(p []byte) (n int, err error) { + return w.buf.Write(p) +} + +// NewCodeWriter returns a new CodeWriter. +func NewCodeWriter() *CodeWriter { + h := fnv.New32() + return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)} +} + +// WriteGoFile appends the buffer with the total size of all created structures +// and writes it as a Go file to the the given file with the given package name. +func (w *CodeWriter) WriteGoFile(filename, pkg string) { + f, err := os.Create(filename) + if err != nil { + log.Fatalf("Could not create file %s: %v", filename, err) + } + defer f.Close() + if _, err = w.WriteGo(f, pkg); err != nil { + log.Fatalf("Error writing file %s: %v", filename, err) + } +} + +// WriteGo appends the buffer with the total size of all created structures and +// writes it as a Go file to the the given writer with the given package name. +func (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) { + sz := w.Size + w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32()) + defer w.buf.Reset() + return WriteGo(out, pkg, w.buf.Bytes()) +} + +func (w *CodeWriter) printf(f string, x ...interface{}) { + fmt.Fprintf(w, f, x...) +} + +func (w *CodeWriter) insertSep() { + if w.skipSep { + w.skipSep = false + return + } + // Use at least two newlines to ensure a blank space between the previous + // block. WriteGoFile will remove extraneous newlines. + w.printf("\n\n") +} + +// WriteComment writes a comment block. 
All line starts are prefixed with "//". +// Initial empty lines are gobbled. The indentation for the first line is +// stripped from consecutive lines. +func (w *CodeWriter) WriteComment(comment string, args ...interface{}) { + s := fmt.Sprintf(comment, args...) + s = strings.Trim(s, "\n") + + // Use at least two newlines to ensure a blank space between the previous + // block. WriteGoFile will remove extraneous newlines. + w.printf("\n\n// ") + w.skipSep = true + + // strip first indent level. + sep := "\n" + for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] { + sep += s[:1] + } + + strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s) + + w.printf("\n") +} + +func (w *CodeWriter) writeSizeInfo(size int) { + w.printf("// Size: %d bytes\n", size) +} + +// WriteConst writes a constant of the given name and value. +func (w *CodeWriter) WriteConst(name string, x interface{}) { + w.insertSep() + v := reflect.ValueOf(x) + + switch v.Type().Kind() { + case reflect.String: + // See golang.org/issue/13145. + const arbitraryCutoff = 16 + if v.Len() > arbitraryCutoff { + w.printf("var %s %s = ", name, typeName(x)) + } else { + w.printf("const %s %s = ", name, typeName(x)) + } + w.WriteString(v.String()) + w.printf("\n") + default: + w.printf("const %s = %#v\n", name, x) + } +} + +// WriteVar writes a variable of the given name and value. +func (w *CodeWriter) WriteVar(name string, x interface{}) { + w.insertSep() + v := reflect.ValueOf(x) + oldSize := w.Size + sz := int(v.Type().Size()) + w.Size += sz + + switch v.Type().Kind() { + case reflect.String: + w.printf("var %s %s = ", name, typeName(x)) + w.WriteString(v.String()) + case reflect.Struct: + w.gob.Encode(x) + fallthrough + case reflect.Slice, reflect.Array: + w.printf("var %s = ", name) + w.writeValue(v) + w.writeSizeInfo(w.Size - oldSize) + default: + w.printf("var %s %s = ", name, typeName(x)) + w.gob.Encode(x) + w.writeValue(v) + w.writeSizeInfo(w.Size - oldSize) + } + w.printf("\n") +} + +func (w *CodeWriter) writeValue(v reflect.Value) { + x := v.Interface() + switch v.Kind() { + case reflect.String: + w.WriteString(v.String()) + case reflect.Array: + // Don't double count: callers of WriteArray count on the size being + // added, so we need to discount it here. + w.Size -= int(v.Type().Size()) + w.writeSlice(x, true) + case reflect.Slice: + w.writeSlice(x, false) + case reflect.Struct: + w.printf("%s{\n", typeName(v.Interface())) + t := v.Type() + for i := 0; i < v.NumField(); i++ { + w.printf("%s: ", t.Field(i).Name) + w.writeValue(v.Field(i)) + w.printf(",\n") + } + w.printf("}") + default: + w.printf("%#v", x) + } +} + +// WriteString writes a string literal. +func (w *CodeWriter) WriteString(s string) { + io.WriteString(w.Hash, s) // content hash + w.Size += len(s) + + const maxInline = 40 + if len(s) <= maxInline { + w.printf("%q", s) + return + } + + // We will render the string as a multi-line string. + const maxWidth = 80 - 4 - len(`"`) - len(`" +`) + + // When starting on its own line, go fmt indents line 2+ an extra level. + n, max := maxWidth, maxWidth-4 + + // Print "" +\n, if a string does not start on its own line. 
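+	// Only the bytes already in the buffer are inspected to decide whether the
+	// literal starts mid-line.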
+ b := w.buf.Bytes() + if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' { + w.printf("\"\" + // Size: %d bytes\n", len(s)) + n, max = maxWidth, maxWidth + } + + w.printf(`"`) + + for sz, p := 0, 0; p < len(s); { + var r rune + r, sz = utf8.DecodeRuneInString(s[p:]) + out := s[p : p+sz] + chars := 1 + if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' { + switch sz { + case 1: + out = fmt.Sprintf("\\x%02x", s[p]) + case 2, 3: + out = fmt.Sprintf("\\u%04x", r) + case 4: + out = fmt.Sprintf("\\U%08x", r) + } + chars = len(out) + } + if n -= chars; n < 0 { + w.printf("\" +\n\"") + n = max - len(out) + } + w.printf("%s", out) + p += sz + } + w.printf(`"`) +} + +// WriteSlice writes a slice value. +func (w *CodeWriter) WriteSlice(x interface{}) { + w.writeSlice(x, false) +} + +// WriteArray writes an array value. +func (w *CodeWriter) WriteArray(x interface{}) { + w.writeSlice(x, true) +} + +func (w *CodeWriter) writeSlice(x interface{}, isArray bool) { + v := reflect.ValueOf(x) + w.gob.Encode(v.Len()) + w.Size += v.Len() * int(v.Type().Elem().Size()) + name := typeName(x) + if isArray { + name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:]) + } + if isArray { + w.printf("%s{\n", name) + } else { + w.printf("%s{ // %d elements\n", name, v.Len()) + } + + switch kind := v.Type().Elem().Kind(); kind { + case reflect.String: + for _, s := range x.([]string) { + w.WriteString(s) + w.printf(",\n") + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // nLine and nBlock are the number of elements per line and block. + nLine, nBlock, format := 8, 64, "%d," + switch kind { + case reflect.Uint8: + format = "%#02x," + case reflect.Uint16: + format = "%#04x," + case reflect.Uint32: + nLine, nBlock, format = 4, 32, "%#08x," + case reflect.Uint, reflect.Uint64: + nLine, nBlock, format = 4, 32, "%#016x," + case reflect.Int8: + nLine = 16 + } + n := nLine + for i := 0; i < v.Len(); i++ { + if i%nBlock == 0 && v.Len() > nBlock { + w.printf("// Entry %X - %X\n", i, i+nBlock-1) + } + x := v.Index(i).Interface() + w.gob.Encode(x) + w.printf(format, x) + if n--; n == 0 { + n = nLine + w.printf("\n") + } + } + w.printf("\n") + case reflect.Struct: + zero := reflect.Zero(v.Type().Elem()).Interface() + for i := 0; i < v.Len(); i++ { + x := v.Index(i).Interface() + w.gob.EncodeValue(v) + if !reflect.DeepEqual(zero, x) { + line := fmt.Sprintf("%#v,\n", x) + line = line[strings.IndexByte(line, '{'):] + w.printf("%d: ", i) + w.printf(line) + } + } + case reflect.Array: + for i := 0; i < v.Len(); i++ { + w.printf("%d: %#v,\n", i, v.Index(i).Interface()) + } + default: + panic("gen: slice elem type not supported") + } + w.printf("}") +} + +// WriteType writes a definition of the type of the given value and returns the +// type name. +func (w *CodeWriter) WriteType(x interface{}) string { + t := reflect.TypeOf(x) + w.printf("type %s struct {\n", t.Name()) + for i := 0; i < t.NumField(); i++ { + w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type) + } + w.printf("}\n") + return t.Name() +} + +// typeName returns the name of the go type of x. 
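+// Any "main." package qualifier is stripped so that generated code refers to
+// the type by its local name.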
+func typeName(x interface{}) string { + t := reflect.ValueOf(x).Type() + return strings.Replace(fmt.Sprint(t), "main.", "", 1) +} diff --git a/vendor/golang.org/x/text/internal/gen/gen.go b/vendor/golang.org/x/text/internal/gen/gen.go new file mode 100644 index 000000000..dfaa278a1 --- /dev/null +++ b/vendor/golang.org/x/text/internal/gen/gen.go @@ -0,0 +1,226 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gen contains common code for the various code generation tools in the +// text repository. Its usage ensures consistency between tools. +// +// This package defines command line flags that are common to most generation +// tools. The flags allow for specifying specific Unicode and CLDR versions +// in the public Unicode data repository (http://www.unicode.org/Public). +// +// A local Unicode data mirror can be set through the flag -local or the +// environment variable UNICODE_DIR. The former takes precedence. The local +// directory should follow the same structure as the public repository. +// +// IANA data can also optionally be mirrored by putting it in the iana directory +// rooted at the top of the local mirror. Beware, though, that IANA data is not +// versioned. So it is up to the developer to use the right version. +package gen // import "golang.org/x/text/internal/gen" + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "path" + "path/filepath" + "unicode" + + "golang.org/x/text/unicode/cldr" +) + +var ( + url = flag.String("url", + "http://www.unicode.org/Public", + "URL of Unicode database directory") + iana = flag.String("iana", + "http://www.iana.org", + "URL of the IANA repository") + unicodeVersion = flag.String("unicode", + getEnv("UNICODE_VERSION", unicode.Version), + "unicode version to use") + cldrVersion = flag.String("cldr", + getEnv("CLDR_VERSION", cldr.Version), + "cldr version to use") + // Allow an environment variable to specify the local directory. + // go generate doesn't allow specifying arguments; this is a useful + // alternative to specifying a local mirror. + localDir = flag.String("local", + os.Getenv("UNICODE_DIR"), + "directory containing local data files; for debugging only.") +) + +func getEnv(name, def string) string { + if v := os.Getenv(name); v != "" { + return v + } + return def +} + +// Init performs common initialization for a gen command. It parses the flags +// and sets up the standard logging parameters. +func Init() { + log.SetPrefix("") + log.SetFlags(log.Lshortfile) + flag.Parse() +} + +const header = `// This file was generated by go generate; DO NOT EDIT + +package %s + +` + +// UnicodeVersion reports the requested Unicode version. +func UnicodeVersion() string { + return *unicodeVersion +} + +// UnicodeVersion reports the requested CLDR version. +func CLDRVersion() string { + return *cldrVersion +} + +// IsLocal reports whether the user specified a local directory. +func IsLocal() bool { + return *localDir != "" +} + +// OpenUCDFile opens the requested UCD file. The file is specified relative to +// the public Unicode root directory. It will call log.Fatal if there are any +// errors. +func OpenUCDFile(file string) io.ReadCloser { + return openUnicode(path.Join(*unicodeVersion, "ucd", file)) +} + +// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there +// are any errors. 
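+// The archive is located under cldr/<version>/core.zip in the Unicode data
+// repository, or in the local mirror if one is configured.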
+func OpenCLDRCoreZip() io.ReadCloser { + return OpenUnicodeFile("cldr", *cldrVersion, "core.zip") +} + +// OpenUnicodeFile opens the requested file of the requested category from the +// root of the Unicode data archive. The file is specified relative to the +// public Unicode root directory. If version is "", it will use the default +// Unicode version. It will call log.Fatal if there are any errors. +func OpenUnicodeFile(category, version, file string) io.ReadCloser { + if version == "" { + version = UnicodeVersion() + } + return openUnicode(path.Join(category, version, file)) +} + +// OpenIANAFile opens the requested IANA file. The file is specified relative +// to the IANA root, which is typically either http://www.iana.org or the +// iana directory in the local mirror. It will call log.Fatal if there are any +// errors. +func OpenIANAFile(path string) io.ReadCloser { + return Open(*iana, "iana", path) +} + +// Open opens subdir/path if a local directory is specified and the file exists, +// where subdir is a directory relative to the local root, or fetches it from +// urlRoot/path otherwise. It will call log.Fatal if there are any errors. +func Open(urlRoot, subdir, path string) io.ReadCloser { + if *localDir != "" { + path = filepath.FromSlash(path) + if f, err := os.Open(filepath.Join(*localDir, subdir, path)); err == nil { + return f + } + } + return get(urlRoot, path) +} + +func openUnicode(path string) io.ReadCloser { + if *localDir != "" { + path = filepath.FromSlash(path) + f, err := os.Open(filepath.Join(*localDir, path)) + if err != nil { + log.Fatal(err) + } + return f + } + return get(*url, path) +} + +func get(root, path string) io.ReadCloser { + url := root + "/" + path + fmt.Printf("Fetching %s...", url) + defer fmt.Println(" done.") + resp, err := http.Get(url) + if err != nil { + log.Fatalf("HTTP GET: %v", err) + } + if resp.StatusCode != 200 { + log.Fatalf("Bad GET status for %q: %q", url, resp.Status) + } + return resp.Body +} + +// TODO: use Write*Version in all applicable packages. + +// WriteUnicodeVersion writes a constant for the Unicode version from which the +// tables are generated. +func WriteUnicodeVersion(w io.Writer) { + fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n") + fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion()) +} + +// WriteCLDRVersion writes a constant for the CLDR version from which the +// tables are generated. +func WriteCLDRVersion(w io.Writer) { + fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n") + fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion()) +} + +// WriteGoFile prepends a standard file comment and package statement to the +// given bytes, applies gofmt, and writes them to a file with the given name. +// It will call log.Fatal if there are any errors. +func WriteGoFile(filename, pkg string, b []byte) { + w, err := os.Create(filename) + if err != nil { + log.Fatalf("Could not create file %s: %v", filename, err) + } + defer w.Close() + if _, err = WriteGo(w, pkg, b); err != nil { + log.Fatalf("Error writing file %s: %v", filename, err) + } +} + +// WriteGo prepends a standard file comment and package statement to the given +// bytes, applies gofmt, and writes them to w. +func WriteGo(w io.Writer, pkg string, b []byte) (n int, err error) { + src := []byte(fmt.Sprintf(header, pkg)) + src = append(src, b...) 
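+	// Run the assembled file through gofmt; on failure the unformatted source
+	// is still written so that the error can be diagnosed.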
+ formatted, err := format.Source(src) + if err != nil { + // Print the generated code even in case of an error so that the + // returned error can be meaningfully interpreted. + n, _ = w.Write(src) + return n, err + } + return w.Write(formatted) +} + +// Repackage rewrites a Go file from belonging to package main to belonging to +// the given package. +func Repackage(inFile, outFile, pkg string) { + src, err := ioutil.ReadFile(inFile) + if err != nil { + log.Fatalf("reading %s: %v", inFile, err) + } + const toDelete = "package main\n\n" + i := bytes.Index(src, []byte(toDelete)) + if i < 0 { + log.Fatalf("Could not find %q in %s.", toDelete, inFile) + } + w := &bytes.Buffer{} + w.Write(src[i+len(toDelete):]) + WriteGoFile(outFile, pkg, w.Bytes()) +} diff --git a/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go b/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go new file mode 100644 index 000000000..575cea870 --- /dev/null +++ b/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go @@ -0,0 +1,87 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package utf8internal contains low-level utf8-related constants, tables, etc. +// that are used internally by the text package. +package utf8internal + +// The default lowest and highest continuation byte. +const ( + LoCB = 0x80 // 1000 0000 + HiCB = 0xBF // 1011 1111 +) + +// Constants related to getting information of first bytes of UTF-8 sequences. +const ( + // ASCII identifies a UTF-8 byte as ASCII. + ASCII = as + + // FirstInvalid indicates a byte is invalid as a first byte of a UTF-8 + // sequence. + FirstInvalid = xx + + // SizeMask is a mask for the size bits. Use use x&SizeMask to get the size. + SizeMask = 7 + + // AcceptShift is the right-shift count for the first byte info byte to get + // the index into the AcceptRanges table. See AcceptRanges. + AcceptShift = 4 + + // The names of these constants are chosen to give nice alignment in the + // table below. The first nibble is an index into acceptRanges or F for + // special one-byte cases. The second nibble is the Rune length or the + // Status for the special one-byte case. + xx = 0xF1 // invalid: size 1 + as = 0xF0 // ASCII: size 1 + s1 = 0x02 // accept 0, size 2 + s2 = 0x13 // accept 1, size 3 + s3 = 0x03 // accept 0, size 3 + s4 = 0x23 // accept 2, size 3 + s5 = 0x34 // accept 3, size 4 + s6 = 0x04 // accept 0, size 4 + s7 = 0x44 // accept 4, size 4 +) + +// First is information about the first byte in a UTF-8 sequence. 
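+// ASCII bytes map to as, bytes that cannot start a sequence map to xx, and the
+// remaining entries encode the sequence length together with an index into
+// AcceptRanges.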
+var First = [256]uint8{ + // 1 2 3 4 5 6 7 8 9 A B C D E F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F + // 1 2 3 4 5 6 7 8 9 A B C D E F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF + xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF + s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF + s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF + s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF +} + +// AcceptRange gives the range of valid values for the second byte in a UTF-8 +// sequence for any value for First that is not ASCII or FirstInvalid. +type AcceptRange struct { + Lo uint8 // lowest value for second byte. + Hi uint8 // highest value for second byte. +} + +// AcceptRanges is a slice of AcceptRange values. For a given byte sequence b +// +// AcceptRanges[First[b[0]]>>AcceptShift] +// +// will give the value of AcceptRange for the multi-byte UTF-8 sequence starting +// at b[0]. +var AcceptRanges = [...]AcceptRange{ + 0: {LoCB, HiCB}, + 1: {0xA0, HiCB}, + 2: {LoCB, 0x9F}, + 3: {0x90, HiCB}, + 4: {LoCB, 0x8F}, +} diff --git a/vendor/golang.org/x/text/runes/cond.go b/vendor/golang.org/x/text/runes/cond.go new file mode 100644 index 000000000..ae7a92158 --- /dev/null +++ b/vendor/golang.org/x/text/runes/cond.go @@ -0,0 +1,126 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runes + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is. +// This is done for various reasons: +// - To retain the semantics of the Nop transformer: if input is passed to a Nop +// one would expect it to be unchanged. +// - It would be very expensive to pass a converted RuneError to a transformer: +// a transformer might need more source bytes after RuneError, meaning that +// the only way to pass it safely is to create a new buffer and manage the +// intermingling of RuneErrors and normal input. +// - Many transformers leave ill-formed UTF-8 as is, so this is not +// inconsistent. Generally ill-formed UTF-8 is only replaced if it is a +// logical consequence of the operation (as for Map) or if it otherwise would +// pose security concerns (as for Remove). +// - An alternative would be to return an error on ill-formed UTF-8, but this +// would be inconsistent with other operations. + +// If returns a transformer that applies tIn to consecutive runes for which +// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). 
Reset +// is called on tIn and tNotIn at the start of each run. A Nop transformer will +// substitute a nil value passed to tIn or tNotIn. Invalid UTF-8 is translated +// to RuneError to determine which transformer to apply, but is passed as is to +// the respective transformer. +func If(s Set, tIn, tNotIn transform.Transformer) Transformer { + if tIn == nil && tNotIn == nil { + return Transformer{transform.Nop} + } + if tIn == nil { + tIn = transform.Nop + } + if tNotIn == nil { + tNotIn = transform.Nop + } + a := &cond{ + tIn: tIn, + tNotIn: tNotIn, + f: s.Contains, + } + a.Reset() + return Transformer{a} +} + +type cond struct { + tIn, tNotIn transform.Transformer + f func(rune) bool + check func(rune) bool // current check to perform + t transform.Transformer // current transformer to use +} + +// Reset implements transform.Transformer. +func (t *cond) Reset() { + t.check = t.is + t.t = t.tIn + t.t.Reset() // notIn will be reset on first usage. +} + +func (t *cond) is(r rune) bool { + if t.f(r) { + return true + } + t.check = t.isNot + t.t = t.tNotIn + t.tNotIn.Reset() + return false +} + +func (t *cond) isNot(r rune) bool { + if !t.f(r) { + return true + } + t.check = t.is + t.t = t.tIn + t.tIn.Reset() + return false +} + +func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + p := 0 + for nSrc < len(src) && err == nil { + // Don't process too much at a time, as the work might be wasted if the + // destination buffer isn't large enough to hold the result or a + // transform returns an error early. + const maxChunk = 4096 + max := len(src) + if n := nSrc + maxChunk; n < len(src) { + max = n + } + atEnd := false + size := 0 + current := t.t + for ; p < max; p += size { + var r rune + r, size = utf8.DecodeRune(src[p:]) + if r == utf8.RuneError && size == 1 { + if !atEOF && !utf8.FullRune(src[p:]) { + err = transform.ErrShortSrc + break + } + } + if !t.check(r) { + // The next rune will be the start of a new run. + atEnd = true + break + } + } + nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src))) + nDst += nDst2 + nSrc += nSrc2 + if err2 != nil { + return nDst, nSrc, err2 + } + // At this point either err != nil or t.check will pass for the rune at p. + p = nSrc + size + } + return nDst, nSrc, err +} diff --git a/vendor/golang.org/x/text/runes/runes.go b/vendor/golang.org/x/text/runes/runes.go new file mode 100644 index 000000000..bb17f475b --- /dev/null +++ b/vendor/golang.org/x/text/runes/runes.go @@ -0,0 +1,278 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package runes provide transforms for UTF-8 encoded text. +package runes // import "golang.org/x/text/runes" + +import ( + "unicode" + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// A Set is a collection of runes. +type Set interface { + // Contains returns true if r is contained in the set. + Contains(r rune) bool +} + +type setFunc func(rune) bool + +func (s setFunc) Contains(r rune) bool { + return s(r) +} + +// Note: using funcs here instead of wrapping types result in cleaner +// documentation and a smaller API. + +// In creates a Set with a Contains method that returns true for all runes in +// the given RangeTable. 
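+// For example, In(unicode.Greek).Contains('α') reports true, while
+// In(unicode.Greek).Contains('a') reports false.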
+func In(rt *unicode.RangeTable) Set { + return setFunc(func(r rune) bool { return unicode.Is(rt, r) }) +} + +// In creates a Set with a Contains method that returns true for all runes not +// in the given RangeTable. +func NotIn(rt *unicode.RangeTable) Set { + return setFunc(func(r rune) bool { return !unicode.Is(rt, r) }) +} + +// Predicate creates a Set with a Contains method that returns f(r). +func Predicate(f func(rune) bool) Set { + return setFunc(f) +} + +// Transformer implements the transform.Transformer interface. +type Transformer struct { + transform.Transformer +} + +// Bytes returns a new byte slice with the result of converting b using t. It +// calls Reset on t. It returns nil if any error was found. This can only happen +// if an error-producing Transformer is passed to If. +func (t Transformer) Bytes(b []byte) []byte { + b, _, err := transform.Bytes(t, b) + if err != nil { + return nil + } + return b +} + +// String returns a string with the result of converting s using t. It calls +// Reset on t. It returns the empty string if any error was found. This can only +// happen if an error-producing Transformer is passed to If. +func (t Transformer) String(s string) string { + s, _, err := transform.String(t, s) + if err != nil { + return "" + } + return s +} + +// TODO: +// - Copy: copying strings and bytes in whole-rune units. +// - Validation (maybe) +// - Well-formed-ness (maybe) + +const runeErrorString = string(utf8.RuneError) + +// Remove returns a Transformer that removes runes r for which s.Contains(r). +// Illegal input bytes are replaced by RuneError before being passed to f. +func Remove(s Set) Transformer { + if f, ok := s.(setFunc); ok { + // This little trick cuts the running time of BenchmarkRemove for sets + // created by Predicate roughly in half. + // TODO: special-case RangeTables as well. + return Transformer{remove(f)} + } + return Transformer{remove(s.Contains)} +} + +// TODO: remove transform.RemoveFunc. + +type remove func(r rune) bool + +func (remove) Reset() {} + +// Transform implements transform.Transformer. +func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for r, size := rune(0), 0; nSrc < len(src); { + if r = rune(src[nSrc]); r < utf8.RuneSelf { + size = 1 + } else { + r, size = utf8.DecodeRune(src[nSrc:]) + + if size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + // We replace illegal bytes with RuneError. Not doing so might + // otherwise turn a sequence of invalid UTF-8 into valid UTF-8. + // The resulting byte sequence may subsequently contain runes + // for which t(r) is true that were passed unnoticed. + if !t(utf8.RuneError) { + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + } + nSrc++ + continue + } + } + + if t(r) { + nSrc += size + continue + } + if nDst+size > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < size; i++ { + dst[nDst] = src[nSrc] + nDst++ + nSrc++ + } + } + return +} + +// Map returns a Transformer that maps the runes in the input using the given +// mapping. Illegal bytes in the input are converted to utf8.RuneError before +// being passed to the mapping func. +func Map(mapping func(rune) rune) Transformer { + return Transformer{mapper(mapping)} +} + +type mapper func(rune) rune + +func (mapper) Reset() {} + +// Transform implements transform.Transformer. 
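+// Each decoded rune is rewritten through the mapping function; ill-formed
+// input is replaced by utf8.RuneError before being mapped.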
+func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + var replacement rune + var b [utf8.UTFMax]byte + + for r, size := rune(0), 0; nSrc < len(src); { + if r = rune(src[nSrc]); r < utf8.RuneSelf { + if replacement = t(r); replacement < utf8.RuneSelf { + if nDst == len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst] = byte(replacement) + nDst++ + nSrc++ + continue + } + size = 1 + } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + + if replacement = t(utf8.RuneError); replacement == utf8.RuneError { + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + nSrc++ + continue + } + } else if replacement = t(r); replacement == r { + if nDst+size > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < size; i++ { + dst[nDst] = src[nSrc] + nDst++ + nSrc++ + } + continue + } + + n := utf8.EncodeRune(b[:], replacement) + + if nDst+n > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < n; i++ { + dst[nDst] = b[i] + nDst++ + } + nSrc += size + } + return +} + +// ReplaceIllFormed returns a transformer that replaces all input bytes that are +// not part of a well-formed UTF-8 code sequence with utf8.RuneError. +func ReplaceIllFormed() Transformer { + return Transformer{&replaceIllFormed{}} +} + +type replaceIllFormed struct{ transform.NopResetter } + +func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + r, size := utf8.DecodeRune(src[nSrc:]) + + // Look for an ASCII rune. + if r < utf8.RuneSelf { + if nDst == len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst] = byte(r) + nDst++ + nSrc++ + continue + } + + // Look for a valid non-ASCII rune. + if r != utf8.RuneError || size != 1 { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + err = transform.ErrShortDst + break + } + nDst += size + nSrc += size + continue + } + + // Look for short source data. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + + // We have an invalid rune. + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + nSrc++ + } + return nDst, nSrc, err +} diff --git a/vendor/golang.org/x/text/transform/transform.go b/vendor/golang.org/x/text/transform/transform.go new file mode 100644 index 000000000..0c2f730ea --- /dev/null +++ b/vendor/golang.org/x/text/transform/transform.go @@ -0,0 +1,661 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transform provides reader and writer wrappers that transform the +// bytes passing through as well as various transformations. Example +// transformations provided by other packages include normalization and +// conversion between character sets. +package transform // import "golang.org/x/text/transform" + +import ( + "bytes" + "errors" + "io" + "unicode/utf8" +) + +var ( + // ErrShortDst means that the destination buffer was too short to + // receive all of the transformed bytes. 
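+	// Callers typically consume the bytes already written to dst and then
+	// call Transform again with the remaining input.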
+ ErrShortDst = errors.New("transform: short destination buffer") + + // ErrShortSrc means that the source buffer has insufficient data to + // complete the transformation. + ErrShortSrc = errors.New("transform: short source buffer") + + // errInconsistentByteCount means that Transform returned success (nil + // error) but also returned nSrc inconsistent with the src argument. + errInconsistentByteCount = errors.New("transform: inconsistent byte count returned") + + // errShortInternal means that an internal buffer is not large enough + // to make progress and the Transform operation must be aborted. + errShortInternal = errors.New("transform: short internal buffer") +) + +// Transformer transforms bytes. +type Transformer interface { + // Transform writes to dst the transformed bytes read from src, and + // returns the number of dst bytes written and src bytes read. The + // atEOF argument tells whether src represents the last bytes of the + // input. + // + // Callers should always process the nDst bytes produced and account + // for the nSrc bytes consumed before considering the error err. + // + // A nil error means that all of the transformed bytes (whether freshly + // transformed from src or left over from previous Transform calls) + // were written to dst. A nil error can be returned regardless of + // whether atEOF is true. If err is nil then nSrc must equal len(src); + // the converse is not necessarily true. + // + // ErrShortDst means that dst was too short to receive all of the + // transformed bytes. ErrShortSrc means that src had insufficient data + // to complete the transformation. If both conditions apply, then + // either error may be returned. Other than the error conditions listed + // here, implementations are free to report other errors that arise. + Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) + + // Reset resets the state and allows a Transformer to be reused. + Reset() +} + +// NopResetter can be embedded by implementations of Transformer to add a nop +// Reset method. +type NopResetter struct{} + +// Reset implements the Reset method of the Transformer interface. +func (NopResetter) Reset() {} + +// Reader wraps another io.Reader by transforming the bytes read. +type Reader struct { + r io.Reader + t Transformer + err error + + // dst[dst0:dst1] contains bytes that have been transformed by t but + // not yet copied out via Read. + dst []byte + dst0, dst1 int + + // src[src0:src1] contains bytes that have been read from r but not + // yet transformed through t. + src []byte + src0, src1 int + + // transformComplete is whether the transformation is complete, + // regardless of whether or not it was successful. + transformComplete bool +} + +const defaultBufSize = 4096 + +// NewReader returns a new Reader that wraps r by transforming the bytes read +// via t. It calls Reset on t. +func NewReader(r io.Reader, t Transformer) *Reader { + t.Reset() + return &Reader{ + r: r, + t: t, + dst: make([]byte, defaultBufSize), + src: make([]byte, defaultBufSize), + } +} + +// Read implements the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + n, err := 0, error(nil) + for { + // Copy out any transformed bytes and return the final error if we are done. 
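+		// dst[dst0:dst1] holds transformed bytes that have not yet been
+		// copied to the caller.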
+ if r.dst0 != r.dst1 { + n = copy(p, r.dst[r.dst0:r.dst1]) + r.dst0 += n + if r.dst0 == r.dst1 && r.transformComplete { + return n, r.err + } + return n, nil + } else if r.transformComplete { + return 0, r.err + } + + // Try to transform some source bytes, or to flush the transformer if we + // are out of source bytes. We do this even if r.r.Read returned an error. + // As the io.Reader documentation says, "process the n > 0 bytes returned + // before considering the error". + if r.src0 != r.src1 || r.err != nil { + r.dst0 = 0 + r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF) + r.src0 += n + + switch { + case err == nil: + if r.src0 != r.src1 { + r.err = errInconsistentByteCount + } + // The Transform call was successful; we are complete if we + // cannot read more bytes into src. + r.transformComplete = r.err != nil + continue + case err == ErrShortDst && (r.dst1 != 0 || n != 0): + // Make room in dst by copying out, and try again. + continue + case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil: + // Read more bytes into src via the code below, and try again. + default: + r.transformComplete = true + // The reader error (r.err) takes precedence over the + // transformer error (err) unless r.err is nil or io.EOF. + if r.err == nil || r.err == io.EOF { + r.err = err + } + continue + } + } + + // Move any untransformed source bytes to the start of the buffer + // and read more bytes. + if r.src0 != 0 { + r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1]) + } + n, r.err = r.r.Read(r.src[r.src1:]) + r.src1 += n + } +} + +// TODO: implement ReadByte (and ReadRune??). + +// Writer wraps another io.Writer by transforming the bytes read. +// The user needs to call Close to flush unwritten bytes that may +// be buffered. +type Writer struct { + w io.Writer + t Transformer + dst []byte + + // src[:n] contains bytes that have not yet passed through t. + src []byte + n int +} + +// NewWriter returns a new Writer that wraps w by transforming the bytes written +// via t. It calls Reset on t. +func NewWriter(w io.Writer, t Transformer) *Writer { + t.Reset() + return &Writer{ + w: w, + t: t, + dst: make([]byte, defaultBufSize), + src: make([]byte, defaultBufSize), + } +} + +// Write implements the io.Writer interface. If there are not enough +// bytes available to complete a Transform, the bytes will be buffered +// for the next write. Call Close to convert the remaining bytes. +func (w *Writer) Write(data []byte) (n int, err error) { + src := data + if w.n > 0 { + // Append bytes from data to the last remainder. + // TODO: limit the amount copied on first try. + n = copy(w.src[w.n:], data) + w.n += n + src = w.src[:w.n] + } + for { + nDst, nSrc, err := w.t.Transform(w.dst, src, false) + if _, werr := w.w.Write(w.dst[:nDst]); werr != nil { + return n, werr + } + src = src[nSrc:] + if w.n == 0 { + n += nSrc + } else if len(src) <= n { + // Enough bytes from w.src have been consumed. We make src point + // to data instead to reduce the copying. + w.n = 0 + n -= len(src) + src = data[n:] + if n < len(data) && (err == nil || err == ErrShortSrc) { + continue + } + } + switch err { + case ErrShortDst: + // This error is okay as long as we are making progress. + if nDst > 0 || nSrc > 0 { + continue + } + case ErrShortSrc: + if len(src) < len(w.src) { + m := copy(w.src, src) + // If w.n > 0, bytes from data were already copied to w.src and n + // was already set to the number of bytes consumed. 
+ if w.n == 0 { + n += m + } + w.n = m + err = nil + } else if nDst > 0 || nSrc > 0 { + // Not enough buffer to store the remainder. Keep processing as + // long as there is progress. Without this case, transforms that + // require a lookahead larger than the buffer may result in an + // error. This is not something one may expect to be common in + // practice, but it may occur when buffers are set to small + // sizes during testing. + continue + } + case nil: + if w.n > 0 { + err = errInconsistentByteCount + } + } + return n, err + } +} + +// Close implements the io.Closer interface. +func (w *Writer) Close() error { + src := w.src[:w.n] + for { + nDst, nSrc, err := w.t.Transform(w.dst, src, true) + if _, werr := w.w.Write(w.dst[:nDst]); werr != nil { + return werr + } + if err != ErrShortDst { + return err + } + src = src[nSrc:] + } +} + +type nop struct{ NopResetter } + +func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + n := copy(dst, src) + if n < len(src) { + err = ErrShortDst + } + return n, n, err +} + +type discard struct{ NopResetter } + +func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return 0, len(src), nil +} + +var ( + // Discard is a Transformer for which all Transform calls succeed + // by consuming all bytes and writing nothing. + Discard Transformer = discard{} + + // Nop is a Transformer that copies src to dst. + Nop Transformer = nop{} +) + +// chain is a sequence of links. A chain with N Transformers has N+1 links and +// N+1 buffers. Of those N+1 buffers, the first and last are the src and dst +// buffers given to chain.Transform and the middle N-1 buffers are intermediate +// buffers owned by the chain. The i'th link transforms bytes from the i'th +// buffer chain.link[i].b at read offset chain.link[i].p to the i+1'th buffer +// chain.link[i+1].b at write offset chain.link[i+1].n, for i in [0, N). +type chain struct { + link []link + err error + // errStart is the index at which the error occurred plus 1. Processing + // errStart at this level at the next call to Transform. As long as + // errStart > 0, chain will not consume any more source bytes. + errStart int +} + +func (c *chain) fatalError(errIndex int, err error) { + if i := errIndex + 1; i > c.errStart { + c.errStart = i + c.err = err + } +} + +type link struct { + t Transformer + // b[p:n] holds the bytes to be transformed by t. + b []byte + p int + n int +} + +func (l *link) src() []byte { + return l.b[l.p:l.n] +} + +func (l *link) dst() []byte { + return l.b[l.n:] +} + +// Chain returns a Transformer that applies t in sequence. +func Chain(t ...Transformer) Transformer { + if len(t) == 0 { + return nop{} + } + c := &chain{link: make([]link, len(t)+1)} + for i, tt := range t { + c.link[i].t = tt + } + // Allocate intermediate buffers. + b := make([][defaultBufSize]byte, len(t)-1) + for i := range b { + c.link[i+1].b = b[i][:] + } + return c +} + +// Reset resets the state of Chain. It calls Reset on all the Transformers. +func (c *chain) Reset() { + for i, l := range c.link { + if l.t != nil { + l.t.Reset() + } + c.link[i].p, c.link[i].n = 0, 0 + } +} + +// Transform applies the transformers of c in sequence. +func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + // Set up src and dst in the chain. 
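+	// The caller's src feeds the first link and the caller's dst receives the
+	// output of the last link; the links in between use the chain's own
+	// intermediate buffers.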
+ srcL := &c.link[0] + dstL := &c.link[len(c.link)-1] + srcL.b, srcL.p, srcL.n = src, 0, len(src) + dstL.b, dstL.n = dst, 0 + var lastFull, needProgress bool // for detecting progress + + // i is the index of the next Transformer to apply, for i in [low, high]. + // low is the lowest index for which c.link[low] may still produce bytes. + // high is the highest index for which c.link[high] has a Transformer. + // The error returned by Transform determines whether to increase or + // decrease i. We try to completely fill a buffer before converting it. + for low, i, high := c.errStart, c.errStart, len(c.link)-2; low <= i && i <= high; { + in, out := &c.link[i], &c.link[i+1] + nDst, nSrc, err0 := in.t.Transform(out.dst(), in.src(), atEOF && low == i) + out.n += nDst + in.p += nSrc + if i > 0 && in.p == in.n { + in.p, in.n = 0, 0 + } + needProgress, lastFull = lastFull, false + switch err0 { + case ErrShortDst: + // Process the destination buffer next. Return if we are already + // at the high index. + if i == high { + return dstL.n, srcL.p, ErrShortDst + } + if out.n != 0 { + i++ + // If the Transformer at the next index is not able to process any + // source bytes there is nothing that can be done to make progress + // and the bytes will remain unprocessed. lastFull is used to + // detect this and break out of the loop with a fatal error. + lastFull = true + continue + } + // The destination buffer was too small, but is completely empty. + // Return a fatal error as this transformation can never complete. + c.fatalError(i, errShortInternal) + case ErrShortSrc: + if i == 0 { + // Save ErrShortSrc in err. All other errors take precedence. + err = ErrShortSrc + break + } + // Source bytes were depleted before filling up the destination buffer. + // Verify we made some progress, move the remaining bytes to the errStart + // and try to get more source bytes. + if needProgress && nSrc == 0 || in.n-in.p == len(in.b) { + // There were not enough source bytes to proceed while the source + // buffer cannot hold any more bytes. Return a fatal error as this + // transformation can never complete. + c.fatalError(i, errShortInternal) + break + } + // in.b is an internal buffer and we can make progress. + in.p, in.n = 0, copy(in.b, in.src()) + fallthrough + case nil: + // if i == low, we have depleted the bytes at index i or any lower levels. + // In that case we increase low and i. In all other cases we decrease i to + // fetch more bytes before proceeding to the next index. + if i > low { + i-- + continue + } + default: + c.fatalError(i, err0) + } + // Exhausted level low or fatal error: increase low and continue + // to process the bytes accepted so far. + i++ + low = i + } + + // If c.errStart > 0, this means we found a fatal error. We will clear + // all upstream buffers. At this point, no more progress can be made + // downstream, as Transform would have bailed while handling ErrShortDst. + if c.errStart > 0 { + for i := 1; i < c.errStart; i++ { + c.link[i].p, c.link[i].n = 0, 0 + } + err, c.errStart, c.err = c.err, 0, nil + } + return dstL.n, srcL.p, err +} + +// RemoveFunc returns a Transformer that removes from the input all runes r for +// which f(r) is true. Illegal bytes in the input are replaced by RuneError. +func RemoveFunc(f func(r rune) bool) Transformer { + return removeF(f) +} + +type removeF func(r rune) bool + +func (removeF) Reset() {} + +// Transform implements the Transformer interface. 
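+// Runes for which t(r) is true are dropped; all other runes, including the
+// RuneError replacement for ill-formed input, are copied to dst.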
+func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] { + + if r = rune(src[0]); r < utf8.RuneSelf { + sz = 1 + } else { + r, sz = utf8.DecodeRune(src) + + if sz == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src) { + err = ErrShortSrc + break + } + // We replace illegal bytes with RuneError. Not doing so might + // otherwise turn a sequence of invalid UTF-8 into valid UTF-8. + // The resulting byte sequence may subsequently contain runes + // for which t(r) is true that were passed unnoticed. + if !t(r) { + if nDst+3 > len(dst) { + err = ErrShortDst + break + } + nDst += copy(dst[nDst:], "\uFFFD") + } + nSrc++ + continue + } + } + + if !t(r) { + if nDst+sz > len(dst) { + err = ErrShortDst + break + } + nDst += copy(dst[nDst:], src[:sz]) + } + nSrc += sz + } + return +} + +// grow returns a new []byte that is longer than b, and copies the first n bytes +// of b to the start of the new slice. +func grow(b []byte, n int) []byte { + m := len(b) + if m <= 32 { + m = 64 + } else if m <= 256 { + m *= 2 + } else { + m += m >> 1 + } + buf := make([]byte, m) + copy(buf, b[:n]) + return buf +} + +const initialBufSize = 128 + +// String returns a string with the result of converting s[:n] using t, where +// n <= len(s). If err == nil, n will be len(s). It calls Reset on t. +func String(t Transformer, s string) (result string, n int, err error) { + t.Reset() + if s == "" { + // Fast path for the common case for empty input. Results in about a + // 86% reduction of running time for BenchmarkStringLowerEmpty. + if _, _, err := t.Transform(nil, nil, true); err == nil { + return "", 0, nil + } + } + + // Allocate only once. Note that both dst and src escape when passed to + // Transform. + buf := [2 * initialBufSize]byte{} + dst := buf[:initialBufSize:initialBufSize] + src := buf[initialBufSize : 2*initialBufSize] + + // The input string s is transformed in multiple chunks (starting with a + // chunk size of initialBufSize). nDst and nSrc are per-chunk (or + // per-Transform-call) indexes, pDst and pSrc are overall indexes. + nDst, nSrc := 0, 0 + pDst, pSrc := 0, 0 + + // pPrefix is the length of a common prefix: the first pPrefix bytes of the + // result will equal the first pPrefix bytes of s. It is not guaranteed to + // be the largest such value, but if pPrefix, len(result) and len(s) are + // all equal after the final transform (i.e. calling Transform with atEOF + // being true returned nil error) then we don't need to allocate a new + // result string. + pPrefix := 0 + for { + // Invariant: pDst == pPrefix && pSrc == pPrefix. + + n := copy(src, s[pSrc:]) + nDst, nSrc, err = t.Transform(dst, src[:n], pSrc+n == len(s)) + pDst += nDst + pSrc += nSrc + + // TODO: let transformers implement an optional Spanner interface, akin + // to norm's QuickSpan. This would even allow us to avoid any allocation. + if !bytes.Equal(dst[:nDst], src[:nSrc]) { + break + } + pPrefix = pSrc + if err == ErrShortDst { + // A buffer can only be short if a transformer modifies its input. + break + } else if err == ErrShortSrc { + if nSrc == 0 { + // No progress was made. + break + } + // Equal so far and !atEOF, so continue checking. + } else if err != nil || pPrefix == len(s) { + return string(s[:pPrefix]), pPrefix, err + } + } + // Post-condition: pDst == pPrefix + nDst && pSrc == pPrefix + nSrc. + + // We have transformed the first pSrc bytes of the input s to become pDst + // transformed bytes. 
Those transformed bytes are discontiguous: the first + // pPrefix of them equal s[:pPrefix] and the last nDst of them equal + // dst[:nDst]. We copy them around, into a new dst buffer if necessary, so + // that they become one contiguous slice: dst[:pDst]. + if pPrefix != 0 { + newDst := dst + if pDst > len(newDst) { + newDst = make([]byte, len(s)+nDst-nSrc) + } + copy(newDst[pPrefix:pDst], dst[:nDst]) + copy(newDst[:pPrefix], s[:pPrefix]) + dst = newDst + } + + // Prevent duplicate Transform calls with atEOF being true at the end of + // the input. Also return if we have an unrecoverable error. + if (err == nil && pSrc == len(s)) || + (err != nil && err != ErrShortDst && err != ErrShortSrc) { + return string(dst[:pDst]), pSrc, err + } + + // Transform the remaining input, growing dst and src buffers as necessary. + for { + n := copy(src, s[pSrc:]) + nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s)) + pDst += nDst + pSrc += nSrc + + // If we got ErrShortDst or ErrShortSrc, do not grow as long as we can + // make progress. This may avoid excessive allocations. + if err == ErrShortDst { + if nDst == 0 { + dst = grow(dst, pDst) + } + } else if err == ErrShortSrc { + if nSrc == 0 { + src = grow(src, 0) + } + } else if err != nil || pSrc == len(s) { + return string(dst[:pDst]), pSrc, err + } + } +} + +// Bytes returns a new byte slice with the result of converting b[:n] using t, +// where n <= len(b). If err == nil, n will be len(b). It calls Reset on t. +func Bytes(t Transformer, b []byte) (result []byte, n int, err error) { + return doAppend(t, 0, make([]byte, len(b)), b) +} + +// Append appends the result of converting src[:n] using t to dst, where +// n <= len(src), If err == nil, n will be len(src). It calls Reset on t. +func Append(t Transformer, dst, src []byte) (result []byte, n int, err error) { + if len(dst) == cap(dst) { + n := len(src) + len(dst) // It is okay for this to be 0. + b := make([]byte, n) + dst = b[:copy(b, dst)] + } + return doAppend(t, len(dst), dst[:cap(dst)], src) +} + +func doAppend(t Transformer, pDst int, dst, src []byte) (result []byte, n int, err error) { + t.Reset() + pSrc := 0 + for { + nDst, nSrc, err := t.Transform(dst[pDst:], src[pSrc:], true) + pDst += nDst + pSrc += nSrc + if err != ErrShortDst { + return dst[:pDst], pSrc, err + } + + // Grow the destination buffer, but do not grow as long as we can make + // progress. This may avoid excessive allocations. + if nDst == 0 { + dst = grow(dst, pDst) + } + } +} diff --git a/vendor/golang.org/x/text/unicode/cldr/base.go b/vendor/golang.org/x/text/unicode/cldr/base.go new file mode 100644 index 000000000..21821791e --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/base.go @@ -0,0 +1,110 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cldr provides a parser for LDML and related XML formats. +// This package is inteded to be used by the table generation tools +// for the various internationalization-related packages. +// As the XML types are generated from the CLDR DTD, and as the CLDR standard +// is periodically amended, this package may change considerably over time. +// This mostly means that data may appear and disappear between versions. +// That is, old code should keep compiling for newer versions, but data +// may have moved or changed. +// CLDR version 22 is the first version supported by this package. +// Older versions may not work. 
+package cldr + +import ( + "encoding/xml" + "regexp" + "strconv" +) + +// Elem is implemented by every XML element. +type Elem interface { + setEnclosing(Elem) + setName(string) + enclosing() Elem + + GetCommon() *Common +} + +type hidden struct { + CharData string `xml:",chardata"` + Alias *struct { + Common + Source string `xml:"source,attr"` + Path string `xml:"path,attr"` + } `xml:"alias"` + Def *struct { + Common + Choice string `xml:"choice,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + } `xml:"default"` +} + +// Common holds several of the most common attributes and sub elements +// of an XML element. +type Common struct { + XMLName xml.Name + name string + enclElem Elem + Type string `xml:"type,attr,omitempty"` + Reference string `xml:"reference,attr,omitempty"` + Alt string `xml:"alt,attr,omitempty"` + ValidSubLocales string `xml:"validSubLocales,attr,omitempty"` + Draft string `xml:"draft,attr,omitempty"` + hidden +} + +// Default returns the default type to select from the enclosed list +// or "" if no default value is specified. +func (e *Common) Default() string { + if e.Def == nil { + return "" + } + if e.Def.Choice != "" { + return e.Def.Choice + } else if e.Def.Type != "" { + // Type is still used by the default element in collation. + return e.Def.Type + } + return "" +} + +// GetCommon returns e. It is provided such that Common implements Elem. +func (e *Common) GetCommon() *Common { + return e +} + +// Data returns the character data accumulated for this element. +func (e *Common) Data() string { + e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode) + return e.CharData +} + +func (e *Common) setName(s string) { + e.name = s +} + +func (e *Common) enclosing() Elem { + return e.enclElem +} + +func (e *Common) setEnclosing(en Elem) { + e.enclElem = en +} + +// Escape characters that can be escaped without further escaping the string. +var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`) + +// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string. +// It assumes the input string is correctly formatted. +func replaceUnicode(s string) string { + if s[1] == '#' { + r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32) + return string(r) + } + r, _, _, _ := strconv.UnquoteChar(s, 0) + return string(r) +} diff --git a/vendor/golang.org/x/text/unicode/cldr/cldr.go b/vendor/golang.org/x/text/unicode/cldr/cldr.go new file mode 100644 index 000000000..ea3fe139e --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/cldr.go @@ -0,0 +1,130 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run makexml.go -output xml.go + +// Package cldr provides a parser for LDML and related XML formats. +// This package is inteded to be used by the table generation tools +// for the various internationalization-related packages. +// As the XML types are generated from the CLDR DTD, and as the CLDR standard +// is periodically amended, this package may change considerably over time. +// This mostly means that data may appear and disappear between versions. +// That is, old code should keep compiling for newer versions, but data +// may have moved or changed. +// CLDR version 22 is the first version supported by this package. +// Older versions may not work. 
+package cldr // import "golang.org/x/text/unicode/cldr" + +import ( + "fmt" + "sort" +) + +// CLDR provides access to parsed data of the Unicode Common Locale Data Repository. +type CLDR struct { + parent map[string][]string + locale map[string]*LDML + resolved map[string]*LDML + bcp47 *LDMLBCP47 + supp *SupplementalData +} + +func makeCLDR() *CLDR { + return &CLDR{ + parent: make(map[string][]string), + locale: make(map[string]*LDML), + resolved: make(map[string]*LDML), + bcp47: &LDMLBCP47{}, + supp: &SupplementalData{}, + } +} + +// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned. +func (cldr *CLDR) BCP47() *LDMLBCP47 { + return nil +} + +// Draft indicates the draft level of an element. +type Draft int + +const ( + Approved Draft = iota + Contributed + Provisional + Unconfirmed +) + +var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""} + +// ParseDraft returns the Draft value corresponding to the given string. The +// empty string corresponds to Approved. +func ParseDraft(level string) (Draft, error) { + if level == "" { + return Approved, nil + } + for i, s := range drafts { + if level == s { + return Unconfirmed - Draft(i), nil + } + } + return Approved, fmt.Errorf("cldr: unknown draft level %q", level) +} + +func (d Draft) String() string { + return drafts[len(drafts)-1-int(d)] +} + +// SetDraftLevel sets which draft levels to include in the evaluated LDML. +// Any draft element for which the draft level is higher than lev will be excluded. +// If multiple draft levels are available for a single element, the one with the +// lowest draft level will be selected, unless preferDraft is true, in which case +// the highest draft will be chosen. +// It is assumed that the underlying LDML is canonicalized. +func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) { + // TODO: implement + cldr.resolved = make(map[string]*LDML) +} + +// RawLDML returns the LDML XML for id in unresolved form. +// id must be one of the strings returned by Locales. +func (cldr *CLDR) RawLDML(loc string) *LDML { + return cldr.locale[loc] +} + +// LDML returns the fully resolved LDML XML for loc, which must be one of +// the strings returned by Locales. +func (cldr *CLDR) LDML(loc string) (*LDML, error) { + return cldr.resolve(loc) +} + +// Supplemental returns the parsed supplemental data. If no such data was parsed, +// nil is returned. +func (cldr *CLDR) Supplemental() *SupplementalData { + return cldr.supp +} + +// Locales returns the locales for which there exist files. +// Valid sublocales for which there is no file are not included. +// The root locale is always sorted first. +func (cldr *CLDR) Locales() []string { + loc := []string{"root"} + hasRoot := false + for l, _ := range cldr.locale { + if l == "root" { + hasRoot = true + continue + } + loc = append(loc, l) + } + sort.Strings(loc[1:]) + if !hasRoot { + return loc[1:] + } + return loc +} + +// Get fills in the fields of x based on the XPath path. +func Get(e Elem, path string) (res Elem, err error) { + return walkXPath(e, path) +} diff --git a/vendor/golang.org/x/text/unicode/cldr/collate.go b/vendor/golang.org/x/text/unicode/cldr/collate.go new file mode 100644 index 000000000..80ee28d79 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/collate.go @@ -0,0 +1,359 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cldr + +import ( + "bufio" + "encoding/xml" + "errors" + "fmt" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// RuleProcessor can be passed to Collator's Process method, which +// parses the rules and calls the respective method for each rule found. +type RuleProcessor interface { + Reset(anchor string, before int) error + Insert(level int, str, context, extend string) error + Index(id string) +} + +const ( + // cldrIndex is a Unicode-reserved sentinel value used to mark the start + // of a grouping within an index. + // We ignore any rule that starts with this rune. + // See http://unicode.org/reports/tr35/#Collation_Elements for details. + cldrIndex = "\uFDD0" + + // specialAnchor is the format in which to represent logical reset positions, + // such as "first tertiary ignorable". + specialAnchor = "<%s/>" +) + +// Process parses the rules for the tailorings of this collation +// and calls the respective methods of p for each rule found. +func (c Collation) Process(p RuleProcessor) (err error) { + if len(c.Cr) > 0 { + if len(c.Cr) > 1 { + return fmt.Errorf("multiple cr elements, want 0 or 1") + } + return processRules(p, c.Cr[0].Data()) + } + if c.Rules.Any != nil { + return c.processXML(p) + } + return errors.New("no tailoring data") +} + +// processRules parses rules in the Collation Rule Syntax defined in +// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings. +func processRules(p RuleProcessor, s string) (err error) { + chk := func(s string, e error) string { + if err == nil { + err = e + } + return s + } + i := 0 // Save the line number for use after the loop. + scanner := bufio.NewScanner(strings.NewReader(s)) + for ; scanner.Scan() && err == nil; i++ { + for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) { + level := 5 + var ch byte + switch ch, s = s[0], s[1:]; ch { + case '&': // followed by or '[' ']' + if s = skipSpace(s); consume(&s, '[') { + s = chk(parseSpecialAnchor(p, s)) + } else { + s = chk(parseAnchor(p, 0, s)) + } + case '<': // sort relation '<'{1,4}, optionally followed by '*'. + for level = 1; consume(&s, '<'); level++ { + } + if level > 4 { + err = fmt.Errorf("level %d > 4", level) + } + fallthrough + case '=': // identity relation, optionally followed by *. + if consume(&s, '*') { + s = chk(parseSequence(p, level, s)) + } else { + s = chk(parseOrder(p, level, s)) + } + default: + chk("", fmt.Errorf("illegal operator %q", ch)) + break + } + } + } + if chk("", scanner.Err()); err != nil { + return fmt.Errorf("%d: %v", i, err) + } + return nil +} + +// parseSpecialAnchor parses the anchor syntax which is either of the form +// ['before' ] +// or +// [