Merge pull request #73435 from micahhausler/aws-sdk-update-201901

Updated AWS SDK to v1.16.26 for ECR PrivateLink support
pull/564/head
Kubernetes Prow Robot 2019-02-04 11:43:03 -08:00 committed by GitHub
commit 10a4d5aa8a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
115 changed files with 26996 additions and 3288 deletions
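For context only (this sketch is not part of the diff below, and the region value is a placeholder): the kubelet-side ECR credential provider ultimately issues a GetAuthorizationToken call through this SDK, and the endpoint metadata updated in v1.16.26 is what lets that call resolve regional ECR endpoints, including PrivateLink/VPC endpoints.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	// Credentials come from the default chain; the region is illustrative.
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-west-2")})
	if err != nil {
		log.Fatal(err)
	}

	svc := ecr.New(sess)

	// GetAuthorizationToken returns docker-style registry credentials plus
	// the registry endpoint to authenticate against.
	out, err := svc.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, data := range out.AuthorizationData {
		fmt.Println(aws.StringValue(data.ProxyEndpoint))
	}
}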

155
Godeps/Godeps.json generated

@ -201,178 +201,193 @@
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/awserr",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/client",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/processcreds",
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/stscreds",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/csm",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/defaults",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/endpoints",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/request",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/session",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/signer/v4",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/ini",
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/sdkio",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/sdkrand",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/sdkuri",
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/shareddefaults",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/ec2query",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/autoscaling",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/ec2",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/ecr",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/elb",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/elbv2",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/kms",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/sts",
"Comment": "v1.14.12",
"Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4"
"Comment": "v1.16.26",
"Rev": "81f3829f5a9d041041bdf56e55926691309d7699"
},
{
"ImportPath": "github.com/bazelbuild/bazel-gazelle/cmd/gazelle",

630
Godeps/LICENSES generated

@ -2601,6 +2601,216 @@ SOFTWARE.
================================================================================
================================================================================
= vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds licensed under: =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= vendor/github.com/aws/aws-sdk-go/LICENSE.txt 3b83ef96387f14655fc854ddc3c6bd57
================================================================================
================================================================================
= vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds licensed under: =
@ -4281,6 +4491,216 @@ SOFTWARE.
================================================================================
================================================================================
= vendor/github.com/aws/aws-sdk-go/internal/ini licensed under: =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= vendor/github.com/aws/aws-sdk-go/LICENSE.txt 3b83ef96387f14655fc854ddc3c6bd57
================================================================================
================================================================================
= vendor/github.com/aws/aws-sdk-go/internal/sdkio licensed under: =
@ -4701,6 +5121,216 @@ SOFTWARE.
================================================================================
================================================================================
= vendor/github.com/aws/aws-sdk-go/internal/sdkuri licensed under: =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= vendor/github.com/aws/aws-sdk-go/LICENSE.txt 3b83ef96387f14655fc854ddc3c6bd57
================================================================================
================================================================================
= vendor/github.com/aws/aws-sdk-go/internal/shareddefaults licensed under: =

2
vendor/BUILD vendored

@ -38,8 +38,10 @@ filegroup(
"//vendor/github.com/armon/circbuf:all-srcs",
"//vendor/github.com/asaskevich/govalidator:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/aws:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/internal/ini:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/internal/sdkio:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/internal/sdkrand:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/internal/sdkuri:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/internal/shareddefaults:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/private/protocol:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/service/autoscaling:all-srcs",

View File

@ -4,9 +4,11 @@ go_library(
name = "go_default_library",
srcs = [
"config.go",
"context.go",
"context_1_6.go",
"context_1_7.go",
"context_1_5.go",
"context_1_9.go",
"context_background_1_5.go",
"context_background_1_7.go",
"context_sleep.go",
"convert_types.go",
"doc.go",
"errors.go",

View File

@ -23,28 +23,27 @@ func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
case reflect.Struct:
buf.WriteString("{\n")
names := []string{}
for i := 0; i < v.Type().NumField(); i++ {
name := v.Type().Field(i).Name
f := v.Field(i)
if name[0:1] == strings.ToLower(name[0:1]) {
ft := v.Type().Field(i)
fv := v.Field(i)
if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
continue // ignore unexported fields
}
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
continue // ignore unset fields
}
names = append(names, name)
}
for i, n := range names {
val := v.FieldByName(n)
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(n + ": ")
stringValue(val, indent+2, buf)
buf.WriteString(ft.Name + ": ")
if i < len(names)-1 {
buf.WriteString(",\n")
if tag := ft.Tag.Get("sensitive"); tag == "true" {
buf.WriteString("<sensitive>")
} else {
stringValue(fv, indent+2, buf)
}
buf.WriteString(",\n")
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")

View File

@ -18,7 +18,7 @@ type Config struct {
// States that the signing name did not come from a modeled source but
// was derived based on other data. Used by service client constructors
// to determine if the signin name can be overriden based on metadata the
// to determine if the signin name can be overridden based on metadata the
// service has.
SigningNameDerived bool
}

View File

@ -18,7 +18,7 @@ const UseServiceDefaultRetries = -1
type RequestRetryer interface{}
// A Config provides service configuration for service clients. By default,
// all clients will use the defaults.DefaultConfig tructure.
// all clients will use the defaults.DefaultConfig structure.
//
// // Create Session with MaxRetry configuration to be shared by multiple
// // service clients.
@ -45,8 +45,8 @@ type Config struct {
// that overrides the default generated endpoint for a client. Set this
// to `""` to use the default generated endpoint.
//
// @note You must still provide a `Region` value when specifying an
// endpoint for a client.
// Note: You must still provide a `Region` value when specifying an
// endpoint for a client.
Endpoint *string
// The resolver to use for looking up endpoints for AWS service clients
@ -65,8 +65,8 @@ type Config struct {
// noted. A full list of regions is found in the "Regions and Endpoints"
// document.
//
// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
// AWS Regions and Endpoints
// See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS
// Regions and Endpoints.
Region *string
// Set this to `true` to disable SSL when sending requests. Defaults
@ -120,9 +120,10 @@ type Config struct {
// will use virtual hosted bucket addressing when possible
// (`http://BUCKET.s3.amazonaws.com/KEY`).
//
// @note This configuration option is specific to the Amazon S3 service.
// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
// Amazon S3: Virtual Hosting of Buckets
// Note: This configuration option is specific to the Amazon S3 service.
//
// See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
// for Amazon S3: Virtual Hosting of Buckets
S3ForcePathStyle *bool
// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
@ -223,6 +224,28 @@ type Config struct {
// Key: aws.String("//foo//bar//moo"),
// })
DisableRestProtocolURICleaning *bool
// EnableEndpointDiscovery will allow for endpoint discovery on operations that
// have the definition in its model. By default, endpoint discovery is off.
//
// Example:
// sess := session.Must(session.NewSession(&aws.Config{
// EnableEndpointDiscovery: aws.Bool(true),
// }))
//
// svc := s3.New(sess)
// out, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String("bucketname"),
// Key: aws.String("/foo/bar/moo"),
// })
EnableEndpointDiscovery *bool
// DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
// request endpoint hosts with modeled information.
//
// Disabling this feature is useful when you want to use local endpoints
// for testing that do not support the modeled host prefix pattern.
DisableEndpointHostPrefix *bool
}
// NewConfig returns a new Config pointer that can be chained with builder
@ -377,6 +400,19 @@ func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
return c
}
// WithEndpointDiscovery will set whether or not to use endpoint discovery.
func (c *Config) WithEndpointDiscovery(t bool) *Config {
c.EnableEndpointDiscovery = &t
return c
}
// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix
// when making requests.
func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
c.DisableEndpointHostPrefix = &t
return c
}
// MergeIn merges the passed in configs into the existing config object.
func (c *Config) MergeIn(cfgs ...*Config) {
for _, other := range cfgs {
@ -476,6 +512,14 @@ func mergeInConfig(dst *Config, other *Config) {
if other.EnforceShouldRetryCheck != nil {
dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
}
if other.EnableEndpointDiscovery != nil {
dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
}
if other.DisableEndpointHostPrefix != nil {
dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
}
}
// Copy will return a shallow copy of the Config object. If any additional
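A short usage sketch for the options added in this file's diff (nothing assumed beyond the Config fields and With* helpers shown above):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Opt in to endpoint discovery, and disable modeled host prefixes, e.g.
	// when pointing a client at a local test endpoint.
	cfg := aws.NewConfig().
		WithRegion("us-east-1").
		WithEndpointDiscovery(true).
		WithDisableEndpointHostPrefix(true)

	fmt.Println(aws.BoolValue(cfg.EnableEndpointDiscovery),
		aws.BoolValue(cfg.DisableEndpointHostPrefix))
}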

View File

@ -1,8 +1,8 @@
// +build !go1.9
package aws
import (
"time"
)
import "time"
// Context is an copy of the Go v1.7 stdlib's context.Context interface.
// It is represented as a SDK interface to enable you to use the "WithContext"
@ -35,37 +35,3 @@ type Context interface {
// functions.
Value(key interface{}) interface{}
}
// BackgroundContext returns a context that will never be canceled, has no
// values, and no deadline. This context is used by the SDK to provide
// backwards compatibility with non-context API operations and functionality.
//
// Go 1.6 and before:
// This context function is equivalent to context.Background in the Go stdlib.
//
// Go 1.7 and later:
// The context returned will be the value returned by context.Background()
//
// See https://golang.org/pkg/context for more information on Contexts.
func BackgroundContext() Context {
return backgroundCtx
}
// SleepWithContext will wait for the timer duration to expire, or the context
// is canceled. Which ever happens first. If the context is canceled the Context's
// error will be returned.
//
// Expects Context to always return a non-nil error if the Done channel is closed.
func SleepWithContext(ctx Context, dur time.Duration) error {
t := time.NewTimer(dur)
defer t.Stop()
select {
case <-t.C:
break
case <-ctx.Done():
return ctx.Err()
}
return nil
}

View File

@ -1,9 +0,0 @@
// +build go1.7
package aws
import "context"
var (
backgroundCtx = context.Background()
)

11
vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go generated vendored Normal file

@ -0,0 +1,11 @@
// +build go1.9
package aws
import "context"
// Context is an alias of the Go stdlib's context.Context interface.
// It can be used within the SDK's API operation "WithContext" methods.
//
// See https://golang.org/pkg/context on how to use contexts.
type Context = context.Context
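Illustrative only (the ECR call is an assumed example, not something this file requires): because aws.Context is now an alias for context.Context on Go 1.9+, standard library contexts flow into the SDK's *WithContext methods without any wrapping.

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ecr.New(sess)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// ctx satisfies aws.Context directly thanks to the type alias.
	if _, err := svc.GetAuthorizationTokenWithContext(ctx, &ecr.GetAuthorizationTokenInput{}); err != nil {
		log.Println("request failed:", err)
	}
}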

View File

@ -39,3 +39,18 @@ func (e *emptyCtx) String() string {
var (
backgroundCtx = new(emptyCtx)
)
// BackgroundContext returns a context that will never be canceled, has no
// values, and no deadline. This context is used by the SDK to provide
// backwards compatibility with non-context API operations and functionality.
//
// Go 1.6 and before:
// This context function is equivalent to context.Background in the Go stdlib.
//
// Go 1.7 and later:
// The context returned will be the value returned by context.Background()
//
// See https://golang.org/pkg/context for more information on Contexts.
func BackgroundContext() Context {
return backgroundCtx
}

View File

@ -0,0 +1,20 @@
// +build go1.7
package aws
import "context"
// BackgroundContext returns a context that will never be canceled, has no
// values, and no deadline. This context is used by the SDK to provide
// backwards compatibility with non-context API operations and functionality.
//
// Go 1.6 and before:
// This context function is equivalent to context.Background in the Go stdlib.
//
// Go 1.7 and later:
// The context returned will be the value returned by context.Background()
//
// See https://golang.org/pkg/context for more information on Contexts.
func BackgroundContext() Context {
return context.Background()
}

24
vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go generated vendored Normal file

@ -0,0 +1,24 @@
package aws
import (
"time"
)
// SleepWithContext will wait for the timer duration to expire, or the context
// is canceled. Which ever happens first. If the context is canceled the Context's
// error will be returned.
//
// Expects Context to always return a non-nil error if the Done channel is closed.
func SleepWithContext(ctx Context, dur time.Duration) error {
t := time.NewTimer(dur)
defer t.Stop()
select {
case <-t.C:
break
case <-ctx.Done():
return ctx.Err()
}
return nil
}
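A minimal usage sketch for the relocated helper (assumes Go 1.9+ so that context.Context satisfies aws.Context): the sleep returns early with the context's error when the context is canceled first.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	// The 5s sleep is cut short by the 100ms deadline.
	if err := aws.SleepWithContext(ctx, 5*time.Second); err != nil {
		fmt.Println("sleep interrupted:", err) // context deadline exceeded
	}
}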

View File

@ -72,9 +72,9 @@ var ValidateReqSigHandler = request.NamedHandler{
signedTime = r.LastSignedAt
}
// 10 minutes to allow for some clock skew/delays in transmission.
// 5 minutes to allow for some clock skew/delays in transmission.
// Would be improved with aws/aws-sdk-go#423
if signedTime.Add(10 * time.Minute).After(time.Now()) {
if signedTime.Add(5 * time.Minute).After(time.Now()) {
return
}

View File

@ -17,7 +17,7 @@ var SDKVersionUserAgentHandler = request.NamedHandler{
}
const execEnvVar = `AWS_EXECUTION_ENV`
const execEnvUAKey = `exec_env`
const execEnvUAKey = `exec-env`
// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
// execution environment to the user agent.

View File

@ -14,8 +14,8 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/internal/ini:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/internal/shareddefaults:go_default_library",
"//vendor/github.com/go-ini/ini:go_default_library",
],
)
@ -32,6 +32,7 @@ filegroup(
":package-srcs",
"//vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds:all-srcs",
"//vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds:all-srcs",
],
tags = ["automanaged"],

View File

@ -9,9 +9,7 @@ var (
// providers in the ChainProvider.
//
// This has been deprecated. For verbose error messaging set
// aws.Config.CredentialsChainVerboseErrors to true
//
// @readonly
// aws.Config.CredentialsChainVerboseErrors to true.
ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
`no valid providers in chain. Deprecated.
For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,

View File

@ -49,6 +49,8 @@
package credentials
import (
"fmt"
"github.com/aws/aws-sdk-go/aws/awserr"
"sync"
"time"
)
@ -64,8 +66,6 @@ import (
// Credentials: credentials.AnonymousCredentials,
// })))
// // Access public S3 buckets.
//
// @readonly
var AnonymousCredentials = NewStaticCredentials("", "", "")
// A Value is the AWS credentials value for individual credential fields.
@ -99,6 +99,14 @@ type Provider interface {
IsExpired() bool
}
// An Expirer is an interface that Providers can implement to expose the expiration
// time, if known. If the Provider cannot accurately provide this info,
// it should not implement this interface.
type Expirer interface {
// The time at which the credentials are no longer valid
ExpiresAt() time.Time
}
// An ErrorProvider is a stub credentials provider that always returns an error
// this is used by the SDK when construction a known provider is not possible
// due to an error.
@ -158,13 +166,19 @@ func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
// IsExpired returns if the credentials are expired.
func (e *Expiry) IsExpired() bool {
if e.CurrentTime == nil {
e.CurrentTime = time.Now
curTime := e.CurrentTime
if curTime == nil {
curTime = time.Now
}
return e.expiration.Before(e.CurrentTime())
return e.expiration.Before(curTime())
}
// A Credentials provides synchronous safe retrieval of AWS credentials Value.
// ExpiresAt returns the expiration time of the credential
func (e *Expiry) ExpiresAt() time.Time {
return e.expiration
}
// A Credentials provides concurrency safe retrieval of AWS credentials Value.
// Credentials will cache the credentials value until they expire. Once the value
// expires the next Get will attempt to retrieve valid credentials.
//
@ -256,3 +270,23 @@ func (c *Credentials) IsExpired() bool {
func (c *Credentials) isExpired() bool {
return c.forceRefresh || c.provider.IsExpired()
}
// ExpiresAt provides access to the functionality of the Expirer interface of
// the underlying Provider, if it supports that interface. Otherwise, it returns
// an error.
func (c *Credentials) ExpiresAt() (time.Time, error) {
c.m.RLock()
defer c.m.RUnlock()
expirer, ok := c.provider.(Expirer)
if !ok {
return time.Time{}, awserr.New("ProviderNotExpirer",
fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName),
nil)
}
if c.forceRefresh {
// set expiration time to the distant past
return time.Time{}, nil
}
return expirer.ExpiresAt(), nil
}
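Illustrative sketch of the new ExpiresAt accessor (the static keys below are placeholders): a provider that does not implement Expirer, such as the static provider, returns the ProviderNotExpirer error instead of a time.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Placeholder keys; real code would use a provider chain.
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")

	if t, err := creds.ExpiresAt(); err != nil {
		fmt.Println("no expiry available:", err)
	} else {
		fmt.Println("credentials expire at:", t)
	}
}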

View File

@ -11,6 +11,7 @@ go_library(
"//vendor/github.com/aws/aws-sdk-go/aws/client:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/ec2metadata:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/internal/sdkuri:go_default_library",
],
)

View File

@ -4,7 +4,6 @@ import (
"bufio"
"encoding/json"
"fmt"
"path"
"strings"
"time"
@ -12,6 +11,7 @@ import (
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/internal/sdkuri"
)
// ProviderName provides a name of EC2Role provider
@ -125,7 +125,7 @@ type ec2RoleCredRespBody struct {
Message string
}
const iamSecurityCredsPath = "/iam/security-credentials"
const iamSecurityCredsPath = "iam/security-credentials/"
// requestCredList requests a list of credentials from the EC2 service.
// If there are no credentials, or there is an error making or receiving the request
@ -153,7 +153,7 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
// If the credentials cannot be found, or there is an error reading the response
// and error will be returned.
func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName))
if err != nil {
return ec2RoleCredRespBody{},
awserr.New("EC2RoleRequestError",

View File

@ -65,6 +65,10 @@ type Provider struct {
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
// Optional authorization token value if set will be used as the value of
// the Authorization header of the endpoint credential request.
AuthorizationToken string
}
// NewProviderClient returns a credentials Provider for retrieving AWS credentials
@ -152,6 +156,9 @@ func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
out := &getCredentialsOutput{}
req := p.Client.NewRequest(op, nil, out)
req.HTTPRequest.Header.Set("Accept", "application/json")
if authToken := p.AuthorizationToken; len(authToken) != 0 {
req.HTTPRequest.Header.Set("Authorization", authToken)
}
return out, req.Send()
}
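
A hedged sketch of setting the new AuthorizationToken field when constructing the endpoint credentials client directly; the endpoint URL and token value are placeholders, and the defaults package later in this diff wires the same field from the AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
        "github.com/aws/aws-sdk-go/aws/defaults"
    )

    func main() {
        def := defaults.Get()

        // NewCredentialsClient wraps the provider in a credentials.Credentials.
        // The Authorization header is only sent when the token is non-empty.
        creds := endpointcreds.NewCredentialsClient(*def.Config, def.Handlers,
            "http://169.254.170.2/v2/credentials", // placeholder endpoint
            func(p *endpointcreds.Provider) {
                p.AuthorizationToken = "example-token" // placeholder
            },
        )
        _ = creds
    }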

View File

@ -12,14 +12,10 @@ const EnvProviderName = "EnvProvider"
var (
// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
// found in the process's environment.
//
// @readonly
ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
// can't be found in the process's environment.
//
// @readonly
ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
)

View File

@ -0,0 +1,27 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["provider.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds",
importpath = "github.com/aws/aws-sdk-go/aws/credentials/processcreds",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,425 @@
/*
Package processcreds is a credential Provider to retrieve `credential_process`
credentials.
WARNING: The following describes a method of sourcing credentials from an external
process. This can potentially be dangerous, so proceed with caution. Other
credential providers should be preferred if at all possible. If using this
option, you should make sure that the config file is as locked down as possible
using security best practices for your operating system.
You can use credentials from a `credential_process` in a variety of ways.
One way is to setup your shared config file, located in the default
location, with the `credential_process` key and the command you want to be
called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
[default]
credential_process = /command/to/call
Creating a new session will use the credential process to retrieve credentials.
NOTE: If there are credentials in the profile you are using, the credential
process will not be used.
// Initialize a session to load credentials.
sess, _ := session.NewSession(&aws.Config{
Region: aws.String("us-east-1")},
)
// Create S3 service client to use the credentials.
svc := s3.New(sess)
Another way to use the `credential_process` method is by using
`credentials.NewCredentials()` and providing a command to be executed to
retrieve credentials:
// Create credentials using the ProcessProvider.
creds := processcreds.NewCredentials("/path/to/command")
// Create service client value configured for credentials.
svc := s3.New(sess, &aws.Config{Credentials: creds})
You can set a non-default timeout for the `credential_process` with another
constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To
set a 500 millisecond timeout:
// Create credentials using the ProcessProvider.
creds := processcreds.NewCredentialsTimeout(
"/path/to/command",
time.Duration(500) * time.Millisecond)
If you need more control, you can set any configurable options in the
credentials using one or more option functions. For example, you can set a two
minute timeout, a credential duration of 60 minutes, and a maximum stdout
buffer size of 2k.
creds := processcreds.NewCredentials(
"/path/to/command",
func(opt *ProcessProvider) {
opt.Timeout = time.Duration(2) * time.Minute
opt.Duration = time.Duration(60) * time.Minute
opt.MaxBufSize = 2048
})
You can also use your own `exec.Cmd`:
// Create an exec.Cmd
myCommand := exec.Command("/path/to/command")
// Create credentials using your exec.Cmd and custom timeout
creds := processcreds.NewCredentialsCommand(
myCommand,
func(opt *processcreds.ProcessProvider) {
opt.Timeout = time.Duration(1) * time.Second
})
*/
package processcreds
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"runtime"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
)
const (
// ProviderName is the name this credentials provider will label any
// returned credentials Value with.
ProviderName = `ProcessProvider`
// ErrCodeProcessProviderParse error parsing process output
ErrCodeProcessProviderParse = "ProcessProviderParseError"
// ErrCodeProcessProviderVersion version error in output
ErrCodeProcessProviderVersion = "ProcessProviderVersionError"
// ErrCodeProcessProviderRequired required attribute missing in output
ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"
// ErrCodeProcessProviderExecution execution of command failed
ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"
// errMsgProcessProviderTimeout process took longer than allowed
errMsgProcessProviderTimeout = "credential process timed out"
// errMsgProcessProviderProcess process error
errMsgProcessProviderProcess = "error in credential_process"
// errMsgProcessProviderParse problem parsing output
errMsgProcessProviderParse = "parse failed of credential_process output"
// errMsgProcessProviderVersion version error in output
errMsgProcessProviderVersion = "wrong version in process output (not 1)"
// errMsgProcessProviderMissKey missing access key id in output
errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
// errMsgProcessProviderMissSecret missing secret access key in output
errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
// errMsgProcessProviderPrepareCmd prepare of command failed
errMsgProcessProviderPrepareCmd = "failed to prepare command"
// errMsgProcessProviderEmptyCmd command must not be empty
errMsgProcessProviderEmptyCmd = "command must not be empty"
// errMsgProcessProviderPipe failed to initialize pipe
errMsgProcessProviderPipe = "failed to initialize pipe"
// DefaultDuration is the default amount of time in minutes that the
// credentials will be valid for.
DefaultDuration = time.Duration(15) * time.Minute
// DefaultBufSize limits buffer size from growing to an enormous
// amount due to a faulty process.
DefaultBufSize = 1024
// DefaultTimeout default limit on time a process can run.
DefaultTimeout = time.Duration(1) * time.Minute
)
// ProcessProvider satisfies the credentials.Provider interface, and is a
// client to retrieve credentials from a process.
type ProcessProvider struct {
staticCreds bool
credentials.Expiry
originalCommand []string
// Expiry duration of the credentials. Defaults to 15 minutes if not set.
Duration time.Duration
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause request to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
// A string representing an os command that should return a JSON with
// credential information.
command *exec.Cmd
// MaxBufSize limits memory usage from growing to an enormous
// amount due to a faulty process.
MaxBufSize int
// Timeout limits the time a process can run.
Timeout time.Duration
}
// NewCredentials returns a pointer to a new Credentials object wrapping the
// ProcessProvider. The credentials will expire every 15 minutes by default.
func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
p := &ProcessProvider{
command: exec.Command(command),
Duration: DefaultDuration,
Timeout: DefaultTimeout,
MaxBufSize: DefaultBufSize,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// NewCredentialsTimeout returns a pointer to a new Credentials object with
// the specified command and timeout, and default duration and max buffer size.
func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
p := NewCredentials(command, func(opt *ProcessProvider) {
opt.Timeout = timeout
})
return p
}
// NewCredentialsCommand returns a pointer to a new Credentials object with
// the specified command, and default timeout, duration and max buffer size.
func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
p := &ProcessProvider{
command: command,
Duration: DefaultDuration,
Timeout: DefaultTimeout,
MaxBufSize: DefaultBufSize,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
type credentialProcessResponse struct {
Version int
AccessKeyID string `json:"AccessKeyId"`
SecretAccessKey string
SessionToken string
Expiration *time.Time
}
// Retrieve executes the 'credential_process' and returns the credentials.
func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
out, err := p.executeCredentialProcess()
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
// Serialize and validate response
resp := &credentialProcessResponse{}
if err = json.Unmarshal(out, resp); err != nil {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderParse,
fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)),
err)
}
if resp.Version != 1 {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderVersion,
errMsgProcessProviderVersion,
nil)
}
if len(resp.AccessKeyID) == 0 {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderRequired,
errMsgProcessProviderMissKey,
nil)
}
if len(resp.SecretAccessKey) == 0 {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderRequired,
errMsgProcessProviderMissSecret,
nil)
}
// Handle expiration
p.staticCreds = resp.Expiration == nil
if resp.Expiration != nil {
p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
}
return credentials.Value{
ProviderName: ProviderName,
AccessKeyID: resp.AccessKeyID,
SecretAccessKey: resp.SecretAccessKey,
SessionToken: resp.SessionToken,
}, nil
}
// IsExpired returns true if the credentials retrieved are expired, or not yet
// retrieved.
func (p *ProcessProvider) IsExpired() bool {
if p.staticCreds {
return false
}
return p.Expiry.IsExpired()
}
// prepareCommand prepares the command to be executed.
func (p *ProcessProvider) prepareCommand() error {
var cmdArgs []string
if runtime.GOOS == "windows" {
cmdArgs = []string{"cmd.exe", "/C"}
} else {
cmdArgs = []string{"sh", "-c"}
}
if len(p.originalCommand) == 0 {
p.originalCommand = make([]string, len(p.command.Args))
copy(p.originalCommand, p.command.Args)
// check for empty command because it succeeds
if len(strings.TrimSpace(p.originalCommand[0])) < 1 {
return awserr.New(
ErrCodeProcessProviderExecution,
fmt.Sprintf(
"%s: %s",
errMsgProcessProviderPrepareCmd,
errMsgProcessProviderEmptyCmd),
nil)
}
}
cmdArgs = append(cmdArgs, p.originalCommand...)
p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...)
p.command.Env = os.Environ()
return nil
}
// executeCredentialProcess starts the credential process on the OS and
// returns the results or an error.
func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) {
if err := p.prepareCommand(); err != nil {
return nil, err
}
// Setup the pipes
outReadPipe, outWritePipe, err := os.Pipe()
if err != nil {
return nil, awserr.New(
ErrCodeProcessProviderExecution,
errMsgProcessProviderPipe,
err)
}
p.command.Stderr = os.Stderr // display stderr on console for MFA
p.command.Stdout = outWritePipe // get creds json on process's stdout
p.command.Stdin = os.Stdin // enable stdin for MFA
output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize))
stdoutCh := make(chan error, 1)
go readInput(
io.LimitReader(outReadPipe, int64(p.MaxBufSize)),
output,
stdoutCh)
execCh := make(chan error, 1)
go executeCommand(*p.command, execCh)
finished := false
var errors []error
for !finished {
select {
case readError := <-stdoutCh:
errors = appendError(errors, readError)
finished = true
case execError := <-execCh:
err := outWritePipe.Close()
errors = appendError(errors, err)
errors = appendError(errors, execError)
if errors != nil {
return output.Bytes(), awserr.NewBatchError(
ErrCodeProcessProviderExecution,
errMsgProcessProviderProcess,
errors)
}
case <-time.After(p.Timeout):
finished = true
return output.Bytes(), awserr.NewBatchError(
ErrCodeProcessProviderExecution,
errMsgProcessProviderTimeout,
errors) // errors can be nil
}
}
out := output.Bytes()
if runtime.GOOS == "windows" {
// windows adds slashes to quotes
out = []byte(strings.Replace(string(out), `\"`, `"`, -1))
}
return out, nil
}
// appendError conveniently checks for nil before appending slice
func appendError(errors []error, err error) []error {
if err != nil {
return append(errors, err)
}
return errors
}
func executeCommand(cmd exec.Cmd, exec chan error) {
// Start the command
err := cmd.Start()
if err == nil {
err = cmd.Wait()
}
exec <- err
}
func readInput(r io.Reader, w io.Writer, read chan error) {
tee := io.TeeReader(r, w)
_, err := ioutil.ReadAll(tee)
if err == io.EOF {
err = nil
}
read <- err // will only arrive here when write end of pipe is closed
}
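
Read alongside the Retrieve validation above, the external process must print a single JSON document with Version set to 1 and at least AccessKeyId and SecretAccessKey; SessionToken and Expiration are optional, and omitting Expiration marks the credentials as static. A minimal sketch of such a helper, with placeholder values:

    // Sketch of a credential_process helper whose output matches the
    // credentialProcessResponse structure parsed above. All values are placeholders.
    package main

    import (
        "encoding/json"
        "os"
        "time"
    )

    func main() {
        exp := time.Now().Add(15 * time.Minute).UTC()
        out := map[string]interface{}{
            "Version":         1,
            "AccessKeyId":     "AKIDEXAMPLE",
            "SecretAccessKey": "secretExample",
            "SessionToken":    "tokenExample", // optional
            "Expiration":      exp,            // optional; omit for static creds
        }
        json.NewEncoder(os.Stdout).Encode(out)
    }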

View File

@ -4,9 +4,8 @@ import (
"fmt"
"os"
"github.com/go-ini/ini"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/internal/ini"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
@ -77,36 +76,37 @@ func (p *SharedCredentialsProvider) IsExpired() bool {
// The credentials retrieved from the profile will be returned or error. Error will be
// returned if it fails to read from the file, or the data is invalid.
func loadProfile(filename, profile string) (Value, error) {
config, err := ini.Load(filename)
config, err := ini.OpenFile(filename)
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
}
iniProfile, err := config.GetSection(profile)
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
iniProfile, ok := config.GetSection(profile)
if !ok {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
}
id, err := iniProfile.GetKey("aws_access_key_id")
if err != nil {
id := iniProfile.String("aws_access_key_id")
if len(id) == 0 {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
err)
nil)
}
secret, err := iniProfile.GetKey("aws_secret_access_key")
if err != nil {
secret := iniProfile.String("aws_secret_access_key")
if len(secret) == 0 {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
nil)
}
// Default to empty string if not found
token := iniProfile.Key("aws_session_token")
token := iniProfile.String("aws_session_token")
return Value{
AccessKeyID: id.String(),
SecretAccessKey: secret.String(),
SessionToken: token.String(),
AccessKeyID: id,
SecretAccessKey: secret,
SessionToken: token,
ProviderName: SharedCredsProviderName,
}, nil
}
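
The provider's behavior is unchanged by the ini swap: it still reads aws_access_key_id, aws_secret_access_key, and the optional aws_session_token from the named profile. A brief usage sketch, assuming a standard ~/.aws/credentials file exists:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // Empty Filename and Profile fall back to the default shared
        // credentials file and the "default" profile.
        creds := credentials.NewCredentials(&credentials.SharedCredentialsProvider{
            Filename: "",
            Profile:  "",
        })

        v, err := creds.Get()
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("loaded credentials via %s", v.ProviderName)
    }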

View File

@ -9,8 +9,6 @@ const StaticProviderName = "StaticProvider"
var (
// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
//
// @readonly
ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
)

View File

@ -6,7 +6,8 @@ go_library(
"doc.go",
"enable.go",
"metric.go",
"metricChan.go",
"metric_chan.go",
"metric_exception.go",
"reporter.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/aws/aws-sdk-go/aws/csm",

View File

@ -3,6 +3,8 @@ package csm
import (
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
)
type metricTime time.Time
@ -39,6 +41,12 @@ type metric struct {
SDKException *string `json:"SdkException,omitempty"`
SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"`
FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"`
FinalAWSException *string `json:"FinalAwsException,omitempty"`
FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"`
FinalSDKException *string `json:"FinalSdkException,omitempty"`
FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"`
DestinationIP *string `json:"DestinationIp,omitempty"`
ConnectionReused *int `json:"ConnectionReused,omitempty"`
@ -48,4 +56,54 @@ type metric struct {
DNSLatency *int `json:"DnsLatency,omitempty"`
TCPLatency *int `json:"TcpLatency,omitempty"`
SSLLatency *int `json:"SslLatency,omitempty"`
MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"`
}
func (m *metric) TruncateFields() {
m.ClientID = truncateString(m.ClientID, 255)
m.UserAgent = truncateString(m.UserAgent, 256)
m.AWSException = truncateString(m.AWSException, 128)
m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512)
m.SDKException = truncateString(m.SDKException, 128)
m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512)
m.FinalAWSException = truncateString(m.FinalAWSException, 128)
m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512)
m.FinalSDKException = truncateString(m.FinalSDKException, 128)
m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512)
}
func truncateString(v *string, l int) *string {
if v != nil && len(*v) > l {
nv := (*v)[:l]
return &nv
}
return v
}
func (m *metric) SetException(e metricException) {
switch te := e.(type) {
case awsException:
m.AWSException = aws.String(te.exception)
m.AWSExceptionMessage = aws.String(te.message)
case sdkException:
m.SDKException = aws.String(te.exception)
m.SDKExceptionMessage = aws.String(te.message)
}
}
func (m *metric) SetFinalException(e metricException) {
switch te := e.(type) {
case awsException:
m.FinalAWSException = aws.String(te.exception)
m.FinalAWSExceptionMessage = aws.String(te.message)
case sdkException:
m.FinalSDKException = aws.String(te.exception)
m.FinalSDKExceptionMessage = aws.String(te.message)
}
}

View File

@ -0,0 +1,26 @@
package csm
type metricException interface {
Exception() string
Message() string
}
type requestException struct {
exception string
message string
}
func (e requestException) Exception() string {
return e.exception
}
func (e requestException) Message() string {
return e.message
}
type awsException struct {
requestException
}
type sdkException struct {
requestException
}

View File

@ -82,27 +82,29 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
if r.Error != nil {
if awserr, ok := r.Error.(awserr.Error); ok {
setError(&m, awserr)
m.SetException(getMetricException(awserr))
}
}
m.TruncateFields()
rep.metricsCh.Push(m)
}
func setError(m *metric, err awserr.Error) {
msg := err.Message()
func getMetricException(err awserr.Error) metricException {
msg := err.Error()
code := err.Code()
switch code {
case "RequestError",
"SerializationError",
request.CanceledErrorCode:
m.SDKException = &code
m.SDKExceptionMessage = &msg
return sdkException{
requestException{exception: code, message: msg},
}
default:
m.AWSException = &code
m.AWSExceptionMessage = &msg
return awsException{
requestException{exception: code, message: msg},
}
}
}
@ -113,16 +115,31 @@ func (rep *Reporter) sendAPICallMetric(r *request.Request) {
now := time.Now()
m := metric{
ClientID: aws.String(rep.clientID),
API: aws.String(r.Operation.Name),
Service: aws.String(r.ClientInfo.ServiceID),
Timestamp: (*metricTime)(&now),
Type: aws.String("ApiCall"),
AttemptCount: aws.Int(r.RetryCount + 1),
Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
XAmzRequestID: aws.String(r.RequestID),
ClientID: aws.String(rep.clientID),
API: aws.String(r.Operation.Name),
Service: aws.String(r.ClientInfo.ServiceID),
Timestamp: (*metricTime)(&now),
UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
Type: aws.String("ApiCall"),
AttemptCount: aws.Int(r.RetryCount + 1),
Region: r.Config.Region,
Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
XAmzRequestID: aws.String(r.RequestID),
MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
}
if r.HTTPResponse != nil {
m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
}
if r.Error != nil {
if awserr, ok := r.Error.(awserr.Error); ok {
m.SetFinalException(getMetricException(awserr))
}
}
m.TruncateFields()
// TODO: Probably want to figure something out for logging dropped
// metrics
rep.metricsCh.Push(m)
@ -222,9 +239,22 @@ func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
return
}
apiCallHandler := request.NamedHandler{Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric}
handlers.Complete.PushFrontNamed(apiCallHandler)
handlers.Complete.PushFrontNamed(request.NamedHandler{
Name: APICallMetricHandlerName,
Fn: rep.sendAPICallMetric,
})
apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric}
handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler)
handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
Name: APICallAttemptMetricHandlerName,
Fn: rep.sendAPICallAttemptMetric,
})
}
// boolIntValue return 1 for true and 0 for false.
func boolIntValue(b bool) int {
if b {
return 1
}
return 0
}
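
A hedged sketch of wiring the reporter into a session so the new per-attempt metrics flow through the CompleteAttempt handler list; the client ID and local agent address are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws/csm"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Start a client-side monitoring reporter pointed at a local CSM agent.
        r, err := csm.Start("example-client-id", "127.0.0.1:31000")
        if err != nil {
            log.Fatal(err)
        }

        sess := session.Must(session.NewSession())

        // InjectHandlers registers the ApiCall and ApiCallAttempt metric
        // handlers, the latter now on the CompleteAttempt list.
        r.InjectHandlers(&sess.Handlers)
    }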

View File

@ -24,6 +24,7 @@ import (
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
// A Defaults provides a collection of default values for SDK clients.
@ -92,17 +93,28 @@ func Handlers() request.Handlers {
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
return credentials.NewCredentials(&credentials.ChainProvider{
VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
Providers: []credentials.Provider{
&credentials.EnvProvider{},
&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
RemoteCredProvider(*cfg, handlers),
},
Providers: CredProviders(cfg, handlers),
})
}
// CredProviders returns the slice of providers used in
// the default credential chain.
//
// For applications that need to use some other provider (for example use
// different environment variables for legacy reasons) but still fall back
// on the default chain of providers. This allows that default chain to be
// automatically updated.
func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
return []credentials.Provider{
&credentials.EnvProvider{},
&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
RemoteCredProvider(*cfg, handlers),
}
}
const (
httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
)
// RemoteCredProvider returns a credentials provider for the default remote
@ -112,8 +124,8 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P
return localHTTPCredProvider(cfg, handlers, u)
}
if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 {
u := fmt.Sprintf("http://169.254.170.2%s", uri)
if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 {
u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri)
return httpCredProvider(cfg, handlers, u)
}
@ -176,6 +188,7 @@ func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) crede
return endpointcreds.NewProviderClient(cfg, handlers, u,
func(p *endpointcreds.Provider) {
p.ExpiryWindow = 5 * time.Minute
p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
},
)
}
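
As the CredProviders doc comment suggests, the new helper lets an application keep the standard chain while putting a custom provider in front of it. A rough sketch, where the prepended EnvProvider stands in for any application-defined credentials.Provider:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws/credentials"
        "github.com/aws/aws-sdk-go/aws/defaults"
    )

    func main() {
        def := defaults.Get()

        // Placeholder for an application-defined credentials.Provider.
        var legacyEnvProvider credentials.Provider = &credentials.EnvProvider{}

        // Prepend the custom provider to the default chain so later updates
        // to the default chain are picked up automatically.
        providers := append(
            []credentials.Provider{legacyEnvProvider},
            defaults.CredProviders(def.Config, def.Handlers)...,
        )

        def.Config.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
            Providers: providers,
        })
    }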

View File

@ -16,6 +16,7 @@ go_library(
"//vendor/github.com/aws/aws-sdk-go/aws/client/metadata:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/corehandlers:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/internal/sdkuri:go_default_library",
],
)

View File

@ -4,12 +4,12 @@ import (
"encoding/json"
"fmt"
"net/http"
"path"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkuri"
)
// GetMetadata uses the path provided to request information from the EC2
@ -19,7 +19,7 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {
op := &request.Operation{
Name: "GetMetadata",
HTTPMethod: "GET",
HTTPPath: path.Join("/", "meta-data", p),
HTTPPath: sdkuri.PathJoin("/meta-data", p),
}
output := &metadataOutput{}
@ -35,7 +35,7 @@ func (c *EC2Metadata) GetUserData() (string, error) {
op := &request.Operation{
Name: "GetUserData",
HTTPMethod: "GET",
HTTPPath: path.Join("/", "user-data"),
HTTPPath: "/user-data",
}
output := &metadataOutput{}
@ -56,7 +56,7 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
op := &request.Operation{
Name: "GetDynamicData",
HTTPMethod: "GET",
HTTPPath: path.Join("/", "dynamic", p),
HTTPPath: sdkuri.PathJoin("/dynamic", p),
}
output := &metadataOutput{}
@ -118,6 +118,10 @@ func (c *EC2Metadata) Region() (string, error) {
return "", err
}
if len(resp) == 0 {
return "", awserr.New("EC2MetadataError", "invalid Region response", nil)
}
// returns region without the suffix. Eg: us-west-2a becomes us-west-2
return resp[:len(resp)-1], nil
}
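
The added length check guards Region against an empty metadata response before the availability-zone letter is trimmed. A short usage sketch; it only works from inside EC2 (or against a stubbed metadata endpoint):

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())
        md := ec2metadata.New(sess)

        // Region strips the trailing availability-zone letter, e.g.
        // "us-west-2a" becomes "us-west-2".
        region, err := md.Region()
        if err != nil {
            log.Fatal(err)
        }
        log.Println("region:", region)
    }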

View File

@ -4,7 +4,7 @@
// This package's client can be disabled completely by setting the environment
// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
// true instructs the SDK to disable the EC2 Metadata client. The client cannot
// be used while the environemnt variable is set to true, (case insensitive).
// be used while the environment variable is set to true, (case insensitive).
package ec2metadata
import (
@ -72,6 +72,7 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
ServiceID: ServiceName,
Endpoint: endpoint,
APIVersion: "latest",
},

View File

@ -5,6 +5,7 @@ go_library(
srcs = [
"decode.go",
"defaults.go",
"dep_service_ids.go",
"doc.go",
"endpoints.go",
"v3model.go",

View File

@ -84,6 +84,8 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol
custAddEC2Metadata(p)
custAddS3DualStack(p)
custRmIotDataService(p)
custFixAppAutoscalingChina(p)
custFixAppAutoscalingUsGov(p)
}
return ps, nil
@ -94,7 +96,12 @@ func custAddS3DualStack(p *partition) {
return
}
s, ok := p.Services["s3"]
custAddDualstack(p, "s3")
custAddDualstack(p, "s3-control")
}
func custAddDualstack(p *partition, svcName string) {
s, ok := p.Services[svcName]
if !ok {
return
}
@ -102,7 +109,7 @@ func custAddS3DualStack(p *partition) {
s.Defaults.HasDualStack = boxedTrue
s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}"
p.Services["s3"] = s
p.Services[svcName] = s
}
func custAddEC2Metadata(p *partition) {
@ -122,6 +129,54 @@ func custRmIotDataService(p *partition) {
delete(p.Services, "data.iot")
}
func custFixAppAutoscalingChina(p *partition) {
if p.ID != "aws-cn" {
return
}
const serviceName = "application-autoscaling"
s, ok := p.Services[serviceName]
if !ok {
return
}
const expectHostname = `autoscaling.{region}.amazonaws.com`
if e, a := s.Defaults.Hostname, expectHostname; e != a {
fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a)
return
}
s.Defaults.Hostname = expectHostname + ".cn"
p.Services[serviceName] = s
}
func custFixAppAutoscalingUsGov(p *partition) {
if p.ID != "aws-us-gov" {
return
}
const serviceName = "application-autoscaling"
s, ok := p.Services[serviceName]
if !ok {
return
}
if a := s.Defaults.CredentialScope.Service; a != "" {
fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a)
return
}
if a := s.Defaults.Hostname; a != "" {
fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a)
return
}
s.Defaults.CredentialScope.Service = "application-autoscaling"
s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com"
p.Services[serviceName] = s
}
type decodeModelError struct {
awsError
}

File diff suppressed because it is too large.

View File

@ -0,0 +1,141 @@
package endpoints
// Service identifiers
//
// Deprecated: Use client package's EndpointID value instead of these
// ServiceIDs. These IDs are not maintained, and are out of date.
const (
A4bServiceID = "a4b" // A4b.
AcmServiceID = "acm" // Acm.
AcmPcaServiceID = "acm-pca" // AcmPca.
ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor.
ApiPricingServiceID = "api.pricing" // ApiPricing.
ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker.
ApigatewayServiceID = "apigateway" // Apigateway.
ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
Appstream2ServiceID = "appstream2" // Appstream2.
AppsyncServiceID = "appsync" // Appsync.
AthenaServiceID = "athena" // Athena.
AutoscalingServiceID = "autoscaling" // Autoscaling.
AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans.
BatchServiceID = "batch" // Batch.
BudgetsServiceID = "budgets" // Budgets.
CeServiceID = "ce" // Ce.
ChimeServiceID = "chime" // Chime.
Cloud9ServiceID = "cloud9" // Cloud9.
ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
CloudformationServiceID = "cloudformation" // Cloudformation.
CloudfrontServiceID = "cloudfront" // Cloudfront.
CloudhsmServiceID = "cloudhsm" // Cloudhsm.
Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2.
CloudsearchServiceID = "cloudsearch" // Cloudsearch.
CloudtrailServiceID = "cloudtrail" // Cloudtrail.
CodebuildServiceID = "codebuild" // Codebuild.
CodecommitServiceID = "codecommit" // Codecommit.
CodedeployServiceID = "codedeploy" // Codedeploy.
CodepipelineServiceID = "codepipeline" // Codepipeline.
CodestarServiceID = "codestar" // Codestar.
CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
CognitoSyncServiceID = "cognito-sync" // CognitoSync.
ComprehendServiceID = "comprehend" // Comprehend.
ConfigServiceID = "config" // Config.
CurServiceID = "cur" // Cur.
DatapipelineServiceID = "datapipeline" // Datapipeline.
DaxServiceID = "dax" // Dax.
DevicefarmServiceID = "devicefarm" // Devicefarm.
DirectconnectServiceID = "directconnect" // Directconnect.
DiscoveryServiceID = "discovery" // Discovery.
DmsServiceID = "dms" // Dms.
DsServiceID = "ds" // Ds.
DynamodbServiceID = "dynamodb" // Dynamodb.
Ec2ServiceID = "ec2" // Ec2.
Ec2metadataServiceID = "ec2metadata" // Ec2metadata.
EcrServiceID = "ecr" // Ecr.
EcsServiceID = "ecs" // Ecs.
ElasticacheServiceID = "elasticache" // Elasticache.
ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk.
ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem.
ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing.
ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce.
ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder.
EmailServiceID = "email" // Email.
EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace.
EsServiceID = "es" // Es.
EventsServiceID = "events" // Events.
FirehoseServiceID = "firehose" // Firehose.
FmsServiceID = "fms" // Fms.
GameliftServiceID = "gamelift" // Gamelift.
GlacierServiceID = "glacier" // Glacier.
GlueServiceID = "glue" // Glue.
GreengrassServiceID = "greengrass" // Greengrass.
GuarddutyServiceID = "guardduty" // Guardduty.
HealthServiceID = "health" // Health.
IamServiceID = "iam" // Iam.
ImportexportServiceID = "importexport" // Importexport.
InspectorServiceID = "inspector" // Inspector.
IotServiceID = "iot" // Iot.
IotanalyticsServiceID = "iotanalytics" // Iotanalytics.
KinesisServiceID = "kinesis" // Kinesis.
KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
KmsServiceID = "kms" // Kms.
LambdaServiceID = "lambda" // Lambda.
LightsailServiceID = "lightsail" // Lightsail.
LogsServiceID = "logs" // Logs.
MachinelearningServiceID = "machinelearning" // Machinelearning.
MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
MediaconvertServiceID = "mediaconvert" // Mediaconvert.
MedialiveServiceID = "medialive" // Medialive.
MediapackageServiceID = "mediapackage" // Mediapackage.
MediastoreServiceID = "mediastore" // Mediastore.
MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
MghServiceID = "mgh" // Mgh.
MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
ModelsLexServiceID = "models.lex" // ModelsLex.
MonitoringServiceID = "monitoring" // Monitoring.
MturkRequesterServiceID = "mturk-requester" // MturkRequester.
NeptuneServiceID = "neptune" // Neptune.
OpsworksServiceID = "opsworks" // Opsworks.
OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
OrganizationsServiceID = "organizations" // Organizations.
PinpointServiceID = "pinpoint" // Pinpoint.
PollyServiceID = "polly" // Polly.
RdsServiceID = "rds" // Rds.
RedshiftServiceID = "redshift" // Redshift.
RekognitionServiceID = "rekognition" // Rekognition.
ResourceGroupsServiceID = "resource-groups" // ResourceGroups.
Route53ServiceID = "route53" // Route53.
Route53domainsServiceID = "route53domains" // Route53domains.
RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
S3ServiceID = "s3" // S3.
S3ControlServiceID = "s3-control" // S3Control.
SagemakerServiceID = "api.sagemaker" // Sagemaker.
SdbServiceID = "sdb" // Sdb.
SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
ServicediscoveryServiceID = "servicediscovery" // Servicediscovery.
ShieldServiceID = "shield" // Shield.
SmsServiceID = "sms" // Sms.
SnowballServiceID = "snowball" // Snowball.
SnsServiceID = "sns" // Sns.
SqsServiceID = "sqs" // Sqs.
SsmServiceID = "ssm" // Ssm.
StatesServiceID = "states" // States.
StoragegatewayServiceID = "storagegateway" // Storagegateway.
StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb.
StsServiceID = "sts" // Sts.
SupportServiceID = "support" // Support.
SwfServiceID = "swf" // Swf.
TaggingServiceID = "tagging" // Tagging.
TransferServiceID = "transfer" // Transfer.
TranslateServiceID = "translate" // Translate.
WafServiceID = "waf" // Waf.
WafRegionalServiceID = "waf-regional" // WafRegional.
WorkdocsServiceID = "workdocs" // Workdocs.
WorkmailServiceID = "workmail" // Workmail.
WorkspacesServiceID = "workspaces" // Workspaces.
XrayServiceID = "xray" // Xray.
)

View File

@ -35,7 +35,7 @@ type Options struct {
//
// If resolving an endpoint on the partition list the provided region will
// be used to determine which partition's domain name pattern to the service
// endpoint ID with. If both the service and region are unkonwn and resolving
// endpoint ID with. If both the service and region are unknown and resolving
// the endpoint on partition list an UnknownEndpointError error will be returned.
//
// If resolving and endpoint on a partition specific resolver that partition's

View File

@ -16,6 +16,10 @@ import (
type CodeGenOptions struct {
// Options for how the model will be decoded.
DecodeModelOptions DecodeModelOptions
// Disables code generation of the service endpoint prefix IDs defined in
// the model.
DisableGenerateServiceIDs bool
}
// Set combines all of the option functions together
@ -39,8 +43,16 @@ func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGe
return err
}
v := struct {
Resolver
CodeGenOptions
}{
Resolver: resolver,
CodeGenOptions: opts,
}
tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil {
if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil {
return fmt.Errorf("failed to execute template, %v", err)
}
@ -166,15 +178,17 @@ import (
"regexp"
)
{{ template "partition consts" . }}
{{ template "partition consts" $.Resolver }}
{{ range $_, $partition := . }}
{{ range $_, $partition := $.Resolver }}
{{ template "partition region consts" $partition }}
{{ end }}
{{ template "service consts" . }}
{{ if not $.DisableGenerateServiceIDs -}}
{{ template "service consts" $.Resolver }}
{{- end }}
{{ template "endpoint resolvers" . }}
{{ template "endpoint resolvers" $.Resolver }}
{{- end }}
{{ define "partition consts" }}

View File

@ -5,13 +5,9 @@ import "github.com/aws/aws-sdk-go/aws/awserr"
var (
// ErrMissingRegion is an error that is returned if region configuration is
// not found.
//
// @readonly
ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
// ErrMissingEndpoint is an error that is returned if an endpoint cannot be
// resolved for a service.
//
// @readonly
ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
)

View File

@ -19,6 +19,7 @@ type Handlers struct {
UnmarshalError HandlerList
Retry HandlerList
AfterRetry HandlerList
CompleteAttempt HandlerList
Complete HandlerList
}
@ -36,6 +37,7 @@ func (h *Handlers) Copy() Handlers {
UnmarshalMeta: h.UnmarshalMeta.copy(),
Retry: h.Retry.copy(),
AfterRetry: h.AfterRetry.copy(),
CompleteAttempt: h.CompleteAttempt.copy(),
Complete: h.Complete.copy(),
}
}
@ -53,6 +55,7 @@ func (h *Handlers) Clear() {
h.ValidateResponse.Clear()
h.Retry.Clear()
h.AfterRetry.Clear()
h.CompleteAttempt.Clear()
h.Complete.Clear()
}
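
As the request.Send refactor later in this diff shows, CompleteAttempt runs once at the end of every attempt, after retry evaluation, which is how the CSM reporter earlier in this diff records per-attempt metrics. A hedged sketch of attaching an application handler to the new list on a session; the handler name is a placeholder:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())

        // Log every attempt, including retries, once the attempt completes.
        sess.Handlers.CompleteAttempt.PushBackNamed(request.NamedHandler{
            Name: "example.attemptLogger", // placeholder name
            Fn: func(r *request.Request) {
                log.Printf("%s/%s attempt %d, err=%v",
                    r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount, r.Error)
            },
        })
    }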

View File

@ -4,7 +4,6 @@ import (
"bytes"
"fmt"
"io"
"net"
"net/http"
"net/url"
"reflect"
@ -122,7 +121,6 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
Handlers: handlers.Copy(),
Retryer: retryer,
AttemptTime: time.Now(),
Time: time.Now(),
ExpireTime: 0,
Operation: operation,
@ -266,7 +264,9 @@ func (r *Request) SetReaderBody(reader io.ReadSeeker) {
}
// Presign returns the request's signed URL. Error will be returned
// if the signing fails.
// if the signing fails. The expire parameter is only used for presigned Amazon
// S3 API requests. All other AWS services will use a fixed expiration
// time of 15 minutes.
//
// It is invalid to create a presigned URL with a expire duration 0 or less. An
// error is returned if expire duration is 0 or less.
@ -283,7 +283,9 @@ func (r *Request) Presign(expire time.Duration) (string, error) {
}
// PresignRequest behaves just like presign, with the addition of returning a
// set of headers that were signed.
// set of headers that were signed. The expire parameter is only used for
// presigned Amazon S3 API requests. All other AWS services will use a fixed
// expiration time of 15 minutes.
//
// It is invalid to create a presigned URL with a expire duration 0 or less. An
// error is returned if expire duration is 0 or less.
@ -462,80 +464,78 @@ func (r *Request) Send() error {
r.Handlers.Complete.Run(r)
}()
if err := r.Error; err != nil {
return err
}
for {
r.Error = nil
r.AttemptTime = time.Now()
if aws.BoolValue(r.Retryable) {
if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
}
// The previous http.Request will have a reference to the r.Body
// and the HTTP Client's Transport may still be reading from
// the request's body even though the Client's Do returned.
r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
r.ResetBody()
// Closing response body to ensure that no response body is leaked
// between retry attempts.
if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
r.HTTPResponse.Body.Close()
}
if err := r.Sign(); err != nil {
debugLogReqError(r, "Sign Request", false, err)
return err
}
r.Sign()
if r.Error != nil {
return r.Error
}
r.Retryable = nil
r.Handlers.Send.Run(r)
if r.Error != nil {
if !shouldRetryCancel(r) {
return r.Error
}
err := r.Error
if err := r.sendRequest(); err == nil {
return nil
} else if !shouldRetryCancel(r.Error) {
return err
} else {
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
if r.Error != nil {
debugLogReqError(r, "Send Request", false, err)
if r.Error != nil || !aws.BoolValue(r.Retryable) {
return r.Error
}
debugLogReqError(r, "Send Request", true, err)
r.prepareRetry()
continue
}
r.Handlers.UnmarshalMeta.Run(r)
r.Handlers.ValidateResponse.Run(r)
if r.Error != nil {
r.Handlers.UnmarshalError.Run(r)
err := r.Error
}
}
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
if r.Error != nil {
debugLogReqError(r, "Validate Response", false, err)
return r.Error
}
debugLogReqError(r, "Validate Response", true, err)
continue
}
func (r *Request) prepareRetry() {
if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
}
r.Handlers.Unmarshal.Run(r)
if r.Error != nil {
err := r.Error
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
if r.Error != nil {
debugLogReqError(r, "Unmarshal Response", false, err)
return r.Error
}
debugLogReqError(r, "Unmarshal Response", true, err)
continue
}
// The previous http.Request will have a reference to the r.Body
// and the HTTP Client's Transport may still be reading from
// the request's body even though the Client's Do returned.
r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
r.ResetBody()
break
// Closing response body to ensure that no response body is leaked
// between retry attempts.
if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
r.HTTPResponse.Body.Close()
}
}
func (r *Request) sendRequest() (sendErr error) {
defer r.Handlers.CompleteAttempt.Run(r)
r.Retryable = nil
r.Handlers.Send.Run(r)
if r.Error != nil {
debugLogReqError(r, "Send Request", r.WillRetry(), r.Error)
return r.Error
}
r.Handlers.UnmarshalMeta.Run(r)
r.Handlers.ValidateResponse.Run(r)
if r.Error != nil {
r.Handlers.UnmarshalError.Run(r)
debugLogReqError(r, "Validate Response", r.WillRetry(), r.Error)
return r.Error
}
r.Handlers.Unmarshal.Run(r)
if r.Error != nil {
debugLogReqError(r, "Unmarshal Response", r.WillRetry(), r.Error)
return r.Error
}
return nil
@ -561,30 +561,46 @@ func AddToUserAgent(r *Request, s string) {
r.HTTPRequest.Header.Set("User-Agent", s)
}
func shouldRetryCancel(r *Request) bool {
awsErr, ok := r.Error.(awserr.Error)
timeoutErr := false
errStr := r.Error.Error()
if ok {
if awsErr.Code() == CanceledErrorCode {
type temporary interface {
Temporary() bool
}
func shouldRetryCancel(err error) bool {
switch err := err.(type) {
case awserr.Error:
if err.Code() == CanceledErrorCode {
return false
}
err := awsErr.OrigErr()
netErr, netOK := err.(net.Error)
timeoutErr = netOK && netErr.Temporary()
if urlErr, ok := err.(*url.Error); !timeoutErr && ok {
errStr = urlErr.Err.Error()
return shouldRetryCancel(err.OrigErr())
case *url.Error:
if strings.Contains(err.Error(), "connection refused") {
// Refused connections should be retried as the service may not yet
// be running on the port. Go TCP dial considers refused
// connections as not temporary.
return true
}
// *url.Error only implements Temporary after golang 1.6 but since
// url.Error only wraps the error:
return shouldRetryCancel(err.Err)
case temporary:
// If the error is temporary, we want to allow continuation of the
// retry process
return err.Temporary()
case nil:
// `awserr.Error.OrigErr()` can be nil, meaning there was an error but
// because we don't know the cause, it is marked as retriable. See
// TestRequest4xxUnretryable for an example.
return true
default:
switch err.Error() {
case "net/http: request canceled",
"net/http: request canceled while waiting for connection":
// known 1.5 error case when an http request is cancelled
return false
}
// here we don't know the error; so we allow a retry.
return true
}
// There can be two types of canceled errors here.
// The first being a net.Error and the other being an error.
// If the request was timed out, we want to continue the retry
// process. Otherwise, return the canceled error.
return timeoutErr ||
(errStr != "net/http: request canceled" &&
errStr != "net/http: request canceled while waiting for connection")
}
// SanitizeHostForHeader removes default port from host and updates request.Host
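
The sharpened Presign documentation is worth a concrete illustration: for presigned Amazon S3 requests the expire duration is honored as given, while other services fall back to a fixed 15 minutes. A sketch of presigning an S3 GetObject request; bucket and key are placeholders:

    package main

    import (
        "log"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
            Bucket: aws.String("example-bucket"), // placeholder
            Key:    aws.String("example-key"),    // placeholder
        })

        // For S3 the 10 minute expiry is used as given; for most other
        // services the presigned URL would be fixed at 15 minutes.
        url, err := req.Presign(10 * time.Minute)
        if err != nil {
            log.Fatal(err)
        }
        log.Println(url)
    }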

View File

@ -40,6 +40,7 @@ var throttleCodes = map[string]struct{}{
"RequestThrottled": {},
"TooManyRequestsException": {}, // Lambda functions
"PriorRequestNotComplete": {}, // Route53
"TransactionInProgressException": {},
}
// credsExpiredCodes is a collection of error codes which signify the credentials
@ -97,7 +98,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool {
}
if t, ok := err.(temporaryError); ok {
return t.Temporary()
return t.Temporary() || isErrConnectionReset(err)
}
return isErrConnectionReset(err)

View File

@ -17,6 +17,12 @@ const (
ParamMinValueErrCode = "ParamMinValueError"
// ParamMinLenErrCode is the error code for fields without enough elements.
ParamMinLenErrCode = "ParamMinLenError"
// ParamMaxLenErrCode is the error code for value being too long.
ParamMaxLenErrCode = "ParamMaxLenError"
// ParamFormatErrCode is the error code for a field with invalid
// format or characters.
ParamFormatErrCode = "ParamFormatInvalidError"
)
// Validator provides a way for types to perform validation logic on their
@ -232,3 +238,49 @@ func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
func (e *ErrParamMinLen) MinLen() int {
return e.min
}
// An ErrParamMaxLen represents a maximum length parameter error.
type ErrParamMaxLen struct {
errInvalidParam
max int
}
// NewErrParamMaxLen creates a new maximum length parameter error.
func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen {
return &ErrParamMaxLen{
errInvalidParam: errInvalidParam{
code: ParamMaxLenErrCode,
field: field,
msg: fmt.Sprintf("maximum size of %v, %v", max, value),
},
max: max,
}
}
// MaxLen returns the field's required maximum length.
func (e *ErrParamMaxLen) MaxLen() int {
return e.max
}
// An ErrParamFormat represents an invalid format parameter error.
type ErrParamFormat struct {
errInvalidParam
format string
}
// NewErrParamFormat creates a new invalid format parameter error.
func NewErrParamFormat(field string, format, value string) *ErrParamFormat {
return &ErrParamFormat{
errInvalidParam: errInvalidParam{
code: ParamFormatErrCode,
field: field,
msg: fmt.Sprintf("format %v, %v", format, value),
},
format: format,
}
}
// Format returns the field's required format.
func (e *ErrParamFormat) Format() string {
return e.format
}

View File

@ -17,12 +17,14 @@ go_library(
"//vendor/github.com/aws/aws-sdk-go/aws/client:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/corehandlers:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/csm:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/defaults:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/endpoints:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library",
"//vendor/github.com/go-ini/ini:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/internal/ini:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/internal/shareddefaults:go_default_library",
],
)

View File

@ -99,7 +99,7 @@ handler logs every request and its payload made by a service client:
sess.Handlers.Send.PushFront(func(r *request.Request) {
// Log every request made and its payload
logger.Println("Request: %s/%s, Payload: %s",
logger.Printf("Request: %s/%s, Payload: %s",
r.ClientInfo.ServiceName, r.Operation, r.Params)
})
@ -128,7 +128,7 @@ read. The Session will be created from configuration values from the shared
credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).
Credentials are the values the SDK should use for authenticating requests with
AWS Services. They arfrom a configuration file will need to include both
AWS Services. They are from a configuration file will need to include both
aws_access_key_id and aws_secret_access_key must be provided together in the
same file to be considered valid. The values will be ignored if not a complete
group. aws_session_token is an optional field that can be provided if both of
@ -183,7 +183,7 @@ be returned when creating the session.
// from assumed role.
svc := s3.New(sess)
To setup assume role outside of a session see the stscrds.AssumeRoleProvider
To setup assume role outside of a session see the stscreds.AssumeRoleProvider
documentation.
Environment Variables

View File

@ -4,6 +4,7 @@ import (
"os"
"strconv"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/defaults"
)
@ -79,7 +80,7 @@ type envConfig struct {
// AWS_CONFIG_FILE=$HOME/my_shared_config
SharedConfigFile string
// Sets the path to a custom Credentials Authroity (CA) Bundle PEM file
// Sets the path to a custom Credentials Authority (CA) Bundle PEM file
// that the SDK will use instead of the system's root CA bundle.
// Only use this if you want to configure the SDK to use a custom set
// of CAs.
@ -101,6 +102,12 @@ type envConfig struct {
CSMEnabled bool
CSMPort string
CSMClientID string
enableEndpointDiscovery string
// Enables endpoint discovery via environment variables.
//
// AWS_ENABLE_ENDPOINT_DISCOVERY=true
EnableEndpointDiscovery *bool
}
var (
@ -125,6 +132,10 @@ var (
"AWS_SESSION_TOKEN",
}
enableEndpointDiscoveryEnvKey = []string{
"AWS_ENABLE_ENDPOINT_DISCOVERY",
}
regionEnvKeys = []string{
"AWS_REGION",
"AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
@ -194,6 +205,12 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
setFromEnvVal(&cfg.Region, regionKeys)
setFromEnvVal(&cfg.Profile, profileKeys)
// endpoint discovery is in reference to it being enabled.
setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey)
if len(cfg.enableEndpointDiscovery) > 0 {
cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false")
}
setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)

View File

@ -14,13 +14,32 @@ import (
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/processcreds"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/csm"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
const (
// ErrCodeSharedConfig represents an error that occurs in the shared
// configuration logic
ErrCodeSharedConfig = "SharedConfigErr"
)
// ErrSharedConfigSourceCollision will be returned if a section contains both
// source_profile and credential_source
var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil)
// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
// variables are empty and Environment was set as the credential source
var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil)
// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided
var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil)
// A Session provides a central location to create service clients from and
// store configurations and request handlers for those services.
//
@ -434,8 +453,67 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
}
}
if cfg.EnableEndpointDiscovery == nil {
if envCfg.EnableEndpointDiscovery != nil {
cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery)
} else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil {
cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery)
}
}
// Configure credentials if not already set
if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
// inspect the profile to see if a credential source has been specified.
if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 {
// if both credential_source and source_profile have been set, return an error
// as this is undefined behavior.
if len(sharedCfg.AssumeRole.SourceProfile) > 0 {
return ErrSharedConfigSourceCollision
}
// valid credential source values
const (
credSourceEc2Metadata = "Ec2InstanceMetadata"
credSourceEnvironment = "Environment"
credSourceECSContainer = "EcsContainer"
)
switch sharedCfg.AssumeRole.CredentialSource {
case credSourceEc2Metadata:
cfgCp := *cfg
p := defaults.RemoteCredProvider(cfgCp, handlers)
cfgCp.Credentials = credentials.NewCredentials(p)
if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
// AssumeRole Token provider is required if doing Assume Role
// with MFA.
return AssumeRoleTokenProviderNotSetError{}
}
cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
case credSourceEnvironment:
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
envCfg.Creds,
)
case credSourceECSContainer:
if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
return ErrSharedConfigECSContainerEnvVarEmpty
}
cfgCp := *cfg
p := defaults.RemoteCredProvider(cfgCp, handlers)
creds := credentials.NewCredentials(p)
cfg.Credentials = creds
default:
return ErrSharedConfigInvalidCredSource
}
return nil
}
if len(envCfg.Creds.AccessKeyID) > 0 {
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
envCfg.Creds,
@ -445,36 +523,22 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
sharedCfg.AssumeRoleSource.Creds,
)
if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
// AssumeRole Token provider is required if doing Assume Role
// with MFA.
return AssumeRoleTokenProviderNotSetError{}
}
cfg.Credentials = stscreds.NewCredentials(
&Session{
Config: &cfgCp,
Handlers: handlers.Copy(),
},
sharedCfg.AssumeRole.RoleARN,
func(opt *stscreds.AssumeRoleProvider) {
opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
// Assume role with external ID
if len(sharedCfg.AssumeRole.ExternalID) > 0 {
opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
}
// Assume role with MFA
if len(sharedCfg.AssumeRole.MFASerial) > 0 {
opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
}
},
)
cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
} else if len(sharedCfg.Creds.AccessKeyID) > 0 {
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
sharedCfg.Creds,
)
} else if len(sharedCfg.CredentialProcess) > 0 {
cfg.Credentials = processcreds.NewCredentials(
sharedCfg.CredentialProcess,
)
} else {
// Fallback to default credentials provider, include mock errors
// for the credential chain so user can identify why credentials
@ -493,6 +557,30 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
return nil
}
func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials {
return stscreds.NewCredentials(
&Session{
Config: &cfg,
Handlers: handlers.Copy(),
},
sharedCfg.AssumeRole.RoleARN,
func(opt *stscreds.AssumeRoleProvider) {
opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
// Assume role with external ID
if len(sharedCfg.AssumeRole.ExternalID) > 0 {
opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
}
// Assume role with MFA
if len(sharedCfg.AssumeRole.MFASerial) > 0 {
opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
}
},
)
}
// AssumeRoleTokenProviderNotSetError is an error returned when creating a session
// and the AssumeRoleTokenProvider option is not set while the shared config is
// configured to assume a role that requires an MFA token.
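For reference, a minimal sketch of how the assume-role paths above surface through the public session API; the profile name and the use of StdinTokenProvider are illustrative assumptions, not part of this change:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // SharedConfigEnable makes the session load ~/.aws/config, which is
        // where role_arn, source_profile/credential_source, and mfa_serial live.
        sess, err := session.NewSessionWithOptions(session.Options{
            Profile:           "cluster-admin",
            SharedConfigState: session.SharedConfigEnable,
            // Required when the profile sets mfa_serial; without it session
            // creation fails with AssumeRoleTokenProviderNotSetError.
            AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
        })
        if err != nil {
            log.Fatal(err)
        }
        _ = sess
    }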

View File

@ -2,11 +2,11 @@ package session
import (
"fmt"
"io/ioutil"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/go-ini/ini"
"github.com/aws/aws-sdk-go/internal/ini"
)
const (
@ -16,15 +16,21 @@ const (
sessionTokenKey = `aws_session_token` // optional
// Assume Role Credentials group
roleArnKey = `role_arn` // group required
sourceProfileKey = `source_profile` // group required
externalIDKey = `external_id` // optional
mfaSerialKey = `mfa_serial` // optional
roleSessionNameKey = `role_session_name` // optional
roleArnKey = `role_arn` // group required
sourceProfileKey = `source_profile` // group required (or credential_source)
credentialSourceKey = `credential_source` // group required (or source_profile)
externalIDKey = `external_id` // optional
mfaSerialKey = `mfa_serial` // optional
roleSessionNameKey = `role_session_name` // optional
// Additional Config fields
regionKey = `region`
// endpoint discovery group
enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
// External Credential Process
credentialProcessKey = `credential_process`
// DefaultSharedConfigProfile is the default profile to be used when
// loading configuration from the config files if another profile name
// is not provided.
@ -32,11 +38,12 @@ const (
)
type assumeRoleConfig struct {
RoleARN string
SourceProfile string
ExternalID string
MFASerial string
RoleSessionName string
RoleARN string
SourceProfile string
CredentialSource string
ExternalID string
MFASerial string
RoleSessionName string
}
// sharedConfig represents the configuration fields of the SDK config files.
@ -55,16 +62,25 @@ type sharedConfig struct {
AssumeRole assumeRoleConfig
AssumeRoleSource *sharedConfig
// An external process to request credentials
CredentialProcess string
// Region is the region the SDK should use for looking up AWS service endpoints
// and signing requests.
//
// region
Region string
// EnableEndpointDiscovery can be enabled in the shared config by setting
// endpoint_discovery_enabled to true
//
// endpoint_discovery_enabled = true
EnableEndpointDiscovery *bool
}
type sharedConfigFile struct {
Filename string
IniData *ini.File
IniData ini.Sections
}
// loadSharedConfig retrieves the configuration from the list of files
@ -105,19 +121,16 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
files := make([]sharedConfigFile, 0, len(filenames))
for _, filename := range filenames {
b, err := ioutil.ReadFile(filename)
if err != nil {
sections, err := ini.OpenFile(filename)
if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile {
// Skip files which can't be opened and read for whatever reason
continue
}
f, err := ini.Load(b)
if err != nil {
} else if err != nil {
return nil, SharedConfigLoadError{Filename: filename, Err: err}
}
files = append(files, sharedConfigFile{
Filename: filename, IniData: f,
Filename: filename, IniData: sections,
})
}
@ -127,6 +140,13 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
var assumeRoleSrc sharedConfig
if len(cfg.AssumeRole.CredentialSource) > 0 {
// setAssumeRoleSource is only called when source_profile is found.
// If both source_profile and credential_source are set, then
// ErrSharedConfigSourceCollision will be returned
return ErrSharedConfigSourceCollision
}
// Multiple level assume role chains are not supported
if cfg.AssumeRole.SourceProfile == origProfile {
assumeRoleSrc = *cfg
@ -171,45 +191,59 @@ func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFil
// if a config file only includes aws_access_key_id but no aws_secret_access_key
// the aws_access_key_id will be ignored.
func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
section, err := file.IniData.GetSection(profile)
if err != nil {
section, ok := file.IniData.GetSection(profile)
if !ok {
// Fall back to the alternate profile name: profile <name>
section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
if err != nil {
return SharedConfigProfileNotExistsError{Profile: profile, Err: err}
section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
if !ok {
return SharedConfigProfileNotExistsError{Profile: profile, Err: nil}
}
}
// Shared Credentials
akid := section.Key(accessKeyIDKey).String()
secret := section.Key(secretAccessKey).String()
akid := section.String(accessKeyIDKey)
secret := section.String(secretAccessKey)
if len(akid) > 0 && len(secret) > 0 {
cfg.Creds = credentials.Value{
AccessKeyID: akid,
SecretAccessKey: secret,
SessionToken: section.Key(sessionTokenKey).String(),
SessionToken: section.String(sessionTokenKey),
ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
}
}
// Assume Role
roleArn := section.Key(roleArnKey).String()
srcProfile := section.Key(sourceProfileKey).String()
if len(roleArn) > 0 && len(srcProfile) > 0 {
roleArn := section.String(roleArnKey)
srcProfile := section.String(sourceProfileKey)
credentialSource := section.String(credentialSourceKey)
hasSource := len(srcProfile) > 0 || len(credentialSource) > 0
if len(roleArn) > 0 && hasSource {
cfg.AssumeRole = assumeRoleConfig{
RoleARN: roleArn,
SourceProfile: srcProfile,
ExternalID: section.Key(externalIDKey).String(),
MFASerial: section.Key(mfaSerialKey).String(),
RoleSessionName: section.Key(roleSessionNameKey).String(),
RoleARN: roleArn,
SourceProfile: srcProfile,
CredentialSource: credentialSource,
ExternalID: section.String(externalIDKey),
MFASerial: section.String(mfaSerialKey),
RoleSessionName: section.String(roleSessionNameKey),
}
}
// `credential_process`
if credProc := section.String(credentialProcessKey); len(credProc) > 0 {
cfg.CredentialProcess = credProc
}
// Region
if v := section.Key(regionKey).String(); len(v) > 0 {
if v := section.String(regionKey); len(v) > 0 {
cfg.Region = v
}
// Endpoint discovery
if section.Has(enableEndpointDiscoveryKey) {
v := section.Bool(enableEndpointDiscoveryKey)
cfg.EnableEndpointDiscovery = &v
}
return nil
}
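A hedged sketch of how a credential_process value is consumed once it reaches the credential chain; the helper command path is hypothetical:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials/processcreds"
    )

    func main() {
        // Mirrors what mergeConfigSrcs does when a profile sets
        // credential_process: the command is executed and its JSON output
        // (Version, AccessKeyId, SecretAccessKey, ...) becomes the credentials.
        creds := processcreds.NewCredentials("/usr/local/bin/fetch-creds --profile ci")
        v, err := creds.Get()
        if err != nil {
            fmt.Println("credential process failed:", err)
            return
        }
        fmt.Println("provider:", v.ProviderName)
    }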

View File

@ -98,25 +98,25 @@ var ignoredHeaders = rules{
var requiredSignedHeaders = rules{
whitelist{
mapRule{
"Cache-Control": struct{}{},
"Content-Disposition": struct{}{},
"Content-Encoding": struct{}{},
"Content-Language": struct{}{},
"Content-Md5": struct{}{},
"Content-Type": struct{}{},
"Expires": struct{}{},
"If-Match": struct{}{},
"If-Modified-Since": struct{}{},
"If-None-Match": struct{}{},
"If-Unmodified-Since": struct{}{},
"Range": struct{}{},
"X-Amz-Acl": struct{}{},
"X-Amz-Copy-Source": struct{}{},
"X-Amz-Copy-Source-If-Match": struct{}{},
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
"X-Amz-Copy-Source-If-None-Match": struct{}{},
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
"X-Amz-Copy-Source-Range": struct{}{},
"Cache-Control": struct{}{},
"Content-Disposition": struct{}{},
"Content-Encoding": struct{}{},
"Content-Language": struct{}{},
"Content-Md5": struct{}{},
"Content-Type": struct{}{},
"Expires": struct{}{},
"If-Match": struct{}{},
"If-Modified-Since": struct{}{},
"If-None-Match": struct{}{},
"If-Unmodified-Since": struct{}{},
"Range": struct{}{},
"X-Amz-Acl": struct{}{},
"X-Amz-Copy-Source": struct{}{},
"X-Amz-Copy-Source-If-Match": struct{}{},
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
"X-Amz-Copy-Source-If-None-Match": struct{}{},
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
"X-Amz-Copy-Source-Range": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
@ -134,6 +134,7 @@ var requiredSignedHeaders = rules{
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
"X-Amz-Storage-Class": struct{}{},
"X-Amz-Tagging": struct{}{},
"X-Amz-Website-Redirect-Location": struct{}{},
"X-Amz-Content-Sha256": struct{}{},
},
@ -181,7 +182,7 @@ type Signer struct {
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
DisableURIPathEscaping bool
// Disales the automatical setting of the HTTP request's Body field with the
// Disables the automatic setting of the HTTP request's Body field with the
// io.ReadSeeker passed in to the signer. This is useful if you're using a
// custom wrapper around the body for the io.ReadSeeker and want to preserve
// the Body value on the Request.Body.
@ -421,7 +422,7 @@ var SignRequestHandler = request.NamedHandler{
// If the credentials of the request's config are set to
// credentials.AnonymousCredentials the request will not be signed.
func SignSDKRequest(req *request.Request) {
signSDKRequestWithCurrTime(req, time.Now)
SignSDKRequestWithCurrentTime(req, time.Now)
}
// BuildNamedHandler will build a generic handler for signing.
@ -429,12 +430,15 @@ func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler
return request.NamedHandler{
Name: name,
Fn: func(req *request.Request) {
signSDKRequestWithCurrTime(req, time.Now, opts...)
SignSDKRequestWithCurrentTime(req, time.Now, opts...)
},
}
}
func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
// function passed in. Behaves the same as SignSDKRequest with the exception
// that the request is signed with the value returned by the current time function.
func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
// If the request does not need to be signed ignore the signing of the
// request if the AnonymousCredentials object is used.
if req.Config.Credentials == credentials.AnonymousCredentials {
@ -470,13 +474,9 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time
opt(v4)
}
signingTime := req.Time
if !req.LastSignedAt.IsZero() {
signingTime = req.LastSignedAt
}
curTime := curTimeFn()
signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
name, region, req.ExpireTime, req.ExpireTime > 0, signingTime,
name, region, req.ExpireTime, req.ExpireTime > 0, curTime,
)
if err != nil {
req.Error = err
@ -485,7 +485,7 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time
}
req.SignedHeaderVals = signedHeaders
req.LastSignedAt = curTimeFn()
req.LastSignedAt = curTime
}
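A sketch of what the newly exported SignSDKRequestWithCurrentTime enables; the package and handler names here are invented for illustration:

    package signerfixture

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws/request"
        v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
    )

    // FixedClockSignHandler signs requests as if they were made at a fixed
    // instant, which is useful for deterministic request replay in tests.
    // A client would install it with something like:
    //   svc.Handlers.Sign.Swap(v4.SignRequestHandler.Name, FixedClockSignHandler)
    var FixedClockSignHandler = request.NamedHandler{
        Name: "test.FixedClockSignHandler",
        Fn: func(r *request.Request) {
            v4.SignSDKRequestWithCurrentTime(r, func() time.Time {
                return time.Date(2019, 2, 4, 0, 0, 0, 0, time.UTC)
            })
        },
    }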
const logSignInfoMsg = `DEBUG: Request Signature:
@ -739,14 +739,22 @@ func makeSha256Reader(reader io.ReadSeeker) []byte {
start, _ := reader.Seek(0, sdkio.SeekCurrent)
defer reader.Seek(start, sdkio.SeekStart)
io.Copy(hash, reader)
// Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
// smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
size, err := aws.SeekerLen(reader)
if err != nil {
io.Copy(hash, reader)
} else {
io.CopyN(hash, reader, size)
}
return hash.Sum(nil)
}
const doubleSpace = " "
// stripExcessSpaces will rewrite the passed in slice's string values to not
// contain muliple side-by-side spaces.
// contain multiple side-by-side spaces.
func stripExcessSpaces(vals []string) {
var j, k, l, m, spaces int
for i, str := range vals {

View File

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.14.12"
const SDKVersion = "1.16.26"

47
vendor/github.com/aws/aws-sdk-go/internal/ini/BUILD generated vendored Normal file
View File

@ -0,0 +1,47 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"ast.go",
"comma_token.go",
"comment_token.go",
"doc.go",
"empty_token.go",
"expression.go",
"ini.go",
"ini_lexer.go",
"ini_parser.go",
"literal_tokens.go",
"newline_token.go",
"number_helper.go",
"op_tokens.go",
"parse_error.go",
"parse_stack.go",
"sep_tokens.go",
"skipper.go",
"statement.go",
"value_util.go",
"visitor.go",
"walker.go",
"ws_token.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/aws/aws-sdk-go/internal/ini",
importpath = "github.com/aws/aws-sdk-go/internal/ini",
visibility = ["//vendor/github.com/aws/aws-sdk-go:__subpackages__"],
deps = ["//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

120
vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go generated vendored Normal file
View File

@ -0,0 +1,120 @@
package ini
// ASTKind represents different states in the parse table
// and the type of AST that is being constructed
type ASTKind int
// ASTKind* is used in the parse table to transition between
// the different states
const (
ASTKindNone = ASTKind(iota)
ASTKindStart
ASTKindExpr
ASTKindEqualExpr
ASTKindStatement
ASTKindSkipStatement
ASTKindExprStatement
ASTKindSectionStatement
ASTKindNestedSectionStatement
ASTKindCompletedNestedSectionStatement
ASTKindCommentStatement
ASTKindCompletedSectionStatement
)
func (k ASTKind) String() string {
switch k {
case ASTKindNone:
return "none"
case ASTKindStart:
return "start"
case ASTKindExpr:
return "expr"
case ASTKindStatement:
return "stmt"
case ASTKindSectionStatement:
return "section_stmt"
case ASTKindExprStatement:
return "expr_stmt"
case ASTKindCommentStatement:
return "comment"
case ASTKindNestedSectionStatement:
return "nested_section_stmt"
case ASTKindCompletedSectionStatement:
return "completed_stmt"
case ASTKindSkipStatement:
return "skip"
default:
return ""
}
}
// AST allows us to determine what kind of node we
// are on without necessarily needing to cast.
//
// The root is always the first node in Children
type AST struct {
Kind ASTKind
Root Token
RootToken bool
Children []AST
}
func newAST(kind ASTKind, root AST, children ...AST) AST {
return AST{
Kind: kind,
Children: append([]AST{root}, children...),
}
}
func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST {
return AST{
Kind: kind,
Root: root,
RootToken: true,
Children: children,
}
}
// AppendChild will append to the list of children an AST has.
func (a *AST) AppendChild(child AST) {
a.Children = append(a.Children, child)
}
// GetRoot will return the root AST which can be the first entry
// in the children list or a token.
func (a *AST) GetRoot() AST {
if a.RootToken {
return *a
}
if len(a.Children) == 0 {
return AST{}
}
return a.Children[0]
}
// GetChildren will return the current AST's list of children
func (a *AST) GetChildren() []AST {
if len(a.Children) == 0 {
return []AST{}
}
if a.RootToken {
return a.Children
}
return a.Children[1:]
}
// SetChildren will set and override all children of the AST.
func (a *AST) SetChildren(children []AST) {
if a.RootToken {
a.Children = children
} else {
a.Children = append(a.Children[:1], children...)
}
}
// Start is used to indicate the starting state of the parse table.
var Start = newAST(ASTKindStart, AST{})

View File

@ -0,0 +1,11 @@
package ini
var commaRunes = []rune(",")
func isComma(b rune) bool {
return b == ','
}
func newCommaToken() Token {
return newToken(TokenComma, commaRunes, NoneType)
}

View File

@ -0,0 +1,35 @@
package ini
// isComment will return whether or not the next byte(s) is a
// comment.
func isComment(b []rune) bool {
if len(b) == 0 {
return false
}
switch b[0] {
case ';':
return true
case '#':
return true
}
return false
}
// newCommentToken will create a comment token and
// return how many bytes were read.
func newCommentToken(b []rune) (Token, int, error) {
i := 0
for ; i < len(b); i++ {
if b[i] == '\n' {
break
}
if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' {
break
}
}
return newToken(TokenComment, b[:i], NoneType), i, nil
}

29
vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go generated vendored Normal file
View File

@ -0,0 +1,29 @@
// Package ini is an LL(1) parser for configuration files.
//
// Example:
// sections, err := ini.OpenFile("/path/to/file")
// if err != nil {
// panic(err)
// }
//
// profile := "foo"
// section, ok := sections.GetSection(profile)
// if !ok {
// fmt.Printf("section %q could not be found", profile)
// }
//
// Below is the BNF that describes this parser
// Grammar:
// stmt -> value stmt'
// stmt' -> epsilon | op stmt
// value -> number | string | boolean | quoted_string
//
// section -> [ section'
// section' -> value section_close
// section_close -> ]
//
// SkipState will skip (NL WS)+
//
// comment -> # comment' | ; comment'
// comment' -> epsilon | value
package ini

View File

@ -0,0 +1,4 @@
package ini
// emptyToken is used to satisfy the Token interface
var emptyToken = newToken(TokenNone, []rune{}, NoneType)

View File

@ -0,0 +1,24 @@
package ini
// newExpression will return an expression AST.
// Expr represents an expression
//
// grammar:
// expr -> string | number
func newExpression(tok Token) AST {
return newASTWithRootToken(ASTKindExpr, tok)
}
func newEqualExpr(left AST, tok Token) AST {
return newASTWithRootToken(ASTKindEqualExpr, tok, left)
}
// EqualExprKey will return a LHS value in the equal expr
func EqualExprKey(ast AST) string {
children := ast.GetChildren()
if len(children) == 0 || ast.Kind != ASTKindEqualExpr {
return ""
}
return string(children[0].Root.Raw())
}

17
vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
// +build gofuzz
package ini
import (
"bytes"
)
func Fuzz(data []byte) int {
b := bytes.NewReader(data)
if _, err := Parse(b); err != nil {
return 0
}
return 1
}

51
vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go generated vendored Normal file
View File

@ -0,0 +1,51 @@
package ini
import (
"io"
"os"
"github.com/aws/aws-sdk-go/aws/awserr"
)
// OpenFile takes a path to a given file, and will open and parse
// that file.
func OpenFile(path string) (Sections, error) {
f, err := os.Open(path)
if err != nil {
return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err)
}
defer f.Close()
return Parse(f)
}
// Parse will parse the given file using the shared config
// visitor.
func Parse(f io.Reader) (Sections, error) {
tree, err := ParseAST(f)
if err != nil {
return Sections{}, err
}
v := NewDefaultVisitor()
if err = Walk(tree, v); err != nil {
return Sections{}, err
}
return v.Sections, nil
}
// ParseBytes will parse the given bytes and return the parsed sections.
func ParseBytes(b []byte) (Sections, error) {
tree, err := ParseASTBytes(b)
if err != nil {
return Sections{}, err
}
v := NewDefaultVisitor()
if err = Walk(tree, v); err != nil {
return Sections{}, err
}
return v.Sections, nil
}
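Since the package is internal to the SDK, a usage sketch is best expressed as a test inside the package itself; the test name and profile contents below are illustrative only:

    package ini

    import "testing"

    func TestSharedConfigKeysSketch(t *testing.T) {
        cfg := []byte("[profile dev]\n" +
            "credential_process = /usr/local/bin/fetch-creds\n" +
            "endpoint_discovery_enabled = true\n")

        sections, err := ParseBytes(cfg)
        if err != nil {
            t.Fatalf("parse error: %v", err)
        }

        section, ok := sections.GetSection("profile dev")
        if !ok {
            t.Fatal("section not found")
        }
        if v := section.String("credential_process"); v != "/usr/local/bin/fetch-creds" {
            t.Errorf("credential_process = %q", v)
        }
        if !section.Bool("endpoint_discovery_enabled") {
            t.Error("endpoint_discovery_enabled should be true")
        }
    }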

View File

@ -0,0 +1,165 @@
package ini
import (
"bytes"
"io"
"io/ioutil"
"github.com/aws/aws-sdk-go/aws/awserr"
)
const (
// ErrCodeUnableToReadFile is used when a file fails to be
// opened or read from.
ErrCodeUnableToReadFile = "FailedRead"
)
// TokenType represents the various different tokens types
type TokenType int
func (t TokenType) String() string {
switch t {
case TokenNone:
return "none"
case TokenLit:
return "literal"
case TokenSep:
return "sep"
case TokenOp:
return "op"
case TokenWS:
return "ws"
case TokenNL:
return "newline"
case TokenComment:
return "comment"
case TokenComma:
return "comma"
default:
return ""
}
}
// TokenType enums
const (
TokenNone = TokenType(iota)
TokenLit
TokenSep
TokenComma
TokenOp
TokenWS
TokenNL
TokenComment
)
type iniLexer struct{}
// Tokenize will return a list of tokens during lexical analysis of the
// io.Reader.
func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) {
b, err := ioutil.ReadAll(r)
if err != nil {
return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err)
}
return l.tokenize(b)
}
func (l *iniLexer) tokenize(b []byte) ([]Token, error) {
runes := bytes.Runes(b)
var err error
n := 0
tokenAmount := countTokens(runes)
tokens := make([]Token, tokenAmount)
count := 0
for len(runes) > 0 && count < tokenAmount {
switch {
case isWhitespace(runes[0]):
tokens[count], n, err = newWSToken(runes)
case isComma(runes[0]):
tokens[count], n = newCommaToken(), 1
case isComment(runes):
tokens[count], n, err = newCommentToken(runes)
case isNewline(runes):
tokens[count], n, err = newNewlineToken(runes)
case isSep(runes):
tokens[count], n, err = newSepToken(runes)
case isOp(runes):
tokens[count], n, err = newOpToken(runes)
default:
tokens[count], n, err = newLitToken(runes)
}
if err != nil {
return nil, err
}
count++
runes = runes[n:]
}
return tokens[:count], nil
}
func countTokens(runes []rune) int {
count, n := 0, 0
var err error
for len(runes) > 0 {
switch {
case isWhitespace(runes[0]):
_, n, err = newWSToken(runes)
case isComma(runes[0]):
_, n = newCommaToken(), 1
case isComment(runes):
_, n, err = newCommentToken(runes)
case isNewline(runes):
_, n, err = newNewlineToken(runes)
case isSep(runes):
_, n, err = newSepToken(runes)
case isOp(runes):
_, n, err = newOpToken(runes)
default:
_, n, err = newLitToken(runes)
}
if err != nil {
return 0
}
count++
runes = runes[n:]
}
return count + 1
}
// Token represents metadata about a given value.
type Token struct {
t TokenType
ValueType ValueType
base int
raw []rune
}
var emptyValue = Value{}
func newToken(t TokenType, raw []rune, v ValueType) Token {
return Token{
t: t,
raw: raw,
ValueType: v,
}
}
// Raw returns the raw runes that were consumed
func (tok Token) Raw() []rune {
return tok.raw
}
// Type returns the token type
func (tok Token) Type() TokenType {
return tok.t
}

View File

@ -0,0 +1,347 @@
package ini
import (
"fmt"
"io"
)
// State enums for the parse table
const (
InvalidState = iota
// stmt -> value stmt'
StatementState
// stmt' -> MarkComplete | op stmt
StatementPrimeState
// value -> number | string | boolean | quoted_string
ValueState
// section -> [ section'
OpenScopeState
// section' -> value section_close
SectionState
// section_close -> ]
CloseScopeState
// SkipState will skip (NL WS)+
SkipState
// SkipTokenState will skip any token and push the previous
// state onto the stack.
SkipTokenState
// comment -> # comment' | ; comment'
// comment' -> MarkComplete | value
CommentState
// MarkComplete state will complete statements and move that
// to the completed AST list
MarkCompleteState
// TerminalState signifies that the tokens have been fully parsed
TerminalState
)
// parseTable is a state machine to dictate the grammar above.
var parseTable = map[ASTKind]map[TokenType]int{
ASTKindStart: map[TokenType]int{
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
TokenComment: CommentState,
TokenNone: TerminalState,
},
ASTKindCommentStatement: map[TokenType]int{
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
ASTKindExpr: map[TokenType]int{
TokenOp: StatementPrimeState,
TokenLit: ValueState,
TokenSep: OpenScopeState,
TokenWS: ValueState,
TokenNL: SkipState,
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
ASTKindEqualExpr: map[TokenType]int{
TokenLit: ValueState,
TokenWS: SkipTokenState,
TokenNL: SkipState,
},
ASTKindStatement: map[TokenType]int{
TokenLit: SectionState,
TokenSep: CloseScopeState,
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
ASTKindExprStatement: map[TokenType]int{
TokenLit: ValueState,
TokenSep: OpenScopeState,
TokenOp: ValueState,
TokenWS: ValueState,
TokenNL: MarkCompleteState,
TokenComment: CommentState,
TokenNone: TerminalState,
TokenComma: SkipState,
},
ASTKindSectionStatement: map[TokenType]int{
TokenLit: SectionState,
TokenOp: SectionState,
TokenSep: CloseScopeState,
TokenWS: SectionState,
TokenNL: SkipTokenState,
},
ASTKindCompletedSectionStatement: map[TokenType]int{
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
ASTKindSkipStatement: map[TokenType]int{
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
TokenComment: CommentState,
TokenNone: TerminalState,
},
}
// ParseAST will parse input from an io.Reader using
// an LL(1) parser.
func ParseAST(r io.Reader) ([]AST, error) {
lexer := iniLexer{}
tokens, err := lexer.Tokenize(r)
if err != nil {
return []AST{}, err
}
return parse(tokens)
}
// ParseASTBytes will parse input from a byte slice using
// an LL(1) parser.
func ParseASTBytes(b []byte) ([]AST, error) {
lexer := iniLexer{}
tokens, err := lexer.tokenize(b)
if err != nil {
return []AST{}, err
}
return parse(tokens)
}
func parse(tokens []Token) ([]AST, error) {
start := Start
stack := newParseStack(3, len(tokens))
stack.Push(start)
s := newSkipper()
loop:
for stack.Len() > 0 {
k := stack.Pop()
var tok Token
if len(tokens) == 0 {
// this occurs when all the tokens have been processed
// but reduction of what's left on the stack needs to
// occur.
tok = emptyToken
} else {
tok = tokens[0]
}
step := parseTable[k.Kind][tok.Type()]
if s.ShouldSkip(tok) {
// being in a skip state with no tokens will break out of
// the parse loop since there is nothing left to process.
if len(tokens) == 0 {
break loop
}
step = SkipTokenState
}
switch step {
case TerminalState:
// Finished parsing. Push what should be the last
// statement to the stack. If there is anything left
// on the stack, an error in parsing has occurred.
if k.Kind != ASTKindStart {
stack.MarkComplete(k)
}
break loop
case SkipTokenState:
// When skipping a token, the previous state was popped off the stack.
// To maintain the correct state, the previous state will be pushed
// onto the stack.
stack.Push(k)
case StatementState:
if k.Kind != ASTKindStart {
stack.MarkComplete(k)
}
expr := newExpression(tok)
stack.Push(expr)
case StatementPrimeState:
if tok.Type() != TokenOp {
stack.MarkComplete(k)
continue
}
if k.Kind != ASTKindExpr {
return nil, NewParseError(
fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k),
)
}
k = trimSpaces(k)
expr := newEqualExpr(k, tok)
stack.Push(expr)
case ValueState:
// ValueState requires the previous state to either be an equal expression
// or an expression statement.
//
// This grammar occurs when the RHS is a number, word, or quoted string.
// equal_expr -> lit op equal_expr'
// equal_expr' -> number | string | quoted_string
// quoted_string -> " quoted_string'
// quoted_string' -> string quoted_string_end
// quoted_string_end -> "
//
// otherwise
// expr_stmt -> equal_expr (expr_stmt')*
// expr_stmt' -> ws S | op S | MarkComplete
// S -> equal_expr' expr_stmt'
switch k.Kind {
case ASTKindEqualExpr:
// assigning a value to some key
k.AppendChild(newExpression(tok))
stack.Push(newExprStatement(k))
case ASTKindExpr:
k.Root.raw = append(k.Root.raw, tok.Raw()...)
stack.Push(k)
case ASTKindExprStatement:
root := k.GetRoot()
children := root.GetChildren()
if len(children) == 0 {
return nil, NewParseError(
fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind),
)
}
rhs := children[len(children)-1]
if rhs.Root.ValueType != QuotedStringType {
rhs.Root.ValueType = StringType
rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...)
}
children[len(children)-1] = rhs
k.SetChildren(children)
stack.Push(k)
}
case OpenScopeState:
if !runeCompare(tok.Raw(), openBrace) {
return nil, NewParseError("expected '['")
}
stmt := newStatement()
stack.Push(stmt)
case CloseScopeState:
if !runeCompare(tok.Raw(), closeBrace) {
return nil, NewParseError("expected ']'")
}
k = trimSpaces(k)
stack.Push(newCompletedSectionStatement(k))
case SectionState:
var stmt AST
switch k.Kind {
case ASTKindStatement:
// If there are multiple literals inside of a scope declaration,
// then the current token's raw value will be appended to the Name.
//
// This handles cases like [ profile default ]
//
// k will represent a SectionStatement with the children representing
// the label of the section
stmt = newSectionStatement(tok)
case ASTKindSectionStatement:
k.Root.raw = append(k.Root.raw, tok.Raw()...)
stmt = k
default:
return nil, NewParseError(
fmt.Sprintf("invalid statement: expected statement: %v", k.Kind),
)
}
stack.Push(stmt)
case MarkCompleteState:
if k.Kind != ASTKindStart {
stack.MarkComplete(k)
}
if stack.Len() == 0 {
stack.Push(start)
}
case SkipState:
stack.Push(newSkipStatement(k))
s.Skip()
case CommentState:
if k.Kind == ASTKindStart {
stack.Push(k)
} else {
stack.MarkComplete(k)
}
stmt := newCommentStatement(tok)
stack.Push(stmt)
default:
return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok))
}
if len(tokens) > 0 {
tokens = tokens[1:]
}
}
// this occurs when a statement has not been completed
if stack.top > 1 {
return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container))
}
// returns a sublist which excludes the start symbol
return stack.List(), nil
}
// trimSpaces will trim spaces on the left and right hand side of
// the literal.
func trimSpaces(k AST) AST {
// trim left hand side of spaces
for i := 0; i < len(k.Root.raw); i++ {
if !isWhitespace(k.Root.raw[i]) {
break
}
k.Root.raw = k.Root.raw[1:]
i--
}
// trim right hand side of spaces
for i := len(k.Root.raw) - 1; i >= 0; i-- {
if !isWhitespace(k.Root.raw[i]) {
break
}
k.Root.raw = k.Root.raw[:len(k.Root.raw)-1]
}
return k
}

View File

@ -0,0 +1,324 @@
package ini
import (
"fmt"
"strconv"
"strings"
)
var (
runesTrue = []rune("true")
runesFalse = []rune("false")
)
var literalValues = [][]rune{
runesTrue,
runesFalse,
}
func isBoolValue(b []rune) bool {
for _, lv := range literalValues {
if isLitValue(lv, b) {
return true
}
}
return false
}
func isLitValue(want, have []rune) bool {
if len(have) < len(want) {
return false
}
for i := 0; i < len(want); i++ {
if want[i] != have[i] {
return false
}
}
return true
}
// isNumberValue will return whether or not the leading characters in
// a byte slice are a number. A number is delimited by whitespace or
// the newline token.
//
// A number is defined to be in a binary, octal, decimal (int | float), hex format,
// or in scientific notation.
func isNumberValue(b []rune) bool {
negativeIndex := 0
helper := numberHelper{}
needDigit := false
for i := 0; i < len(b); i++ {
negativeIndex++
switch b[i] {
case '-':
if helper.IsNegative() || negativeIndex != 1 {
return false
}
helper.Determine(b[i])
needDigit = true
continue
case 'e', 'E':
if err := helper.Determine(b[i]); err != nil {
return false
}
negativeIndex = 0
needDigit = true
continue
case 'b':
if helper.numberFormat == hex {
break
}
fallthrough
case 'o', 'x':
needDigit = true
if i == 0 {
return false
}
fallthrough
case '.':
if err := helper.Determine(b[i]); err != nil {
return false
}
needDigit = true
continue
}
if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) {
return !needDigit
}
if !helper.CorrectByte(b[i]) {
return false
}
needDigit = false
}
return !needDigit
}
func isValid(b []rune) (bool, int, error) {
if len(b) == 0 {
// TODO: should probably return an error
return false, 0, nil
}
return isValidRune(b[0]), 1, nil
}
func isValidRune(r rune) bool {
return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n'
}
// ValueType is an enum that will signify what type
// the Value is
type ValueType int
func (v ValueType) String() string {
switch v {
case NoneType:
return "NONE"
case DecimalType:
return "FLOAT"
case IntegerType:
return "INT"
case StringType:
return "STRING"
case BoolType:
return "BOOL"
}
return ""
}
// ValueType enums
const (
NoneType = ValueType(iota)
DecimalType
IntegerType
StringType
QuotedStringType
BoolType
)
// Value is a union container
type Value struct {
Type ValueType
raw []rune
integer int64
decimal float64
boolean bool
str string
}
func newValue(t ValueType, base int, raw []rune) (Value, error) {
v := Value{
Type: t,
raw: raw,
}
var err error
switch t {
case DecimalType:
v.decimal, err = strconv.ParseFloat(string(raw), 64)
case IntegerType:
if base != 10 {
raw = raw[2:]
}
v.integer, err = strconv.ParseInt(string(raw), base, 64)
case StringType:
v.str = string(raw)
case QuotedStringType:
v.str = string(raw[1 : len(raw)-1])
case BoolType:
v.boolean = runeCompare(v.raw, runesTrue)
}
// issue 2253
//
// if the value trying to be parsed is too large, then we will use
// the 'StringType' and raw value instead.
if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange {
v.Type = StringType
v.str = string(raw)
err = nil
}
return v, err
}
// Append will append values and change the type to a string
// type.
func (v *Value) Append(tok Token) {
r := tok.Raw()
if v.Type != QuotedStringType {
v.Type = StringType
r = tok.raw[1 : len(tok.raw)-1]
}
if tok.Type() != TokenLit {
v.raw = append(v.raw, tok.Raw()...)
} else {
v.raw = append(v.raw, r...)
}
}
func (v Value) String() string {
switch v.Type {
case DecimalType:
return fmt.Sprintf("decimal: %f", v.decimal)
case IntegerType:
return fmt.Sprintf("integer: %d", v.integer)
case StringType:
return fmt.Sprintf("string: %s", string(v.raw))
case QuotedStringType:
return fmt.Sprintf("quoted string: %s", string(v.raw))
case BoolType:
return fmt.Sprintf("bool: %t", v.boolean)
default:
return "union not set"
}
}
func newLitToken(b []rune) (Token, int, error) {
n := 0
var err error
token := Token{}
if b[0] == '"' {
n, err = getStringValue(b)
if err != nil {
return token, n, err
}
token = newToken(TokenLit, b[:n], QuotedStringType)
} else if isNumberValue(b) {
var base int
base, n, err = getNumericalValue(b)
if err != nil {
return token, 0, err
}
value := b[:n]
vType := IntegerType
if contains(value, '.') || hasExponent(value) {
vType = DecimalType
}
token = newToken(TokenLit, value, vType)
token.base = base
} else if isBoolValue(b) {
n, err = getBoolValue(b)
token = newToken(TokenLit, b[:n], BoolType)
} else {
n, err = getValue(b)
token = newToken(TokenLit, b[:n], StringType)
}
return token, n, err
}
// IntValue returns an integer value
func (v Value) IntValue() int64 {
return v.integer
}
// FloatValue returns a float value
func (v Value) FloatValue() float64 {
return v.decimal
}
// BoolValue returns a bool value
func (v Value) BoolValue() bool {
return v.boolean
}
func isTrimmable(r rune) bool {
switch r {
case '\n', ' ':
return true
}
return false
}
// StringValue returns the string value
func (v Value) StringValue() string {
switch v.Type {
case StringType:
return strings.TrimFunc(string(v.raw), isTrimmable)
case QuotedStringType:
// preserve all characters in the quotes
return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1]))
default:
return strings.TrimFunc(string(v.raw), isTrimmable)
}
}
func contains(runes []rune, c rune) bool {
for i := 0; i < len(runes); i++ {
if runes[i] == c {
return true
}
}
return false
}
func runeCompare(v1 []rune, v2 []rune) bool {
if len(v1) != len(v2) {
return false
}
for i := 0; i < len(v1); i++ {
if v1[i] != v2[i] {
return false
}
}
return true
}

View File

@ -0,0 +1,30 @@
package ini
func isNewline(b []rune) bool {
if len(b) == 0 {
return false
}
if b[0] == '\n' {
return true
}
if len(b) < 2 {
return false
}
return b[0] == '\r' && b[1] == '\n'
}
func newNewlineToken(b []rune) (Token, int, error) {
i := 1
if b[0] == '\r' && isNewline(b[1:]) {
i++
}
if !isNewline([]rune(b[:i])) {
return emptyToken, 0, NewParseError("invalid new line token")
}
return newToken(TokenNL, b[:i], NoneType), i, nil
}

View File

@ -0,0 +1,152 @@
package ini
import (
"bytes"
"fmt"
"strconv"
)
const (
none = numberFormat(iota)
binary
octal
decimal
hex
exponent
)
type numberFormat int
// numberHelper is used to dictate what format a number is in
// and what to do for negative values. Since -1e-4 is a valid
// number, we cannot just simply check for duplicate negatives.
type numberHelper struct {
numberFormat numberFormat
negative bool
negativeExponent bool
}
func (b numberHelper) Exists() bool {
return b.numberFormat != none
}
func (b numberHelper) IsNegative() bool {
return b.negative || b.negativeExponent
}
func (b *numberHelper) Determine(c rune) error {
if b.Exists() {
return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c)))
}
switch c {
case 'b':
b.numberFormat = binary
case 'o':
b.numberFormat = octal
case 'x':
b.numberFormat = hex
case 'e', 'E':
b.numberFormat = exponent
case '-':
if b.numberFormat != exponent {
b.negative = true
} else {
b.negativeExponent = true
}
case '.':
b.numberFormat = decimal
default:
return NewParseError(fmt.Sprintf("invalid number character: %v", string(c)))
}
return nil
}
func (b numberHelper) CorrectByte(c rune) bool {
switch {
case b.numberFormat == binary:
if !isBinaryByte(c) {
return false
}
case b.numberFormat == octal:
if !isOctalByte(c) {
return false
}
case b.numberFormat == hex:
if !isHexByte(c) {
return false
}
case b.numberFormat == decimal:
if !isDigit(c) {
return false
}
case b.numberFormat == exponent:
if !isDigit(c) {
return false
}
case b.negativeExponent:
if !isDigit(c) {
return false
}
case b.negative:
if !isDigit(c) {
return false
}
default:
if !isDigit(c) {
return false
}
}
return true
}
func (b numberHelper) Base() int {
switch b.numberFormat {
case binary:
return 2
case octal:
return 8
case hex:
return 16
default:
return 10
}
}
func (b numberHelper) String() string {
buf := bytes.Buffer{}
i := 0
switch b.numberFormat {
case binary:
i++
buf.WriteString(strconv.Itoa(i) + ": binary format\n")
case octal:
i++
buf.WriteString(strconv.Itoa(i) + ": octal format\n")
case hex:
i++
buf.WriteString(strconv.Itoa(i) + ": hex format\n")
case exponent:
i++
buf.WriteString(strconv.Itoa(i) + ": exponent format\n")
default:
i++
buf.WriteString(strconv.Itoa(i) + ": integer format\n")
}
if b.negative {
i++
buf.WriteString(strconv.Itoa(i) + ": negative format\n")
}
if b.negativeExponent {
i++
buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n")
}
return buf.String()
}

View File

@ -0,0 +1,39 @@
package ini
import (
"fmt"
)
var (
equalOp = []rune("=")
equalColonOp = []rune(":")
)
func isOp(b []rune) bool {
if len(b) == 0 {
return false
}
switch b[0] {
case '=':
return true
case ':':
return true
default:
return false
}
}
func newOpToken(b []rune) (Token, int, error) {
tok := Token{}
switch b[0] {
case '=':
tok = newToken(TokenOp, equalOp, NoneType)
case ':':
tok = newToken(TokenOp, equalColonOp, NoneType)
default:
return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0]))
}
return tok, 1, nil
}

View File

@ -0,0 +1,43 @@
package ini
import "fmt"
const (
// ErrCodeParseError is returned when a parsing error
// has occurred.
ErrCodeParseError = "INIParseError"
)
// ParseError is an error which is returned during any part of
// the parsing process.
type ParseError struct {
msg string
}
// NewParseError will return a new ParseError where message
// is the description of the error.
func NewParseError(message string) *ParseError {
return &ParseError{
msg: message,
}
}
// Code will return the ErrCodeParseError
func (err *ParseError) Code() string {
return ErrCodeParseError
}
// Message returns the error's message
func (err *ParseError) Message() string {
return err.msg
}
// OrigError returns nothing since there will never be any
// original error.
func (err *ParseError) OrigError() error {
return nil
}
func (err *ParseError) Error() string {
return fmt.Sprintf("%s: %s", err.Code(), err.Message())
}

View File

@ -0,0 +1,60 @@
package ini
import (
"bytes"
"fmt"
)
// ParseStack is a stack that contains a container, the stack portion,
// and the list which is the list of ASTs that have been successfully
// parsed.
type ParseStack struct {
top int
container []AST
list []AST
index int
}
func newParseStack(sizeContainer, sizeList int) ParseStack {
return ParseStack{
container: make([]AST, sizeContainer),
list: make([]AST, sizeList),
}
}
// Pop will return and truncate the last container element.
func (s *ParseStack) Pop() AST {
s.top--
return s.container[s.top]
}
// Push will add the new AST to the container
func (s *ParseStack) Push(ast AST) {
s.container[s.top] = ast
s.top++
}
// MarkComplete will append the AST to the list of completed statements
func (s *ParseStack) MarkComplete(ast AST) {
s.list[s.index] = ast
s.index++
}
// List will return the completed statements
func (s ParseStack) List() []AST {
return s.list[:s.index]
}
// Len will return the length of the container
func (s *ParseStack) Len() int {
return s.top
}
func (s ParseStack) String() string {
buf := bytes.Buffer{}
for i, node := range s.list {
buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node))
}
return buf.String()
}

View File

@ -0,0 +1,41 @@
package ini
import (
"fmt"
)
var (
emptyRunes = []rune{}
)
func isSep(b []rune) bool {
if len(b) == 0 {
return false
}
switch b[0] {
case '[', ']':
return true
default:
return false
}
}
var (
openBrace = []rune("[")
closeBrace = []rune("]")
)
func newSepToken(b []rune) (Token, int, error) {
tok := Token{}
switch b[0] {
case '[':
tok = newToken(TokenSep, openBrace, NoneType)
case ']':
tok = newToken(TokenSep, closeBrace, NoneType)
default:
return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0]))
}
return tok, 1, nil
}

View File

@ -0,0 +1,45 @@
package ini
// skipper is used to skip certain blocks of an ini file.
// Currently skipper is used to skip nested blocks of ini
// files. See example below
//
// [ foo ]
// nested = ; this section will be skipped
// a=b
// c=d
// bar=baz ; this will be included
type skipper struct {
shouldSkip bool
TokenSet bool
prevTok Token
}
func newSkipper() skipper {
return skipper{
prevTok: emptyToken,
}
}
func (s *skipper) ShouldSkip(tok Token) bool {
if s.shouldSkip &&
s.prevTok.Type() == TokenNL &&
tok.Type() != TokenWS {
s.Continue()
return false
}
s.prevTok = tok
return s.shouldSkip
}
func (s *skipper) Skip() {
s.shouldSkip = true
s.prevTok = emptyToken
}
func (s *skipper) Continue() {
s.shouldSkip = false
s.prevTok = emptyToken
}

View File

@ -0,0 +1,35 @@
package ini
// Statement is an empty AST mostly used for transitioning states.
func newStatement() AST {
return newAST(ASTKindStatement, AST{})
}
// SectionStatement represents a section AST
func newSectionStatement(tok Token) AST {
return newASTWithRootToken(ASTKindSectionStatement, tok)
}
// ExprStatement represents a completed expression AST
func newExprStatement(ast AST) AST {
return newAST(ASTKindExprStatement, ast)
}
// CommentStatement represents a comment in the ini definition.
//
// grammar:
// comment -> #comment' | ;comment'
// comment' -> epsilon | value
func newCommentStatement(tok Token) AST {
return newAST(ASTKindCommentStatement, newExpression(tok))
}
// CompletedSectionStatement represents a completed section
func newCompletedSectionStatement(ast AST) AST {
return newAST(ASTKindCompletedSectionStatement, ast)
}
// SkipStatement is used to skip whole statements
func newSkipStatement(ast AST) AST {
return newAST(ASTKindSkipStatement, ast)
}

View File

@ -0,0 +1,284 @@
package ini
import (
"fmt"
)
// getStringValue will return a quoted string and the number
// of bytes read
//
// an error will be returned if the string is not properly formatted
func getStringValue(b []rune) (int, error) {
if b[0] != '"' {
return 0, NewParseError("strings must start with '\"'")
}
endQuote := false
i := 1
for ; i < len(b) && !endQuote; i++ {
if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped {
endQuote = true
break
} else if escaped {
/*c, err := getEscapedByte(b[i])
if err != nil {
return 0, err
}
b[i-1] = c
b = append(b[:i], b[i+1:]...)
i--*/
continue
}
}
if !endQuote {
return 0, NewParseError("missing '\"' in string value")
}
return i + 1, nil
}
// getBoolValue will return a boolean and the number
// of bytes read
//
// an error will be returned if the boolean is not of a correct
// value
func getBoolValue(b []rune) (int, error) {
if len(b) < 4 {
return 0, NewParseError("invalid boolean value")
}
n := 0
for _, lv := range literalValues {
if len(lv) > len(b) {
continue
}
if isLitValue(lv, b) {
n = len(lv)
}
}
if n == 0 {
return 0, NewParseError("invalid boolean value")
}
return n, nil
}
// getNumericalValue will return a numerical string, the number
// of bytes read, and the base of the number
//
// an error will be returned if the number is not of a correct
// value
func getNumericalValue(b []rune) (int, int, error) {
if !isDigit(b[0]) {
return 0, 0, NewParseError("invalid digit value")
}
i := 0
helper := numberHelper{}
loop:
for negativeIndex := 0; i < len(b); i++ {
negativeIndex++
if !isDigit(b[i]) {
switch b[i] {
case '-':
if helper.IsNegative() || negativeIndex != 1 {
return 0, 0, NewParseError("parse error '-'")
}
n := getNegativeNumber(b[i:])
i += (n - 1)
helper.Determine(b[i])
continue
case '.':
if err := helper.Determine(b[i]); err != nil {
return 0, 0, err
}
case 'e', 'E':
if err := helper.Determine(b[i]); err != nil {
return 0, 0, err
}
negativeIndex = 0
case 'b':
if helper.numberFormat == hex {
break
}
fallthrough
case 'o', 'x':
if i == 0 && b[i] != '0' {
return 0, 0, NewParseError("incorrect base format, expected leading '0'")
}
if i != 1 {
return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i))
}
if err := helper.Determine(b[i]); err != nil {
return 0, 0, err
}
default:
if isWhitespace(b[i]) {
break loop
}
if isNewline(b[i:]) {
break loop
}
if !(helper.numberFormat == hex && isHexByte(b[i])) {
if i+2 < len(b) && !isNewline(b[i:i+2]) {
return 0, 0, NewParseError("invalid numerical character")
} else if !isNewline([]rune{b[i]}) {
return 0, 0, NewParseError("invalid numerical character")
}
break loop
}
}
}
}
return helper.Base(), i, nil
}
// isDigit will return whether or not the rune is a decimal digit
func isDigit(b rune) bool {
return b >= '0' && b <= '9'
}
func hasExponent(v []rune) bool {
return contains(v, 'e') || contains(v, 'E')
}
func isBinaryByte(b rune) bool {
switch b {
case '0', '1':
return true
default:
return false
}
}
func isOctalByte(b rune) bool {
switch b {
case '0', '1', '2', '3', '4', '5', '6', '7':
return true
default:
return false
}
}
func isHexByte(b rune) bool {
if isDigit(b) {
return true
}
return (b >= 'A' && b <= 'F') ||
(b >= 'a' && b <= 'f')
}
func getValue(b []rune) (int, error) {
i := 0
for i < len(b) {
if isNewline(b[i:]) {
break
}
if isOp(b[i:]) {
break
}
valid, n, err := isValid(b[i:])
if err != nil {
return 0, err
}
if !valid {
break
}
i += n
}
return i, nil
}
// getNegativeNumber will return a negative number from a
// byte slice. This will iterate through all characters until
// a non-digit has been found.
func getNegativeNumber(b []rune) int {
if b[0] != '-' {
return 0
}
i := 1
for ; i < len(b); i++ {
if !isDigit(b[i]) {
return i
}
}
return i
}
// isEscaped will return whether or not the character is an escaped
// character.
func isEscaped(value []rune, b rune) bool {
if len(value) == 0 {
return false
}
switch b {
case '\'': // single quote
case '"': // quote
case 'n': // newline
case 't': // tab
case '\\': // backslash
default:
return false
}
return value[len(value)-1] == '\\'
}
func getEscapedByte(b rune) (rune, error) {
switch b {
case '\'': // single quote
return '\'', nil
case '"': // quote
return '"', nil
case 'n': // newline
return '\n', nil
case 't': // tab
return '\t', nil
case '\\': // backslash
return '\\', nil
default:
return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b))
}
}
func removeEscapedCharacters(b []rune) []rune {
for i := 0; i < len(b); i++ {
if isEscaped(b[:i], b[i]) {
c, err := getEscapedByte(b[i])
if err != nil {
return b
}
b[i-1] = c
b = append(b[:i], b[i+1:]...)
i--
}
}
return b
}

View File

@ -0,0 +1,166 @@
package ini
import (
"fmt"
"sort"
)
// Visitor is an interface used by walkers that will
// traverse an array of ASTs.
type Visitor interface {
VisitExpr(AST) error
VisitStatement(AST) error
}
// DefaultVisitor is used to visit statements and expressions
// and ensure that they are both of the correct format.
// In addition, upon visiting this will build sections and populate
// the Sections field which can be used to retrieve profile
// configuration.
type DefaultVisitor struct {
scope string
Sections Sections
}
// NewDefaultVisitor returns a DefaultVisitor
func NewDefaultVisitor() *DefaultVisitor {
return &DefaultVisitor{
Sections: Sections{
container: map[string]Section{},
},
}
}
// VisitExpr visits expressions...
func (v *DefaultVisitor) VisitExpr(expr AST) error {
t := v.Sections.container[v.scope]
if t.values == nil {
t.values = values{}
}
switch expr.Kind {
case ASTKindExprStatement:
opExpr := expr.GetRoot()
switch opExpr.Kind {
case ASTKindEqualExpr:
children := opExpr.GetChildren()
if len(children) <= 1 {
return NewParseError("unexpected token type")
}
rhs := children[1]
if rhs.Root.Type() != TokenLit {
return NewParseError("unexpected token type")
}
key := EqualExprKey(opExpr)
v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
if err != nil {
return err
}
t.values[key] = v
default:
return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
}
default:
return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
}
v.Sections.container[v.scope] = t
return nil
}
// VisitStatement visits statements...
func (v *DefaultVisitor) VisitStatement(stmt AST) error {
switch stmt.Kind {
case ASTKindCompletedSectionStatement:
child := stmt.GetRoot()
if child.Kind != ASTKindSectionStatement {
return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
}
name := string(child.Root.Raw())
v.Sections.container[name] = Section{}
v.scope = name
default:
return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
}
return nil
}
// Sections is a map of Section structures that represent
// a configuration.
type Sections struct {
container map[string]Section
}
// GetSection will return section p. If section p does not exist,
// false will be returned in the second parameter.
func (t Sections) GetSection(p string) (Section, bool) {
v, ok := t.container[p]
return v, ok
}
// values represents a map of union values.
type values map[string]Value
// List will return a list of all sections that were successfully
// parsed.
func (t Sections) List() []string {
keys := make([]string, len(t.container))
i := 0
for k := range t.container {
keys[i] = k
i++
}
sort.Strings(keys)
return keys
}
// Section contains a name and values. This represents
// a sectioned entry in a configuration file.
type Section struct {
Name string
values values
}
// Has will return whether or not an entry exists in a given section
func (t Section) Has(k string) bool {
_, ok := t.values[k]
return ok
}
// ValueType will return what type the union is set to. If
// k was not found, the NoneType will be returned.
func (t Section) ValueType(k string) (ValueType, bool) {
v, ok := t.values[k]
return v.Type, ok
}
// Bool returns a bool value at k
func (t Section) Bool(k string) bool {
return t.values[k].BoolValue()
}
// Int returns an integer value at k
func (t Section) Int(k string) int64 {
return t.values[k].IntValue()
}
// Float64 returns a float value at k
func (t Section) Float64(k string) float64 {
return t.values[k].FloatValue()
}
// String returns the string value at k
func (t Section) String(k string) string {
_, ok := t.values[k]
if !ok {
return ""
}
return t.values[k].StringValue()
}

View File

@ -0,0 +1,25 @@
package ini
// Walk will traverse the AST using v, the Visitor.
func Walk(tree []AST, v Visitor) error {
for _, node := range tree {
switch node.Kind {
case ASTKindExpr,
ASTKindExprStatement:
if err := v.VisitExpr(node); err != nil {
return err
}
case ASTKindStatement,
ASTKindCompletedSectionStatement,
ASTKindNestedSectionStatement,
ASTKindCompletedNestedSectionStatement:
if err := v.VisitStatement(node); err != nil {
return err
}
}
}
return nil
}

View File

@ -0,0 +1,24 @@
package ini
import (
"unicode"
)
// isWhitespace will return whether or not the character is
// a whitespace character.
//
// Whitespace is defined as a space or tab.
func isWhitespace(c rune) bool {
return unicode.IsSpace(c) && c != '\n' && c != '\r'
}
func newWSToken(b []rune) (Token, int, error) {
i := 0
for ; i < len(b); i++ {
if !isWhitespace(b[i]) {
break
}
}
return newToken(TokenWS, b[:i], NoneType), i, nil
}

23
vendor/github.com/aws/aws-sdk-go/internal/sdkuri/BUILD generated vendored Normal file
View File

@ -0,0 +1,23 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["path.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/aws/aws-sdk-go/internal/sdkuri",
importpath = "github.com/aws/aws-sdk-go/internal/sdkuri",
visibility = ["//vendor/github.com/aws/aws-sdk-go:__subpackages__"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,23 @@
package sdkuri
import (
"path"
"strings"
)
// PathJoin will join the elements of the path delimited by the "/"
// character. Similar to path.Join with the exception that the trailing "/"
// character is preserved if present.
func PathJoin(elems ...string) string {
if len(elems) == 0 {
return ""
}
hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
str := path.Join(elems...)
if hasTrailing && str != "/" {
str += "/"
}
return str
}
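A small test-style sketch of the trailing-slash behavior described above; the package is internal, so the example lives inside it, and the paths are arbitrary:

    package sdkuri

    import "testing"

    func TestPathJoinTrailingSlashSketch(t *testing.T) {
        // Unlike path.Join, a trailing "/" on the final element is preserved.
        if got := PathJoin("latest", "meta-data/"); got != "latest/meta-data/" {
            t.Errorf("got %q, want %q", got, "latest/meta-data/")
        }
        if got := PathJoin("latest", "meta-data"); got != "latest/meta-data" {
            t.Errorf("got %q, want %q", got, "latest/meta-data")
        }
    }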

View File

@ -2,7 +2,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["shared_config.go"],
srcs = [
"ecs_container.go",
"shared_config.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults",
importpath = "github.com/aws/aws-sdk-go/internal/shareddefaults",
visibility = ["//vendor/github.com/aws/aws-sdk-go:__subpackages__"],

View File

@ -0,0 +1,12 @@
package shareddefaults
const (
// ECSCredsProviderEnvVar is an environmental variable key used to
// determine which path needs to be hit.
ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
)
// ECSContainerCredentialsURI is the endpoint to retrieve container
// credentials. This can be overridden in tests to ensure the credential process
// is behaving correctly.
var ECSContainerCredentialsURI = "http://169.254.170.2"
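A sketch of the environment-driven switch this constant supports; the relative URI value is a placeholder:

    package main

    import (
        "fmt"
        "os"

        "github.com/aws/aws-sdk-go/aws/defaults"
    )

    func main() {
        // On ECS tasks with a task role this variable is set by the agent;
        // when present, RemoteCredProvider resolves credentials from the
        // container endpoint instead of the EC2 instance metadata service.
        _ = os.Setenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI", "/v2/credentials/example")

        cfg := defaults.Config()
        handlers := defaults.Handlers()
        provider := defaults.RemoteCredProvider(*cfg, handlers)
        fmt.Printf("resolved provider type: %T\n", provider)
    }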

View File

@ -3,9 +3,12 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"host.go",
"host_prefix.go",
"idempotency.go",
"jsonvalue.go",
"payload.go",
"timestamp.go",
"unmarshal.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/aws/aws-sdk-go/private/protocol",

View File

@ -27,7 +27,11 @@ func Unmarshal(r *request.Request) {
decoder := xml.NewDecoder(r.HTTPResponse.Body)
err := xmlutil.UnmarshalXML(r.Data, decoder, "")
if err != nil {
r.Error = awserr.New("SerializationError", "failed decoding EC2 Query response", err)
r.Error = awserr.NewRequestFailure(
awserr.New("SerializationError", "failed decoding EC2 Query response", err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
}
@ -52,7 +56,11 @@ func UnmarshalError(r *request.Request) {
resp := &xmlErrorResponse{}
err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
if err != nil && err != io.EOF {
r.Error = awserr.New("SerializationError", "failed decoding EC2 Query error response", err)
r.Error = awserr.NewRequestFailure(
awserr.New("SerializationError", "failed decoding EC2 Query error response", err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
} else {
r.Error = awserr.NewRequestFailure(
awserr.New(resp.Code, resp.Message, nil),
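For callers, the practical effect of wrapping these errors in awserr.NewRequestFailure is that the HTTP status code and request ID now travel with the error; a hedged sketch of inspecting one, with fabricated values for illustration:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awserr"
    )

    func inspect(err error) {
        if reqErr, ok := err.(awserr.RequestFailure); ok {
            fmt.Println(reqErr.Code(), reqErr.StatusCode(), reqErr.RequestID())
            return
        }
        if aerr, ok := err.(awserr.Error); ok {
            fmt.Println(aerr.Code(), aerr.Message())
        }
    }

    func main() {
        // Shaped like the errors the EC2 Query unmarshalers now return.
        err := awserr.NewRequestFailure(
            awserr.New("SerializationError", "failed decoding EC2 Query response", nil),
            500, "example-request-id",
        )
        inspect(err)
    }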

View File

@ -0,0 +1,68 @@
package protocol
import (
"strings"
"github.com/aws/aws-sdk-go/aws/request"
)
// ValidateEndpointHostHandler is a request handler that will validate the
// request endpoint's host is a valid RFC 3986 host.
var ValidateEndpointHostHandler = request.NamedHandler{
Name: "awssdk.protocol.ValidateEndpointHostHandler",
Fn: func(r *request.Request) {
err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host)
if err != nil {
r.Error = err
}
},
}
// ValidateEndpointHost validates that the host string passed in is a valid RFC
// 3986 host. Returns error if the host is not valid.
func ValidateEndpointHost(opName, host string) error {
paramErrs := request.ErrInvalidParams{Context: opName}
labels := strings.Split(host, ".")
for i, label := range labels {
if i == len(labels)-1 && len(label) == 0 {
// Allow trailing dot for FQDN hosts.
continue
}
if !ValidHostLabel(label) {
paramErrs.Add(request.NewErrParamFormat(
"endpoint host label", "[a-zA-Z0-9-]{1,63}", label))
}
}
if len(host) > 255 {
paramErrs.Add(request.NewErrParamMaxLen(
"endpoint host", 255, host,
))
}
if paramErrs.Len() > 0 {
return paramErrs
}
return nil
}
// ValidHostLabel reports whether the label is a valid RFC 3986 host label.
func ValidHostLabel(label string) bool {
if l := len(label); l == 0 || l > 63 {
return false
}
for _, r := range label {
switch {
case r >= '0' && r <= '9':
case r >= 'A' && r <= 'Z':
case r >= 'a' && r <= 'z':
case r == '-':
default:
return false
}
}
return true
}
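
A minimal usage sketch of the two validators above, assuming the vendored import path github.com/aws/aws-sdk-go/private/protocol; the hostnames are made up:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	// Labels must match [a-zA-Z0-9-]{1,63}.
	fmt.Println(protocol.ValidHostLabel("ecr"))       // true
	fmt.Println(protocol.ValidHostLabel("bad_label")) // false (underscore)

	// A full host is checked label by label; a trailing dot (FQDN) is allowed.
	for _, host := range []string{"api.ecr.us-east-1.amazonaws.com.", "bad host.example.com"} {
		if err := protocol.ValidateEndpointHost("ExampleOp", host); err != nil {
			fmt.Printf("%q: %v\n", host, err)
		} else {
			fmt.Printf("%q: ok\n", host)
		}
	}
}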

View File

@ -0,0 +1,54 @@
package protocol
import (
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
)
// HostPrefixHandlerName is the handler name for the host prefix request
// handler.
const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler"
// NewHostPrefixHandler constructs a build handler for the given host prefix template.
func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler {
builder := HostPrefixBuilder{
Prefix: prefix,
LabelsFn: labelsFn,
}
return request.NamedHandler{
Name: HostPrefixHandlerName,
Fn: builder.Build,
}
}
// HostPrefixBuilder provides the request handler to expand and prepend
// the host prefix into the operation's request endpoint host.
type HostPrefixBuilder struct {
Prefix string
LabelsFn func() map[string]string
}
// Build updates the passed in Request with the HostPrefix template expanded.
func (h HostPrefixBuilder) Build(r *request.Request) {
if aws.BoolValue(r.Config.DisableEndpointHostPrefix) {
return
}
var labels map[string]string
if h.LabelsFn != nil {
labels = h.LabelsFn()
}
prefix := h.Prefix
for name, value := range labels {
prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
}
r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host
if len(r.HTTPRequest.Host) > 0 {
r.HTTPRequest.Host = prefix + r.HTTPRequest.Host
}
}
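
A self-contained sketch of just the expansion step performed by Build, with a hypothetical prefix template and labels (constructing a full request.Request is omitted):

package main

import (
	"fmt"
	"strings"
)

// expandHostPrefix restates the core of HostPrefixBuilder.Build: every
// "{label}" in the prefix template is replaced with its value and the result
// is prepended to the endpoint host.
func expandHostPrefix(prefix, host string, labels map[string]string) string {
	for name, value := range labels {
		prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
	}
	return prefix + host
}

func main() {
	host := expandHostPrefix(
		"{AccountId}.",                    // hypothetical prefix template
		"example.us-east-1.amazonaws.com", // hypothetical base endpoint host
		map[string]string{"AccountId": "123456789012"},
	)
	fmt.Println(host) // 123456789012.example.us-east-1.amazonaws.com
}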

View File

@ -216,7 +216,17 @@ func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) erro
default:
switch converted := value.Interface().(type) {
case time.Time:
buf.Write(strconv.AppendInt(scratch[:0], converted.UTC().Unix(), 10))
format := tag.Get("timestampFormat")
if len(format) == 0 {
format = protocol.UnixTimeFormatName
}
ts := protocol.FormatTime(format, converted)
if format != protocol.UnixTimeFormatName {
ts = `"` + ts + `"`
}
buf.WriteString(ts)
case []byte:
if !value.IsNil() {
buf.WriteByte('"')

View File

@ -5,7 +5,6 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"reflect"
"time"
@ -17,16 +16,10 @@ import (
func UnmarshalJSON(v interface{}, stream io.Reader) error {
var out interface{}
b, err := ioutil.ReadAll(stream)
if err != nil {
return err
}
if len(b) == 0 {
err := json.NewDecoder(stream).Decode(&out)
if err == io.EOF {
return nil
}
if err := json.Unmarshal(b, &out); err != nil {
} else if err != nil {
return err
}
@ -172,9 +165,6 @@ func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag)
}
func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error {
errf := func() error {
return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
}
switch d := data.(type) {
case nil:
@ -189,6 +179,17 @@ func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTa
return err
}
value.Set(reflect.ValueOf(b))
case *time.Time:
format := tag.Get("timestampFormat")
if len(format) == 0 {
format = protocol.ISO8601TimeFormatName
}
t, err := protocol.ParseTime(format, d)
if err != nil {
return err
}
value.Set(reflect.ValueOf(&t))
case aws.JSONValue:
// No need to use escaping as the value is a non-quoted string.
v, err := protocol.DecodeJSONValue(d, protocol.NoEscape)
@ -197,7 +198,7 @@ func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTa
}
value.Set(reflect.ValueOf(v))
default:
return errf()
return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
}
case float64:
switch value.Interface().(type) {
@ -207,17 +208,18 @@ func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTa
case *float64:
value.Set(reflect.ValueOf(&d))
case *time.Time:
// Time unmarshaled from a float64 can only be epoch seconds
t := time.Unix(int64(d), 0).UTC()
value.Set(reflect.ValueOf(&t))
default:
return errf()
return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
}
case bool:
switch value.Interface().(type) {
case *bool:
value.Set(reflect.ValueOf(&d))
default:
return errf()
return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
}
default:
return fmt.Errorf("unsupported JSON value (%v)", data)

View File

@ -7,7 +7,7 @@ package jsonrpc
import (
"encoding/json"
"io/ioutil"
"io"
"strings"
"github.com/aws/aws-sdk-go/aws/awserr"
@ -64,7 +64,11 @@ func Unmarshal(req *request.Request) {
if req.DataFilled() {
err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)
if err != nil {
req.Error = awserr.New("SerializationError", "failed decoding JSON RPC response", err)
req.Error = awserr.NewRequestFailure(
awserr.New("SerializationError", "failed decoding JSON RPC response", err),
req.HTTPResponse.StatusCode,
req.RequestID,
)
}
}
return
@ -78,22 +82,22 @@ func UnmarshalMeta(req *request.Request) {
// UnmarshalError unmarshals an error response for a JSON RPC service.
func UnmarshalError(req *request.Request) {
defer req.HTTPResponse.Body.Close()
bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body)
if err != nil {
req.Error = awserr.New("SerializationError", "failed reading JSON RPC error response", err)
return
}
if len(bodyBytes) == 0 {
var jsonErr jsonErrorResponse
err := json.NewDecoder(req.HTTPResponse.Body).Decode(&jsonErr)
if err == io.EOF {
req.Error = awserr.NewRequestFailure(
awserr.New("SerializationError", req.HTTPResponse.Status, nil),
req.HTTPResponse.StatusCode,
"",
req.RequestID,
)
return
}
var jsonErr jsonErrorResponse
if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil {
req.Error = awserr.New("SerializationError", "failed decoding JSON RPC error response", err)
} else if err != nil {
req.Error = awserr.NewRequestFailure(
awserr.New("SerializationError", "failed decoding JSON RPC error response", err),
req.HTTPResponse.StatusCode,
req.RequestID,
)
return
}
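
For context on the change above: swapping ioutil.ReadAll plus json.Unmarshal for a streaming decoder means an empty response body now surfaces as io.EOF rather than a zero-length byte slice. A standalone sketch with a made-up error payload:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

func main() {
	var out map[string]interface{}

	// An empty body is reported as io.EOF, which the handlers treat as
	// "no error document present".
	err := json.NewDecoder(strings.NewReader("")).Decode(&out)
	fmt.Println(err == io.EOF) // true

	// A populated body decodes without being buffered first.
	err = json.NewDecoder(strings.NewReader(`{"__type":"Throttling","message":"slow down"}`)).Decode(&out)
	fmt.Println(err, out["__type"]) // <nil> Throttling
}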

View File

@ -233,7 +233,12 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta
v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
case time.Time:
const ISO8601UTC = "2006-01-02T15:04:05Z"
v.Set(name, value.UTC().Format(ISO8601UTC))
format := tag.Get("timestampFormat")
if len(format) == 0 {
format = protocol.ISO8601TimeFormatName
}
v.Set(name, protocol.FormatTime(format, value))
default:
return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
}
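
The tag-driven format selection shown here recurs in several marshalers in this diff. A sketch of how a timestampFormat struct tag picks the wire format, using a hypothetical shape and the protocol helpers introduced below (timestamp.go):

package main

import (
	"fmt"
	"reflect"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol"
)

// shape is a hypothetical API structure; only the timestampFormat tag matters.
type shape struct {
	Created  time.Time `timestampFormat:"unixTimestamp"`
	Modified time.Time // no tag: marshaler falls back to its protocol default
}

func main() {
	t := reflect.TypeOf(shape{})
	when := time.Date(2014, 4, 29, 18, 30, 38, 0, time.UTC)

	for i := 0; i < t.NumField(); i++ {
		format := t.Field(i).Tag.Get("timestampFormat")
		if len(format) == 0 {
			format = protocol.ISO8601TimeFormatName // default used by the query/XML marshalers
		}
		fmt.Printf("%s -> %s\n", t.Field(i).Name, protocol.FormatTime(format, when))
	}
}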

View File

@ -23,7 +23,11 @@ func Unmarshal(r *request.Request) {
decoder := xml.NewDecoder(r.HTTPResponse.Body)
err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
if err != nil {
r.Error = awserr.New("SerializationError", "failed decoding Query response", err)
r.Error = awserr.NewRequestFailure(
awserr.New("SerializationError", "failed decoding Query response", err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
}
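
The recurring pattern in these unmarshal handlers wraps the serialization error with awserr.NewRequestFailure so the HTTP status code and request ID travel with it; callers can recover them with a type assertion. A minimal sketch with made-up values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	// Wrap a serialization error the same way the handlers now do.
	var err error = awserr.NewRequestFailure(
		awserr.New("SerializationError", "failed decoding Query response", nil),
		500,                  // hypothetical HTTP status code
		"example-request-id", // hypothetical request ID
	)

	// The extra context is available through the awserr.RequestFailure interface.
	if reqErr, ok := err.(awserr.RequestFailure); ok {
		fmt.Println(reqErr.Code(), reqErr.StatusCode(), reqErr.RequestID())
	}
}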

View File

@ -28,7 +28,11 @@ func UnmarshalError(r *request.Request) {
bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err)
r.Error = awserr.NewRequestFailure(
awserr.New("SerializationError", "failed to read from query HTTP response body", err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
@ -61,6 +65,10 @@ func UnmarshalError(r *request.Request) {
}
// Failed to retrieve any error message from the response body
r.Error = awserr.New("SerializationError",
"failed to decode query XML error response", decodeErr)
r.Error = awserr.NewRequestFailure(
awserr.New("SerializationError",
"failed to decode query XML error response", decodeErr),
r.HTTPResponse.StatusCode,
r.RequestID,
)
}

View File

@ -20,11 +20,6 @@ import (
"github.com/aws/aws-sdk-go/private/protocol"
)
// RFC1123GMT is a RFC1123 (RFC822) formated timestame. This format is not
// using the standard library's time.RFC1123 due to the desire to always use
// GMT as the timezone.
const RFC1123GMT = "Mon, 2 Jan 2006 15:04:05 GMT"
// Whether the byte value can be sent without escaping in AWS URLs
var noEscape [256]bool
@ -272,7 +267,14 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error)
case float64:
str = strconv.FormatFloat(value, 'f', -1, 64)
case time.Time:
str = value.UTC().Format(RFC1123GMT)
format := tag.Get("timestampFormat")
if len(format) == 0 {
format = protocol.RFC822TimeFormatName
if tag.Get("location") == "querystring" {
format = protocol.ISO8601TimeFormatName
}
}
str = protocol.FormatTime(format, value)
case aws.JSONValue:
if len(value) == 0 {
return "", errValueNotSet

View File

@ -198,7 +198,11 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro
}
v.Set(reflect.ValueOf(&f))
case *time.Time:
t, err := time.Parse(time.RFC1123, header)
format := tag.Get("timestampFormat")
if len(format) == 0 {
format = protocol.RFC822TimeFormatName
}
t, err := protocol.ParseTime(format, header)
if err != nil {
return err
}

View File

@ -0,0 +1,72 @@
package protocol
import (
"strconv"
"time"
)
// Names of time formats supported by the SDK
const (
RFC822TimeFormatName = "rfc822"
ISO8601TimeFormatName = "iso8601"
UnixTimeFormatName = "unixTimestamp"
)
// Time formats supported by the SDK
const (
// RFC 7231#section-7.1.1.1 timestamp format, e.g. Tue, 29 Apr 2014 18:30:38 GMT
RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
// RFC3339, a subset of the ISO8601 timestamp format, e.g. 2014-04-29T18:30:38Z
ISO8601TimeFormat = "2006-01-02T15:04:05Z"
)
// IsKnownTimestampFormat reports whether the timestamp format name
// is known to the SDK's protocols.
func IsKnownTimestampFormat(name string) bool {
switch name {
case RFC822TimeFormatName:
fallthrough
case ISO8601TimeFormatName:
fallthrough
case UnixTimeFormatName:
return true
default:
return false
}
}
// FormatTime returns a string value of the time.
func FormatTime(name string, t time.Time) string {
t = t.UTC()
switch name {
case RFC822TimeFormatName:
return t.Format(RFC822TimeFormat)
case ISO8601TimeFormatName:
return t.Format(ISO8601TimeFormat)
case UnixTimeFormatName:
return strconv.FormatInt(t.Unix(), 10)
default:
panic("unknown timestamp format name, " + name)
}
}
// ParseTime attempts to parse the time using the named format. It returns
// the parsed time, or an error if parsing fails.
func ParseTime(formatName, value string) (time.Time, error) {
switch formatName {
case RFC822TimeFormatName:
return time.Parse(RFC822TimeFormat, value)
case ISO8601TimeFormatName:
return time.Parse(ISO8601TimeFormat, value)
case UnixTimeFormatName:
v, err := strconv.ParseFloat(value, 64)
if err != nil {
return time.Time{}, err
}
return time.Unix(int64(v), 0), nil
default:
panic("unknown timestamp format name, " + formatName)
}
}
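
A quick round-trip check of the new helpers, assuming the vendored import path github.com/aws/aws-sdk-go/private/protocol:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	t := time.Date(2014, 4, 29, 18, 30, 38, 0, time.UTC)

	for _, name := range []string{
		protocol.RFC822TimeFormatName,  // "rfc822"
		protocol.ISO8601TimeFormatName, // "iso8601"
		protocol.UnixTimeFormatName,    // "unixTimestamp"
	} {
		s := protocol.FormatTime(name, t)
		back, err := protocol.ParseTime(name, s)
		fmt.Printf("%-13s %-32q round-trips: %v (err=%v)\n", name, s, back.Equal(t), err)
	}
}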

View File

@ -13,9 +13,13 @@ import (
"github.com/aws/aws-sdk-go/private/protocol"
)
// BuildXML will serialize params into an xml.Encoder.
// Error will be returned if the serialization of any of the params or nested values fails.
// BuildXML will serialize params into an xml.Encoder. Error will be returned
// if the serialization of any of the params or nested values fails.
func BuildXML(params interface{}, e *xml.Encoder) error {
return buildXML(params, e, false)
}
func buildXML(params interface{}, e *xml.Encoder, sorted bool) error {
b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
root := NewXMLElement(xml.Name{})
if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
@ -23,7 +27,7 @@ func BuildXML(params interface{}, e *xml.Encoder) error {
}
for _, c := range root.Children {
for _, v := range c {
return StructToXML(e, v, false)
return StructToXML(e, v, sorted)
}
}
return nil
@ -83,15 +87,13 @@ func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag refle
}
}
// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested
// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
// types are converted to XMLNodes also.
func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
if !value.IsValid() {
return nil
}
fieldAdded := false
// unwrap payloads
if payload := tag.Get("payload"); payload != "" {
field, _ := value.Type().FieldByName(payload)
@ -119,6 +121,8 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl
child.Attr = append(child.Attr, ns)
}
var payloadFields, nonPayloadFields int
t := value.Type()
for i := 0; i < value.NumField(); i++ {
member := elemOf(value.Field(i))
@ -133,8 +137,10 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl
mTag := field.Tag
if mTag.Get("location") != "" { // skip non-body members
nonPayloadFields++
continue
}
payloadFields++
if protocol.CanSetIdempotencyToken(value.Field(i), field) {
token := protocol.GetIdempotencyToken()
@ -149,11 +155,11 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl
if err := b.buildValue(member, child, mTag); err != nil {
return err
}
fieldAdded = true
}
if fieldAdded { // only append this child if we have one ore more valid members
// The only case where the child shape is not added is when the shape contains
// only non-payload fields, e.g. headers/query.
if !(payloadFields == 0 && nonPayloadFields > 0) {
current.AddChild(child)
}
@ -278,8 +284,12 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl
case float32:
str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
case time.Time:
const ISO8601UTC = "2006-01-02T15:04:05Z"
str = converted.UTC().Format(ISO8601UTC)
format := tag.Get("timestampFormat")
if len(format) == 0 {
format = protocol.ISO8601TimeFormatName
}
str = protocol.FormatTime(format, converted)
default:
return fmt.Errorf("unsupported value for param %s: %v (%s)",
tag.Get("locationName"), value.Interface(), value.Type().Name())

View File

@ -9,6 +9,8 @@ import (
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/private/protocol"
)
// UnmarshalXML deserializes an xml.Decoder into the container v. V
@ -253,8 +255,12 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
}
r.Set(reflect.ValueOf(&v))
case *time.Time:
const ISO8601UTC = "2006-01-02T15:04:05Z"
t, err := time.Parse(ISO8601UTC, node.Text)
format := tag.Get("timestampFormat")
if len(format) == 0 {
format = protocol.ISO8601TimeFormatName
}
t, err := protocol.ParseTime(format, node.Text)
if err != nil {
return err
}

View File

@ -29,6 +29,7 @@ func NewXMLElement(name xml.Name) *XMLNode {
// AddChild adds child to the XMLNode.
func (n *XMLNode) AddChild(child *XMLNode) {
child.parent = n
if _, ok := n.Children[child.Name.Local]; !ok {
n.Children[child.Name.Local] = []*XMLNode{}
}

File diff suppressed because it is too large

View File

@ -5,8 +5,10 @@
//
// Amazon EC2 Auto Scaling is designed to automatically launch or terminate
// EC2 instances based on user-defined policies, schedules, and health checks.
// Use this service in conjunction with the AWS Auto Scaling, Amazon CloudWatch,
// and Elastic Load Balancing services.
// Use this service with AWS Auto Scaling, Amazon CloudWatch, and Elastic Load
// Balancing.
//
// For more information, see the Amazon EC2 Auto Scaling User Guide (http://docs.aws.amazon.com/autoscaling/ec2/userguide/what-is-amazon-ec2-auto-scaling.html).
//
// See https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01 for more information on this service.
//

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff