chore: upgrade dependencies

This commit is contained in:
2022-06-09 12:30:53 +02:00
parent 7203f3d6a1
commit dcb93ec8f7
518 changed files with 27809 additions and 3222 deletions


@@ -0,0 +1,40 @@
# v1.1.7 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.6 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.5 (2022-04-27)
* **Bug Fix**: Fixes a bug that could cause the SigV4 payload hash to be incorrectly encoded, leading to signing errors.
# v1.1.4 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.3 (2022-03-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.2 (2022-03-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.1 (2022-03-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.0 (2022-03-08)
* **Feature**: Updates the SDK's checksum validation logic to require opt-in to output response payload validation. The SDK was always performing output response payload checksum validation, not respecting the output validation model option. Fixes [#1606](https://github.com/aws/aws-sdk-go-v2/issues/1606)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.0 (2022-02-24)
* **Release**: New module for computing checksums
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
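
The v1.1.0 entry above changed response payload checksum validation from always-on to opt-in. A hedged sketch of what that opt-in might look like from a caller's side, assuming the S3 client's GetObjectInput exposes a ChecksumMode field with a ChecksumModeEnabled value (neither is shown in this diff):

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// getWithValidation opts this GetObject call in to response payload checksum
// validation; without ChecksumMode the SDK no longer validates the payload.
func getWithValidation(ctx context.Context, client *s3.Client) (*s3.GetObjectOutput, error) {
	return client.GetObject(ctx, &s3.GetObjectInput{
		Bucket:       aws.String("my-bucket"),
		Key:          aws.String("my-object"),
		ChecksumMode: types.ChecksumModeEnabled, // opt in to output validation
	})
}
```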


@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -0,0 +1,323 @@
package checksum
import (
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"fmt"
"hash"
"hash/crc32"
"io"
"strings"
"sync"
)
// Algorithm represents the checksum algorithms supported
type Algorithm string
// Enumeration values for supported checksum Algorithms.
const (
// AlgorithmCRC32C represents CRC32C hash algorithm
AlgorithmCRC32C Algorithm = "CRC32C"
// AlgorithmCRC32 represents CRC32 hash algorithm
AlgorithmCRC32 Algorithm = "CRC32"
// AlgorithmSHA1 represents SHA1 hash algorithm
AlgorithmSHA1 Algorithm = "SHA1"
// AlgorithmSHA256 represents SHA256 hash algorithm
AlgorithmSHA256 Algorithm = "SHA256"
)
var supportedAlgorithms = []Algorithm{
AlgorithmCRC32C,
AlgorithmCRC32,
AlgorithmSHA1,
AlgorithmSHA256,
}
func (a Algorithm) String() string { return string(a) }
// ParseAlgorithm attempts to parse the provided value into a checksum
// algorithm, matching without case. Returns the algorithm matched, or an error
// if the algorithm wasn't matched.
func ParseAlgorithm(v string) (Algorithm, error) {
for _, a := range supportedAlgorithms {
if strings.EqualFold(string(a), v) {
return a, nil
}
}
return "", fmt.Errorf("unknown checksum algorithm, %v", v)
}
// FilterSupportedAlgorithms filters the set of algorithms, returning a slice
// of algorithms that are supported.
func FilterSupportedAlgorithms(vs []string) []Algorithm {
found := map[Algorithm]struct{}{}
supported := make([]Algorithm, 0, len(supportedAlgorithms))
for _, v := range vs {
for _, a := range supportedAlgorithms {
// Only consider algorithms that are supported
if !strings.EqualFold(v, string(a)) {
continue
}
// Ignore duplicate algorithms in list.
if _, ok := found[a]; ok {
continue
}
supported = append(supported, a)
found[a] = struct{}{}
}
}
return supported
}
// NewAlgorithmHash returns a hash.Hash for the checksum algorithm. Error is
// returned if the algorithm is unknown.
func NewAlgorithmHash(v Algorithm) (hash.Hash, error) {
switch v {
case AlgorithmSHA1:
return sha1.New(), nil
case AlgorithmSHA256:
return sha256.New(), nil
case AlgorithmCRC32:
return crc32.NewIEEE(), nil
case AlgorithmCRC32C:
return crc32.New(crc32.MakeTable(crc32.Castagnoli)), nil
default:
return nil, fmt.Errorf("unknown checksum algorithm, %v", v)
}
}
// AlgorithmChecksumLength returns the length of the algorithm's checksum in
// bytes. If the algorithm is not known, an error is returned.
func AlgorithmChecksumLength(v Algorithm) (int, error) {
switch v {
case AlgorithmSHA1:
return sha1.Size, nil
case AlgorithmSHA256:
return sha256.Size, nil
case AlgorithmCRC32:
return crc32.Size, nil
case AlgorithmCRC32C:
return crc32.Size, nil
default:
return 0, fmt.Errorf("unknown checksum algorithm, %v", v)
}
}
const awsChecksumHeaderPrefix = "x-amz-checksum-"
// AlgorithmHTTPHeader returns the HTTP header for the algorithm's hash.
func AlgorithmHTTPHeader(v Algorithm) string {
return awsChecksumHeaderPrefix + strings.ToLower(string(v))
}
// base64EncodeHashSum computes the base64 encoded checksum of a given running
// hash. The running hash must already have content written to it. Returns the
// byte slice of the base64 encoded checksum.
func base64EncodeHashSum(h hash.Hash) []byte {
sum := h.Sum(nil)
sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
base64.StdEncoding.Encode(sum64, sum)
return sum64
}
// hexEncodeHashSum computes the hex encoded checksum of a given running hash.
// The running hash must already have content written to it. Returns the byte
// slice of the hex encoded checksum.
func hexEncodeHashSum(h hash.Hash) []byte {
sum := h.Sum(nil)
sumHex := make([]byte, hex.EncodedLen(len(sum)))
hex.Encode(sumHex, sum)
return sumHex
}
// computeMD5Checksum computes base64 MD5 checksum of an io.Reader's contents.
// Returns the byte slice of MD5 checksum and an error.
func computeMD5Checksum(r io.Reader) ([]byte, error) {
h := md5.New()
// Copy errors may be assumed to be from the body.
if _, err := io.Copy(h, r); err != nil {
return nil, fmt.Errorf("failed compute MD5 hash of reader, %w", err)
}
// Encode the MD5 checksum in base64.
return base64EncodeHashSum(h), nil
}
// computeChecksumReader provides a reader wrapping an underlying io.Reader to
// compute the checksum of the stream's bytes.
type computeChecksumReader struct {
stream io.Reader
algorithm Algorithm
hasher hash.Hash
base64ChecksumLen int
mux sync.RWMutex
lockedChecksum string
lockedErr error
}
// newComputeChecksumReader returns a computeChecksumReader for the stream and
// algorithm specified. Returns error if unable to create the reader, or
// algorithm is unknown.
func newComputeChecksumReader(stream io.Reader, algorithm Algorithm) (*computeChecksumReader, error) {
hasher, err := NewAlgorithmHash(algorithm)
if err != nil {
return nil, err
}
checksumLength, err := AlgorithmChecksumLength(algorithm)
if err != nil {
return nil, err
}
return &computeChecksumReader{
stream: io.TeeReader(stream, hasher),
algorithm: algorithm,
hasher: hasher,
base64ChecksumLen: base64.StdEncoding.EncodedLen(checksumLength),
}, nil
}
// Read wraps the underlying reader. When the underlying reader returns EOF,
// the checksum of the reader will be computed, and can be retrieved with
// ChecksumBase64String.
func (r *computeChecksumReader) Read(p []byte) (int, error) {
n, err := r.stream.Read(p)
if err == nil {
return n, nil
} else if err != io.EOF {
r.mux.Lock()
defer r.mux.Unlock()
r.lockedErr = err
return n, err
}
b := base64EncodeHashSum(r.hasher)
r.mux.Lock()
defer r.mux.Unlock()
r.lockedChecksum = string(b)
return n, err
}
func (r *computeChecksumReader) Algorithm() Algorithm {
return r.algorithm
}
// Base64ChecksumLength returns the base64 encoded length of the checksum for
// algorithm.
func (r *computeChecksumReader) Base64ChecksumLength() int {
return r.base64ChecksumLen
}
// Base64Checksum returns the base64 checksum for the algorithm, or error if
// the underlying reader returned a non-EOF error.
//
// Safe to be called concurrently, but will return an error until after the
// underlying reader returns EOF.
func (r *computeChecksumReader) Base64Checksum() (string, error) {
r.mux.RLock()
defer r.mux.RUnlock()
if r.lockedErr != nil {
return "", r.lockedErr
}
if r.lockedChecksum == "" {
return "", fmt.Errorf(
"checksum not available yet, called before reader returns EOF",
)
}
return r.lockedChecksum, nil
}
// validateChecksumReader implements io.ReadCloser interface. The wrapper
// performs checksum validation when the underlying reader has been fully read.
type validateChecksumReader struct {
originalBody io.ReadCloser
body io.Reader
hasher hash.Hash
algorithm Algorithm
expectChecksum string
}
// newValidateChecksumReader returns a configured io.ReadCloser that performs
// checksum validation when the underlying reader has been fully read.
func newValidateChecksumReader(
body io.ReadCloser,
algorithm Algorithm,
expectChecksum string,
) (*validateChecksumReader, error) {
hasher, err := NewAlgorithmHash(algorithm)
if err != nil {
return nil, err
}
return &validateChecksumReader{
originalBody: body,
body: io.TeeReader(body, hasher),
hasher: hasher,
algorithm: algorithm,
expectChecksum: expectChecksum,
}, nil
}
// Read attempts to read from the underlying stream while also updating the
// running hash. If the underlying stream returns with an EOF error, the
// checksum of the stream will be collected, and compared against the expected
// checksum. If the checksums do not match, an error will be returned.
//
// If a non-EOF error occurs when reading the underlying stream, that error
// will be returned and the checksum for the stream will be discarded.
func (c *validateChecksumReader) Read(p []byte) (n int, err error) {
n, err = c.body.Read(p)
if err == io.EOF {
if checksumErr := c.validateChecksum(); checksumErr != nil {
return n, checksumErr
}
}
return n, err
}
// Close closes the underlying reader, returning any error that occurred in the
// underlying reader.
func (c *validateChecksumReader) Close() (err error) {
return c.originalBody.Close()
}
func (c *validateChecksumReader) validateChecksum() error {
// Compute base64 encoded checksum hash of the payload's read bytes.
v := base64EncodeHashSum(c.hasher)
if e, a := c.expectChecksum, string(v); !strings.EqualFold(e, a) {
return validationError{
Algorithm: c.algorithm, Expect: e, Actual: a,
}
}
return nil
}
type validationError struct {
Algorithm Algorithm
Expect string
Actual string
}
func (v validationError) Error() string {
return fmt.Sprintf("checksum did not match: algorithm %v, expect %v, actual %v",
v.Algorithm, v.Expect, v.Actual)
}
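
As a reading aid (not part of this commit), a minimal in-package sketch of how the exported helpers above compose to produce a checksum header value; the test name and payload are illustrative only:

```go
package checksum

import (
	"encoding/base64"
	"io"
	"strings"
	"testing"
)

func TestAlgorithmHelpersSketch(t *testing.T) {
	// Parsing is case-insensitive against the supported algorithm set.
	algo, err := ParseAlgorithm("crc32c")
	if err != nil {
		t.Fatal(err)
	}
	// NewAlgorithmHash returns the matching hash.Hash implementation.
	h, err := NewAlgorithmHash(algo)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := io.Copy(h, strings.NewReader("Hello world")); err != nil {
		t.Fatal(err)
	}
	// e.g. header "x-amz-checksum-crc32c" carrying the base64 encoded digest.
	t.Logf("%s: %s", AlgorithmHTTPHeader(algo), base64.StdEncoding.EncodeToString(h.Sum(nil)))
}
```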


@@ -0,0 +1,389 @@
package checksum
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
)
const (
crlf = "\r\n"
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
defaultChunkLength = 1024 * 64
awsTrailerHeaderName = "x-amz-trailer"
decodedContentLengthHeaderName = "x-amz-decoded-content-length"
contentEncodingHeaderName = "content-encoding"
awsChunkedContentEncodingHeaderValue = "aws-chunked"
trailerKeyValueSeparator = ":"
)
var (
crlfBytes = []byte(crlf)
finalChunkBytes = []byte("0" + crlf)
)
type awsChunkedEncodingOptions struct {
// The total size of the stream. For unsigned encoding this implies that
// there will only be a single chunk containing the underlying payload,
// unless ChunkLength is also specified.
StreamLength int64
// Set of trailer key:value pairs that will be appended to the end of the
// payload after the end chunk has been written.
Trailers map[string]awsChunkedTrailerValue
// The maximum size of each chunk to be sent. The default value of -1 signals
// that the optimal chunk length will be used automatically. ChunkLength must
// be at least 8KB.
//
// If ChunkLength and StreamLength are both specified, the stream will be
// broken up into ChunkLength chunks. The encoded length of the aws-chunked
// encoding can still be determined as long as all trailers, if any, have a
// fixed length.
ChunkLength int
}
type awsChunkedTrailerValue struct {
// Function to retrieve the value of the trailer. Will only be called after
// the underlying stream returns EOF error.
Get func() (string, error)
// If the length of the value can be pre-determined, and is constant
// specify the length. A value of -1 means the length is unknown, or
// cannot be pre-determined.
Length int
}
// awsChunkedEncoding provides a reader that wraps the payload such that the
// payload is read as a single aws-chunked payload. This reader can only be used
// if the content length of the payload is known. Content-Length is used as the
// size of the single payload chunk. The final chunk and trailing checksum are
// appended at the end.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
//
// Here is the aws-chunked payload stream as read from the awsChunkedEncoding
// if the original request stream is "Hello world" and the checksum hash used is SHA256 (<b> is the chunk length in hex):
//
// <b>\r\n
// Hello world\r\n
// 0\r\n
// x-amz-checksum-sha256:ZOyIygCyaOW6GjVnihtTFtIS9PNmskdyMlNKiuyjfzw=\r\n
// \r\n
type awsChunkedEncoding struct {
options awsChunkedEncodingOptions
encodedStream io.Reader
trailerEncodedLength int
}
// newUnsignedAWSChunkedEncoding returns a new awsChunkedEncoding configured
// for unsigned aws-chunked content encoding. Any additional trailers that need
// to be appended after the end chunk must be included via Trailer
// callbacks.
func newUnsignedAWSChunkedEncoding(
stream io.Reader,
optFns ...func(*awsChunkedEncodingOptions),
) *awsChunkedEncoding {
options := awsChunkedEncodingOptions{
Trailers: map[string]awsChunkedTrailerValue{},
StreamLength: -1,
ChunkLength: -1,
}
for _, fn := range optFns {
fn(&options)
}
var chunkReader io.Reader
if options.ChunkLength != -1 || options.StreamLength == -1 {
if options.ChunkLength == -1 {
options.ChunkLength = defaultChunkLength
}
chunkReader = newBufferedAWSChunkReader(stream, options.ChunkLength)
} else {
chunkReader = newUnsignedChunkReader(stream, options.StreamLength)
}
trailerReader := newAWSChunkedTrailerReader(options.Trailers)
return &awsChunkedEncoding{
options: options,
encodedStream: io.MultiReader(chunkReader,
trailerReader,
bytes.NewBuffer(crlfBytes),
),
trailerEncodedLength: trailerReader.EncodedLength(),
}
}
// EncodedLength returns the final length of the aws-chunked content encoded
// stream if it can be determined without reading the underlying stream or lazy
// header values, otherwise -1 is returned.
func (e *awsChunkedEncoding) EncodedLength() int64 {
var length int64
if e.options.StreamLength == -1 || e.trailerEncodedLength == -1 {
return -1
}
if e.options.StreamLength != 0 {
// If the stream length is known, and there is no chunk length specified,
// only a single chunk will be used. Otherwise the stream length needs to
// include the multiple chunk padding content.
if e.options.ChunkLength == -1 {
length += getUnsignedChunkBytesLength(e.options.StreamLength)
} else {
// Compute chunk header and payload length
numChunks := e.options.StreamLength / int64(e.options.ChunkLength)
length += numChunks * getUnsignedChunkBytesLength(int64(e.options.ChunkLength))
if remainder := e.options.StreamLength % int64(e.options.ChunkLength); remainder != 0 {
length += getUnsignedChunkBytesLength(remainder)
}
}
}
// End chunk
length += int64(len(finalChunkBytes))
// Trailers
length += int64(e.trailerEncodedLength)
// Encoding terminator
length += int64(len(crlf))
return length
}
func getUnsignedChunkBytesLength(payloadLength int64) int64 {
payloadLengthStr := strconv.FormatInt(payloadLength, 16)
return int64(len(payloadLengthStr)) + int64(len(crlf)) + payloadLength + int64(len(crlf))
}
// HTTPHeaders returns the set of headers that must be included in the request for
// aws-chunked to work. This includes the content-encoding: aws-chunked header.
//
// If there are multiple layered content encodings, the aws-chunked encoding
// must be appended to the previous layers of the stream's encoding. The best way
// to do this is to append all header values returned to the HTTP request's set
// of headers.
func (e *awsChunkedEncoding) HTTPHeaders() map[string][]string {
headers := map[string][]string{
contentEncodingHeaderName: {
awsChunkedContentEncodingHeaderValue,
},
}
if len(e.options.Trailers) != 0 {
trailers := make([]string, 0, len(e.options.Trailers))
for name := range e.options.Trailers {
trailers = append(trailers, strings.ToLower(name))
}
headers[awsTrailerHeaderName] = trailers
}
return headers
}
func (e *awsChunkedEncoding) Read(b []byte) (n int, err error) {
return e.encodedStream.Read(b)
}
// awsChunkedTrailerReader provides a lazy reader for reading of aws-chunked
// content encoded trailers. The trailer values will not be retrieved until the
// reader is read from.
type awsChunkedTrailerReader struct {
reader *bytes.Buffer
trailers map[string]awsChunkedTrailerValue
trailerEncodedLength int
}
// newAWSChunkedTrailerReader returns an initialized awsChunkedTrailerReader
// for lazily reading aws-chunked content encoded trailers.
func newAWSChunkedTrailerReader(trailers map[string]awsChunkedTrailerValue) *awsChunkedTrailerReader {
return &awsChunkedTrailerReader{
trailers: trailers,
trailerEncodedLength: trailerEncodedLength(trailers),
}
}
func trailerEncodedLength(trailers map[string]awsChunkedTrailerValue) (length int) {
for name, trailer := range trailers {
length += len(name) + len(trailerKeyValueSeparator)
l := trailer.Length
if l == -1 {
return -1
}
length += l + len(crlf)
}
return length
}
// EncodedLength returns the length of the encoded trailers if the length could
// be determined without retrieving the header values. Returns -1 if length is
// unknown.
func (r *awsChunkedTrailerReader) EncodedLength() (length int) {
return r.trailerEncodedLength
}
// Read populates the passed in byte slice with bytes from the encoded
// trailers. Will lazily read the trailer values the first time Read is called.
func (r *awsChunkedTrailerReader) Read(p []byte) (int, error) {
if r.trailerEncodedLength == 0 {
return 0, io.EOF
}
if r.reader == nil {
trailerLen := r.trailerEncodedLength
if r.trailerEncodedLength == -1 {
trailerLen = 0
}
r.reader = bytes.NewBuffer(make([]byte, 0, trailerLen))
for name, trailer := range r.trailers {
r.reader.WriteString(name)
r.reader.WriteString(trailerKeyValueSeparator)
v, err := trailer.Get()
if err != nil {
return 0, fmt.Errorf("failed to get trailer value, %w", err)
}
r.reader.WriteString(v)
r.reader.WriteString(crlf)
}
}
return r.reader.Read(p)
}
// newUnsignedChunkReader returns an io.Reader encoding the underlying reader
// as unsigned aws-chunked chunks. The returned reader will also include the
// end chunk, but not the aws-chunked final `crlf` segment so trailers can be
// added.
//
// If the payload size is -1 (unknown length), the content will be buffered in
// defaultChunkLength chunks before being wrapped in aws-chunked chunk encoding.
func newUnsignedChunkReader(reader io.Reader, payloadSize int64) io.Reader {
if payloadSize == -1 {
return newBufferedAWSChunkReader(reader, defaultChunkLength)
}
var endChunk bytes.Buffer
if payloadSize == 0 {
endChunk.Write(finalChunkBytes)
return &endChunk
}
endChunk.WriteString(crlf)
endChunk.Write(finalChunkBytes)
var header bytes.Buffer
header.WriteString(strconv.FormatInt(payloadSize, 16))
header.WriteString(crlf)
return io.MultiReader(
&header,
reader,
&endChunk,
)
}
// Provides a buffered aws-chunked chunk encoder of an underlying io.Reader.
// Will include end chunk, but not the aws-chunked final `crlf` segment so
// trailers can be added.
//
// Note does not implement support for chunk extensions, e.g. chunk signing.
type bufferedAWSChunkReader struct {
reader io.Reader
chunkSize int
chunkSizeStr string
headerBuffer *bytes.Buffer
chunkBuffer *bytes.Buffer
multiReader io.Reader
multiReaderLen int
endChunkDone bool
}
// newBufferedAWSChunkReader returns a bufferedAWSChunkReader for reading
// aws-chunked encoded chunks.
func newBufferedAWSChunkReader(reader io.Reader, chunkSize int) *bufferedAWSChunkReader {
return &bufferedAWSChunkReader{
reader: reader,
chunkSize: chunkSize,
chunkSizeStr: strconv.FormatInt(int64(chunkSize), 16),
headerBuffer: bytes.NewBuffer(make([]byte, 0, 64)),
chunkBuffer: bytes.NewBuffer(make([]byte, 0, chunkSize+len(crlf))),
}
}
// Read attempts to read from the underlying io.Reader, writing aws-chunked
// chunk encoded bytes to p. When the underlying io.Reader has been completely
// read, the end chunk will be available. Once the end chunk is read, the reader
// will return EOF.
func (r *bufferedAWSChunkReader) Read(p []byte) (n int, err error) {
if r.multiReaderLen == 0 && r.endChunkDone {
return 0, io.EOF
}
if r.multiReader == nil || r.multiReaderLen == 0 {
r.multiReader, r.multiReaderLen, err = r.newMultiReader()
if err != nil {
return 0, err
}
}
n, err = r.multiReader.Read(p)
r.multiReaderLen -= n
if err == io.EOF && !r.endChunkDone {
// Edge case handling when the multi-reader has been completely read,
// and returned an EOF, make sure that EOF only gets returned if the
// end chunk was included in the multi-reader. Otherwise, the next call
// to read will initialize the next chunk's multi-reader.
err = nil
}
return n, err
}
// newMultiReader returns a new io.Reader for wrapping the next chunk. Will
// return an error if the underlying reader cannot be read from. Will never
// return io.EOF.
func (r *bufferedAWSChunkReader) newMultiReader() (io.Reader, int, error) {
// io.Copy eats the io.EOF returned by io.LimitReader. Any error that
// occurs here is due to an actual read error.
n, err := io.Copy(r.chunkBuffer, io.LimitReader(r.reader, int64(r.chunkSize)))
if err != nil {
return nil, 0, err
}
if n == 0 {
// Early exit writing out only the end chunk. This does not include
// aws-chunk's final `crlf` so that trailers can still be added by
// upstream reader.
r.headerBuffer.Reset()
r.headerBuffer.WriteString("0")
r.headerBuffer.WriteString(crlf)
r.endChunkDone = true
return r.headerBuffer, r.headerBuffer.Len(), nil
}
r.chunkBuffer.WriteString(crlf)
chunkSizeStr := r.chunkSizeStr
if int(n) != r.chunkSize {
chunkSizeStr = strconv.FormatInt(n, 16)
}
r.headerBuffer.Reset()
r.headerBuffer.WriteString(chunkSizeStr)
r.headerBuffer.WriteString(crlf)
return io.MultiReader(
r.headerBuffer,
r.chunkBuffer,
), r.headerBuffer.Len() + r.chunkBuffer.Len(), nil
}
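
A hypothetical in-package sketch (not part of this commit) tying the pieces above together: a computeChecksumReader feeding the unsigned aws-chunked encoder, with the checksum emitted as a lazily resolved trailer, mirroring the wire format documented on awsChunkedEncoding; the payload and algorithm choice are illustrative:

```go
package checksum

import (
	"io"
	"strings"
	"testing"
)

func TestUnsignedAWSChunkedSketch(t *testing.T) {
	body := "Hello world"
	cr, err := newComputeChecksumReader(strings.NewReader(body), AlgorithmSHA256)
	if err != nil {
		t.Fatal(err)
	}
	enc := newUnsignedAWSChunkedEncoding(cr, func(o *awsChunkedEncodingOptions) {
		o.StreamLength = int64(len(body))
		// The trailer value is resolved only after the stream hits EOF.
		o.Trailers[AlgorithmHTTPHeader(cr.Algorithm())] = awsChunkedTrailerValue{
			Get:    cr.Base64Checksum,
			Length: cr.Base64ChecksumLength(),
		}
	})
	encoded, err := io.ReadAll(enc)
	if err != nil {
		t.Fatal(err)
	}
	// Roughly: "b\r\nHello world\r\n0\r\nx-amz-checksum-sha256:<digest>\r\n\r\n",
	// and EncodedLength agrees because the trailer length is known up front.
	t.Logf("%q (declared length %d)", encoded, enc.EncodedLength())
}
```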


@@ -0,0 +1,6 @@
// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
package checksum
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.1.7"


@@ -0,0 +1,185 @@
package checksum
import (
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// InputMiddlewareOptions provides the options for the request
// checksum middleware setup.
type InputMiddlewareOptions struct {
// GetAlgorithm is a function to get the checksum algorithm of the
// input payload from the input parameters.
//
// Given the input parameter value, the function must return the algorithm
// and true, or false if no algorithm is specified.
GetAlgorithm func(interface{}) (string, bool)
// Forces the middleware to compute the input payload's checksum. The
// request will fail if the algorithm is not specified or unable to compute
// the checksum.
RequireChecksum bool
// Enables support for wrapping the serialized input payload with a
// content-encoding: aws-chunked wrapper, and including a trailer for the
// algorithm's checksum value.
//
// The checksum will not be computed, nor added as trailing checksum, if
// the Algorithm's header is already set on the request.
EnableTrailingChecksum bool
// Enables support for computing the SHA256 checksum of input payloads
// along with the algorithm specified checksum. Prevents downstream
// middleware handlers (computePayloadSHA256) from re-reading the payload.
//
// The SHA256 payload checksum will only be computed for requests
// that are not TLS, or do not enable trailing checksums.
//
// The SHA256 payload hash will not be computed, if the Algorithm's header
// is already set on the request.
EnableComputeSHA256PayloadHash bool
// Enables support for setting the aws-chunked decoded content length
// header for the decoded length of the underlying stream. Will only be set
// when used with trailing checksums, and aws-chunked content-encoding.
EnableDecodedContentLengthHeader bool
}
// AddInputMiddleware adds the middleware for performing checksum computing
// of request payloads, and checksum validation of response payloads.
func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) (err error) {
// TODO ensure this works correctly with presigned URLs
// Middleware stack:
// * (OK)(Initialize) --none--
// * (OK)(Serialize) EndpointResolver
// * (OK)(Build) ComputeContentLength
// * (AD)(Build) Header ComputeInputPayloadChecksum
// * SIGNED Payload - If HTTP && not support trailing checksum
// * UNSIGNED Payload - If HTTPS && not support trailing checksum
// * (RM)(Build) ContentChecksum - OK to remove
// * (OK)(Build) ComputePayloadHash
// * v4.dynamicPayloadSigningMiddleware
// * v4.computePayloadSHA256
// * v4.unsignedPayload
// (OK)(Build) Set computedPayloadHash header
// * (OK)(Finalize) Retry
// * (AD)(Finalize) Trailer ComputeInputPayloadChecksum,
// * Requires HTTPS && support trailing checksum
// * UNSIGNED Payload
// * Finalize run if HTTPS && support trailing checksum
// * (OK)(Finalize) Signing
// * (OK)(Deserialize) --none--
// Initial checksum configuration look up middleware
err = stack.Initialize.Add(&setupInputContext{
GetAlgorithm: options.GetAlgorithm,
}, middleware.Before)
if err != nil {
return err
}
stack.Build.Remove("ContentChecksum")
// Create the compute checksum middleware that will be added as both a
// build and finalize handler.
inputChecksum := &computeInputPayloadChecksum{
RequireChecksum: options.RequireChecksum,
EnableTrailingChecksum: options.EnableTrailingChecksum,
EnableComputePayloadHash: options.EnableComputeSHA256PayloadHash,
EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader,
}
// Insert header checksum after ComputeContentLength middleware, must also
// be before the computePayloadHash middleware handlers.
err = stack.Build.Insert(inputChecksum,
(*smithyhttp.ComputeContentLength)(nil).ID(),
middleware.After)
if err != nil {
return err
}
// If trailing checksum is not supported no need for finalize handler to be added.
if options.EnableTrailingChecksum {
err = stack.Finalize.Insert(inputChecksum, "Retry", middleware.After)
if err != nil {
return err
}
}
return nil
}
// RemoveInputMiddleware removes the compute input payload checksum middleware
// handlers from the stack.
func RemoveInputMiddleware(stack *middleware.Stack) {
id := (*setupInputContext)(nil).ID()
stack.Initialize.Remove(id)
id = (*computeInputPayloadChecksum)(nil).ID()
stack.Build.Remove(id)
stack.Finalize.Remove(id)
}
// OutputMiddlewareOptions provides options for configuring output checksum
// validation middleware.
type OutputMiddlewareOptions struct {
// GetValidationMode is a function to get the checksum validation
// mode of the output payload from the input parameters.
//
// Given the input parameter value, the function must return the validation
// mode and true, or false if no mode is specified.
GetValidationMode func(interface{}) (string, bool)
// The set of checksum algorithms that should be used for response payload
// checksum validation. The algorithm(s) used will be a union of the
// output's returned algorithms and this set.
//
// Only the first algorithm in the union is currently used.
ValidationAlgorithms []string
// If set the middleware will ignore output multipart checksums. Otherwise
// a checksum format error will be returned by the middleware.
IgnoreMultipartValidation bool
// When set the middleware will log when output does not have checksum or
// algorithm to validate.
LogValidationSkipped bool
// When set the middleware will log when the output contains a multipart
// checksum that was skipped and not validated.
LogMultipartValidationSkipped bool
}
// AddOutputMiddleware adds the middleware for validating response payload's
// checksum.
func AddOutputMiddleware(stack *middleware.Stack, options OutputMiddlewareOptions) error {
err := stack.Initialize.Add(&setupOutputContext{
GetValidationMode: options.GetValidationMode,
}, middleware.Before)
if err != nil {
return err
}
// Resolve a supported priority order list of algorithms to validate.
algorithms := FilterSupportedAlgorithms(options.ValidationAlgorithms)
m := &validateOutputPayloadChecksum{
Algorithms: algorithms,
IgnoreMultipartValidation: options.IgnoreMultipartValidation,
LogMultipartValidationSkipped: options.LogMultipartValidationSkipped,
LogValidationSkipped: options.LogValidationSkipped,
}
return stack.Deserialize.Add(m, middleware.After)
}
// RemoveOutputMiddleware removes the output payload checksum validation
// middleware handlers from the stack.
func RemoveOutputMiddleware(stack *middleware.Stack) {
id := (*setupOutputContext)(nil).ID()
stack.Initialize.Remove(id)
id = (*validateOutputPayloadChecksum)(nil).ID()
stack.Deserialize.Remove(id)
}
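
A hedged sketch (not part of this commit) of how a generated service client might wire these entry points onto an operation's middleware stack; PutWidgetInput, GetWidgetInput, and their fields are hypothetical stand-ins for generated input shapes:

```go
package checksum

import "github.com/aws/smithy-go/middleware"

// Hypothetical generated input shapes, for illustration only.
type PutWidgetInput struct{ ChecksumAlgorithm string }
type GetWidgetInput struct{ ChecksumMode string }

func addPutWidgetChecksumMiddleware(stack *middleware.Stack) error {
	return AddInputMiddleware(stack, InputMiddlewareOptions{
		GetAlgorithm: func(in interface{}) (string, bool) {
			v, ok := in.(*PutWidgetInput)
			if !ok || v.ChecksumAlgorithm == "" {
				return "", false
			}
			return v.ChecksumAlgorithm, true
		},
		RequireChecksum:                  false,
		EnableTrailingChecksum:           true,
		EnableComputeSHA256PayloadHash:   true,
		EnableDecodedContentLengthHeader: true,
	})
}

func addGetWidgetValidationMiddleware(stack *middleware.Stack) error {
	return AddOutputMiddleware(stack, OutputMiddlewareOptions{
		GetValidationMode: func(in interface{}) (string, bool) {
			v, ok := in.(*GetWidgetInput)
			if !ok || v.ChecksumMode == "" {
				return "", false
			}
			return v.ChecksumMode, true
		},
		ValidationAlgorithms:          []string{"CRC32C", "CRC32", "SHA1", "SHA256"},
		IgnoreMultipartValidation:     true,
		LogValidationSkipped:          true,
		LogMultipartValidationSkipped: true,
	})
}
```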


@@ -0,0 +1,480 @@
package checksum
import (
"context"
"crypto/sha256"
"fmt"
"hash"
"io"
"strconv"
"strings"
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
const (
contentMD5Header = "Content-Md5"
streamingUnsignedPayloadTrailerPayloadHash = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
)
// computedInputChecksumsKey is the metadata key for recording the algorithm the
// checksum was computed for and the checksum value.
type computedInputChecksumsKey struct{}
// GetComputedInputChecksums returns the map of checksum algorithm to their
// computed value stored in the middleware Metadata. Returns false if no values
// were stored in the Metadata.
func GetComputedInputChecksums(m middleware.Metadata) (map[string]string, bool) {
vs, ok := m.Get(computedInputChecksumsKey{}).(map[string]string)
return vs, ok
}
// SetComputedInputChecksums stores the map of checksum algorithm to their
// computed value in the middleware Metadata. Overwrites any values that
// currently exist in the metadata.
func SetComputedInputChecksums(m *middleware.Metadata, vs map[string]string) {
m.Set(computedInputChecksumsKey{}, vs)
}
// computeInputPayloadChecksum middleware computes payload checksum
type computeInputPayloadChecksum struct {
// Enables support for wrapping the serialized input payload with a
// content-encoding: aws-chunked wrapper, and including a trailer for the
// algorithm's checksum value.
//
// The checksum will not be computed, nor added as trailing checksum, if
// the Algorithm's header is already set on the request.
EnableTrailingChecksum bool
// States that a checksum is required to be included for the operation. If
// Input does not specify a checksum, a fallback to the built-in MD5 checksum
// is used.
//
// Replaces smithy-go's ContentChecksum middleware.
RequireChecksum bool
// Enables support for computing the SHA256 checksum of input payloads
// along with the algorithm specified checksum. Prevents downstream
// middleware handlers (computePayloadSHA256) from re-reading the payload.
//
// The SHA256 payload hash will only be computed for requests
// that are not TLS, or do not enable trailing checksums.
//
// The SHA256 payload hash will not be computed, if the Algorithm's header
// is already set on the request.
EnableComputePayloadHash bool
// Enables support for setting the aws-chunked decoded content length
// header for the decoded length of the underlying stream. Will only be set
// when used with trailing checksums, and aws-chunked content-encoding.
EnableDecodedContentLengthHeader bool
buildHandlerRun bool
deferToFinalizeHandler bool
}
// ID provides the middleware's identifier.
func (m *computeInputPayloadChecksum) ID() string {
return "AWSChecksum:ComputeInputPayloadChecksum"
}
type computeInputHeaderChecksumError struct {
Msg string
Err error
}
func (e computeInputHeaderChecksumError) Error() string {
const intro = "compute input header checksum failed"
if e.Err != nil {
return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err)
}
return fmt.Sprintf("%s, %s", intro, e.Msg)
}
func (e computeInputHeaderChecksumError) Unwrap() error { return e.Err }
// HandleBuild handles computing the payload's checksum, in the following cases:
// * Is HTTP, not HTTPS
// * RequireChecksum is true, and no checksums were specified via the Input
// * Trailing checksums are not supported
//
// The build handler must be inserted in the stack before ContentPayloadHash
// and after ComputeContentLength.
func (m *computeInputPayloadChecksum) HandleBuild(
ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
) (
out middleware.BuildOutput, metadata middleware.Metadata, err error,
) {
m.buildHandlerRun = true
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, computeInputHeaderChecksumError{
Msg: fmt.Sprintf("unknown request type %T", req),
}
}
var algorithm Algorithm
var checksum string
defer func() {
if algorithm == "" || checksum == "" || err != nil {
return
}
// Record the checksum and algorithm that was computed
SetComputedInputChecksums(&metadata, map[string]string{
string(algorithm): checksum,
})
}()
// If no algorithm was specified, and the operation requires a checksum,
// fallback to the legacy content MD5 checksum.
algorithm, ok, err = getInputAlgorithm(ctx)
if err != nil {
return out, metadata, err
} else if !ok {
if m.RequireChecksum {
checksum, err = setMD5Checksum(ctx, req)
if err != nil {
return out, metadata, computeInputHeaderChecksumError{
Msg: "failed to compute stream's MD5 checksum",
Err: err,
}
}
algorithm = Algorithm("MD5")
}
return next.HandleBuild(ctx, in)
}
// If the checksum header is already set nothing to do.
checksumHeader := AlgorithmHTTPHeader(algorithm)
if checksum = req.Header.Get(checksumHeader); checksum != "" {
return next.HandleBuild(ctx, in)
}
computePayloadHash := m.EnableComputePayloadHash
if v := v4.GetPayloadHash(ctx); v != "" {
computePayloadHash = false
}
stream := req.GetStream()
streamLength, err := getRequestStreamLength(req)
if err != nil {
return out, metadata, computeInputHeaderChecksumError{
Msg: "failed to determine stream length",
Err: err,
}
}
// If trailing checksums are supported, the request is HTTPS, and the
// stream is not nil or empty, there is nothing to do in the build stage.
// The checksum will be added to the request as a trailing checksum in the
// finalize handler.
//
// Nil and empty streams will always be handled as a request header,
// regardless if the operation supports trailing checksums or not.
if strings.EqualFold(req.URL.Scheme, "https") {
if stream != nil && streamLength != 0 && m.EnableTrailingChecksum {
if m.EnableComputePayloadHash {
// payload hash is set as header in Build middleware handler,
// ContentSHA256Header.
ctx = v4.SetPayloadHash(ctx, streamingUnsignedPayloadTrailerPayloadHash)
}
m.deferToFinalizeHandler = true
return next.HandleBuild(ctx, in)
}
// If trailing checksums are not enabled but the protocol is still HTTPS,
// disable computing the payload hash. The downstream middleware handler
// (ComputePayloadHash) will set the payload hash to unsigned payload,
// if signing was used.
computePayloadHash = false
}
// Only seekable streams are supported for non-trailing checksums, because
// the stream needs to be rewound before the handler can continue.
if stream != nil && !req.IsStreamSeekable() {
return out, metadata, computeInputHeaderChecksumError{
Msg: "unseekable stream is not supported without TLS and trailing checksum",
}
}
var sha256Checksum string
checksum, sha256Checksum, err = computeStreamChecksum(
algorithm, stream, computePayloadHash)
if err != nil {
return out, metadata, computeInputHeaderChecksumError{
Msg: "failed to compute stream checksum",
Err: err,
}
}
if err := req.RewindStream(); err != nil {
return out, metadata, computeInputHeaderChecksumError{
Msg: "failed to rewind stream",
Err: err,
}
}
req.Header.Set(checksumHeader, checksum)
if computePayloadHash {
ctx = v4.SetPayloadHash(ctx, sha256Checksum)
}
return next.HandleBuild(ctx, in)
}
type computeInputTrailingChecksumError struct {
Msg string
Err error
}
func (e computeInputTrailingChecksumError) Error() string {
const intro = "compute input trailing checksum failed"
if e.Err != nil {
return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err)
}
return fmt.Sprintf("%s, %s", intro, e.Msg)
}
func (e computeInputTrailingChecksumError) Unwrap() error { return e.Err }
// HandleFinalize handles computing the payload's checksum, in the following cases:
// * Is HTTPS, not HTTP
// * A checksum was specified via the Input
// * Trailing checksums are supported.
//
// The finalize handler must be inserted in the stack before Signing, and after Retry.
func (m *computeInputPayloadChecksum) HandleFinalize(
ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
) (
out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
) {
if !m.deferToFinalizeHandler {
if !m.buildHandlerRun {
return out, metadata, computeInputTrailingChecksumError{
Msg: "build handler was removed without also removing finalize handler",
}
}
return next.HandleFinalize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, computeInputTrailingChecksumError{
Msg: fmt.Sprintf("unknown request type %T", req),
}
}
// Trailing checksums are only supported when TLS is enabled.
if !strings.EqualFold(req.URL.Scheme, "https") {
return out, metadata, computeInputTrailingChecksumError{
Msg: "HTTPS required",
}
}
// If no algorithm was specified, there is nothing to do.
algorithm, ok, err := getInputAlgorithm(ctx)
if err != nil {
return out, metadata, computeInputTrailingChecksumError{
Msg: "failed to get algorithm",
Err: err,
}
} else if !ok {
return out, metadata, computeInputTrailingChecksumError{
Msg: "no algorithm specified",
}
}
// If the checksum header is already set before finalize could run, there
// is nothing to do.
checksumHeader := AlgorithmHTTPHeader(algorithm)
if req.Header.Get(checksumHeader) != "" {
return next.HandleFinalize(ctx, in)
}
stream := req.GetStream()
streamLength, err := getRequestStreamLength(req)
if err != nil {
return out, metadata, computeInputTrailingChecksumError{
Msg: "failed to determine stream length",
Err: err,
}
}
if stream == nil || streamLength == 0 {
// Nil and empty streams are handled by the Build handler. They are not
// supported by the trailing checksums finalize handler. There is no
// benefit to sending them as trailers compared to headers.
return out, metadata, computeInputTrailingChecksumError{
Msg: "nil or empty streams are not supported",
}
}
checksumReader, err := newComputeChecksumReader(stream, algorithm)
if err != nil {
return out, metadata, computeInputTrailingChecksumError{
Msg: "failed to created checksum reader",
Err: err,
}
}
awsChunkedReader := newUnsignedAWSChunkedEncoding(checksumReader,
func(o *awsChunkedEncodingOptions) {
o.Trailers[AlgorithmHTTPHeader(checksumReader.Algorithm())] = awsChunkedTrailerValue{
Get: checksumReader.Base64Checksum,
Length: checksumReader.Base64ChecksumLength(),
}
o.StreamLength = streamLength
})
for key, values := range awsChunkedReader.HTTPHeaders() {
for _, value := range values {
req.Header.Add(key, value)
}
}
// Setting the stream on the request will create a copy. The content length
// is not updated until after the request is copied to prevent impacting
// upstream middleware.
req, err = req.SetStream(awsChunkedReader)
if err != nil {
return out, metadata, computeInputTrailingChecksumError{
Msg: "failed updating request to trailing checksum wrapped stream",
Err: err,
}
}
req.ContentLength = awsChunkedReader.EncodedLength()
in.Request = req
// Add decoded content length header if original stream's content length is known.
if streamLength != -1 && m.EnableDecodedContentLengthHeader {
req.Header.Set(decodedContentLengthHeaderName, strconv.FormatInt(streamLength, 10))
}
out, metadata, err = next.HandleFinalize(ctx, in)
if err == nil {
checksum, err := checksumReader.Base64Checksum()
if err != nil {
return out, metadata, fmt.Errorf("failed to get computed checksum, %w", err)
}
// Record the checksum and algorithm that was computed
SetComputedInputChecksums(&metadata, map[string]string{
string(algorithm): checksum,
})
}
return out, metadata, err
}
func getInputAlgorithm(ctx context.Context) (Algorithm, bool, error) {
ctxAlgorithm := getContextInputAlgorithm(ctx)
if ctxAlgorithm == "" {
return "", false, nil
}
algorithm, err := ParseAlgorithm(ctxAlgorithm)
if err != nil {
return "", false, fmt.Errorf(
"failed to parse algorithm, %w", err)
}
return algorithm, true, nil
}
func computeStreamChecksum(algorithm Algorithm, stream io.Reader, computePayloadHash bool) (
checksum string, sha256Checksum string, err error,
) {
hasher, err := NewAlgorithmHash(algorithm)
if err != nil {
return "", "", fmt.Errorf(
"failed to get hasher for checksum algorithm, %w", err)
}
var sha256Hasher hash.Hash
var batchHasher io.Writer = hasher
// Compute payload hash for the protocol. To prevent another handler
// (computePayloadSHA256) re-reading body also compute the SHA256 for
// request signing. If configured checksum algorithm is SHA256, don't
// double wrap stream with another SHA256 hasher.
if computePayloadHash && algorithm != AlgorithmSHA256 {
sha256Hasher = sha256.New()
batchHasher = io.MultiWriter(hasher, sha256Hasher)
}
if stream != nil {
if _, err = io.Copy(batchHasher, stream); err != nil {
return "", "", fmt.Errorf(
"failed to read stream to compute hash, %w", err)
}
}
checksum = string(base64EncodeHashSum(hasher))
if computePayloadHash {
if algorithm != AlgorithmSHA256 {
sha256Checksum = string(hexEncodeHashSum(sha256Hasher))
} else {
sha256Checksum = string(hexEncodeHashSum(hasher))
}
}
return checksum, sha256Checksum, nil
}
func getRequestStreamLength(req *smithyhttp.Request) (int64, error) {
if v := req.ContentLength; v > 0 {
return v, nil
}
if length, ok, err := req.StreamLength(); err != nil {
return 0, fmt.Errorf("failed getting request stream's length, %w", err)
} else if ok {
return length, nil
}
return -1, nil
}
// setMD5Checksum computes the MD5 of the request payload and sets it to the
// Content-MD5 header. Returns the MD5 base64 encoded string, or an error.
//
// If the MD5 is already set as the Content-MD5 header, that value will be
// returned, and nothing else will be done.
//
// If the payload is empty, no MD5 will be computed. No error will be returned.
// Empty payloads do not have an MD5 value.
//
// Replaces the smithy-go middleware for httpChecksum trait.
func setMD5Checksum(ctx context.Context, req *smithyhttp.Request) (string, error) {
if v := req.Header.Get(contentMD5Header); len(v) != 0 {
return v, nil
}
stream := req.GetStream()
if stream == nil {
return "", nil
}
if !req.IsStreamSeekable() {
return "", fmt.Errorf(
"unseekable stream is not supported for computing md5 checksum")
}
v, err := computeMD5Checksum(stream)
if err != nil {
return "", err
}
if err := req.RewindStream(); err != nil {
return "", fmt.Errorf("failed to rewind stream after computing MD5 checksum, %w", err)
}
// set the 'Content-MD5' header
req.Header.Set(contentMD5Header, string(v))
return string(v), nil
}
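
A small in-package sketch (not part of this commit) of the single-pass computation the header-based path above relies on; the payload and algorithm choice are illustrative:

```go
package checksum

import (
	"strings"
	"testing"
)

func TestComputeStreamChecksumSketch(t *testing.T) {
	// One pass over the stream yields both the base64 checksum for the
	// x-amz-checksum-* header and the hex SHA256 used as the SigV4 payload hash.
	sum, payloadHash, err := computeStreamChecksum(
		AlgorithmCRC32, strings.NewReader("Hello world"), true)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("checksum=%s payloadHash=%s", sum, payloadHash)
}
```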


@@ -0,0 +1,117 @@
package checksum
import (
"context"
"github.com/aws/smithy-go/middleware"
)
// setupInputContext is the initial middleware that looks up the input used to
// configure checksum behavior. This middleware must be executed before the
// input validation step or any other checksum middleware.
type setupInputContext struct {
// GetAlgorithm is a function to get the checksum algorithm of the
// input payload from the input parameters.
//
// Given the input parameter value, the function must return the algorithm
// and true, or false if no algorithm is specified.
GetAlgorithm func(interface{}) (string, bool)
}
// ID for the middleware
func (m *setupInputContext) ID() string {
return "AWSChecksum:SetupInputContext"
}
// HandleInitialize is the initialization middleware that sets up the checksum
// context based on the input parameters provided in the stack.
func (m *setupInputContext) HandleInitialize(
ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
// Check if validation algorithm is specified.
if m.GetAlgorithm != nil {
// check if the input resource has a checksum algorithm
algorithm, ok := m.GetAlgorithm(in.Parameters)
if ok && len(algorithm) != 0 {
ctx = setContextInputAlgorithm(ctx, algorithm)
}
}
return next.HandleInitialize(ctx, in)
}
// inputAlgorithmKey is the key set on the context used to identify and
// retrieve the request checksum algorithm if present on the context.
type inputAlgorithmKey struct{}
// setContextInputAlgorithm sets the request checksum algorithm on the context.
//
// Scoped to stack values.
func setContextInputAlgorithm(ctx context.Context, value string) context.Context {
return middleware.WithStackValue(ctx, inputAlgorithmKey{}, value)
}
// getContextInputAlgorithm returns the checksum algorithm from the context if
// one was specified. Empty string is returned if one is not specified.
//
// Scoped to stack values.
func getContextInputAlgorithm(ctx context.Context) (v string) {
v, _ = middleware.GetStackValue(ctx, inputAlgorithmKey{}).(string)
return v
}
type setupOutputContext struct {
// GetValidationMode is a function to get the checksum validation
// mode of the output payload from the input parameters.
//
// Given the input parameter value, the function must return the validation
// mode and true, or false if no mode is specified.
GetValidationMode func(interface{}) (string, bool)
}
// ID for the middleware
func (m *setupOutputContext) ID() string {
return "AWSChecksum:SetupOutputContext"
}
// HandleInitialize is the initialization middleware that sets up the checksum
// context based on the input parameters provided in the stack.
func (m *setupOutputContext) HandleInitialize(
ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
// Check if validation mode is specified.
if m.GetValidationMode != nil {
// check if the input resource has a checksum validation mode
mode, ok := m.GetValidationMode(in.Parameters)
if ok && len(mode) != 0 {
ctx = setContextOutputValidationMode(ctx, mode)
}
}
return next.HandleInitialize(ctx, in)
}
// outputValidationModeKey is the key set on context used to identify if
// output checksum validation is enabled.
type outputValidationModeKey struct{}
// setContextOutputValidationMode sets the response checksum validation mode on
// the context.
//
// Scoped to stack values.
func setContextOutputValidationMode(ctx context.Context, value string) context.Context {
return middleware.WithStackValue(ctx, outputValidationModeKey{}, value)
}
// getContextOutputValidationMode returns response checksum validation state,
// if one was specified. Empty string is returned if one is not specified.
//
// Scoped to stack values.
func getContextOutputValidationMode(ctx context.Context) (v string) {
v, _ = middleware.GetStackValue(ctx, outputValidationModeKey{}).(string)
return v
}


@@ -0,0 +1,131 @@
package checksum
import (
"context"
"fmt"
"strings"
"github.com/aws/smithy-go"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// outputValidationAlgorithmsUsedKey is the metadata key for indexing the algorithms
// that were used by the middleware's validation.
type outputValidationAlgorithmsUsedKey struct{}
// GetOutputValidationAlgorithmsUsed returns the checksum algorithms used
// stored in the middleware Metadata. Returns false if no algorithms were
// stored in the Metadata.
func GetOutputValidationAlgorithmsUsed(m middleware.Metadata) ([]string, bool) {
vs, ok := m.Get(outputValidationAlgorithmsUsedKey{}).([]string)
return vs, ok
}
// SetOutputValidationAlgorithmsUsed stores the checksum algorithms used in the
// middleware Metadata.
func SetOutputValidationAlgorithmsUsed(m *middleware.Metadata, vs []string) {
m.Set(outputValidationAlgorithmsUsedKey{}, vs)
}
// validateOutputPayloadChecksum middleware computes payload checksum of the
// received response and validates with checksum returned by the service.
type validateOutputPayloadChecksum struct {
// Algorithms represents a priority-ordered list of valid checksum
// algorithms that should be validated when present in HTTP response
// headers.
Algorithms []Algorithm
// IgnoreMultipartValidation indicates multipart checksums ending with "-#"
// will be ignored.
IgnoreMultipartValidation bool
// When set the middleware will log when output does not have checksum or
// algorithm to validate.
LogValidationSkipped bool
// When set the middleware will log when the output contains a multipart
// checksum that was skipped and not validated.
LogMultipartValidationSkipped bool
}
func (m *validateOutputPayloadChecksum) ID() string {
return "AWSChecksum:ValidateOutputPayloadChecksum"
}
// HandleDeserialize is a Deserialize middleware that wraps the HTTP response
// body with an io.ReadCloser that will validate its checksum.
func (m *validateOutputPayloadChecksum) HandleDeserialize(
ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
) (
out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
out, metadata, err = next.HandleDeserialize(ctx, in)
if err != nil {
return out, metadata, err
}
// If there is no validation mode specified, there is nothing to validate.
if mode := getContextOutputValidationMode(ctx); mode != "ENABLED" {
return out, metadata, err
}
response, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, &smithy.DeserializationError{
Err: fmt.Errorf("unknown transport type %T", out.RawResponse),
}
}
var expectedChecksum string
var algorithmToUse Algorithm
for _, algorithm := range m.Algorithms {
value := response.Header.Get(AlgorithmHTTPHeader(algorithm))
if len(value) == 0 {
continue
}
expectedChecksum = value
algorithmToUse = algorithm
}
// TODO this must validate the validation mode is set to enabled.
logger := middleware.GetLogger(ctx)
// Skip validation if no checksum algorithm or checksum is available.
if len(expectedChecksum) == 0 || len(algorithmToUse) == 0 {
if m.LogValidationSkipped {
// TODO this probably should have more information about the
// operation output that won't be validated.
logger.Logf(logging.Warn,
"Response has no supported checksum. Not validating response payload.")
}
return out, metadata, nil
}
// Ignore multipart validation
if m.IgnoreMultipartValidation && strings.Contains(expectedChecksum, "-") {
if m.LogMultipartValidationSkipped {
// TODO this probably should have more information about the
// operation output that won't be validated.
logger.Logf(logging.Warn, "Skipped validation of multipart checksum.")
}
return out, metadata, nil
}
body, err := newValidateChecksumReader(response.Body, algorithmToUse, expectedChecksum)
if err != nil {
return out, metadata, fmt.Errorf("failed to create checksum validation reader, %w", err)
}
response.Body = body
// Update the metadata to include the set of the checksum algorithms that
// will be validated.
SetOutputValidationAlgorithmsUsed(&metadata, []string{
string(algorithmToUse),
})
return out, metadata, nil
}
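
A hypothetical in-package sketch (not part of this commit) of the failure mode the deserialize middleware surfaces when a response body does not match the service-supplied checksum; the deliberately wrong expected value is illustrative:

```go
package checksum

import (
	"errors"
	"io"
	"strings"
	"testing"
)

func TestValidateChecksumReaderSketch(t *testing.T) {
	body := io.NopCloser(strings.NewReader("Hello world"))
	// Use a deliberately wrong expected checksum to trigger validationError.
	r, err := newValidateChecksumReader(body, AlgorithmCRC32, "bogus=")
	if err != nil {
		t.Fatal(err)
	}
	_, err = io.ReadAll(r)
	var vErr validationError
	if !errors.As(err, &vErr) {
		t.Fatalf("expected validationError, got %v", err)
	}
	// e.g. "checksum did not match: algorithm CRC32, expect bogus=, actual ..."
	t.Log(vErr.Error())
}
```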