chore: upgrade dependencies

This commit is contained in:
Cyrille Nofficial 2022-06-09 12:30:53 +02:00
parent 7203f3d6a1
commit dcb93ec8f7
518 changed files with 27809 additions and 3222 deletions

47
go.mod
View File

@ -3,37 +3,40 @@ module github.com/cyrilix/robocar-tools
go 1.18
require (
github.com/aws/aws-sdk-go-v2 v1.11.0
github.com/aws/aws-sdk-go-v2/config v1.10.1
github.com/aws/aws-sdk-go-v2/service/s3 v1.19.0
github.com/aws/aws-sdk-go-v2/service/sagemaker v1.19.0
github.com/cyrilix/robocar-base v0.1.4
github.com/cyrilix/robocar-protobuf/go v1.0.3
github.com/aws/aws-sdk-go-v2 v1.16.5
github.com/aws/aws-sdk-go-v2/config v1.15.10
github.com/aws/aws-sdk-go-v2/service/s3 v1.26.11
github.com/aws/aws-sdk-go-v2/service/sagemaker v1.32.1
github.com/cyrilix/robocar-base v0.1.7
github.com/cyrilix/robocar-protobuf/go v1.0.5
github.com/disintegration/imaging v1.6.2
github.com/eclipse/paho.mqtt.golang v1.3.5
github.com/eclipse/paho.mqtt.golang v1.4.1
github.com/golang/protobuf v1.5.2
go.uber.org/zap v1.19.1
gocv.io/x/gocv v0.28.0
go.uber.org/zap v1.21.0
gocv.io/x/gocv v0.31.0
)
require (
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.6.1 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.9.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.6.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.10.0 // indirect
github.com/aws/smithy-go v1.9.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.2 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.12.5 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.6 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.13 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.6 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.11.8 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.16.7 // indirect
github.com/aws/smithy-go v1.11.3 // indirect
github.com/gorilla/websocket v1.4.2 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 // indirect
golang.org/x/net v0.0.0-20210614182718-04defd469f4e // indirect
google.golang.org/protobuf v1.26.0 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
google.golang.org/protobuf v1.28.0 // indirect
)

102
go.sum
View File

@ -1,55 +1,58 @@
github.com/aws/aws-sdk-go-v2 v1.11.0 h1:HxyD62DyNhCfiFGUHqJ/xITD6rAjJ7Dm/2nLxLmO4Ag=
github.com/aws/aws-sdk-go-v2 v1.11.0/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 h1:yVUAwvJC/0WNPbyl0nA3j1L6CW1CN8wBubCRqtG7JLI=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0/go.mod h1:Xn6sxgRuIDflLRJFj5Ev7UxABIkNbccFPV/p8itDReM=
github.com/aws/aws-sdk-go-v2/config v1.10.1 h1:z/ViqIjW6ZeuLWgTWMTSyZzaVWo/1cWeVf1Uu+RF01E=
github.com/aws/aws-sdk-go-v2/config v1.10.1/go.mod h1:auIv5pIIn3jIBHNRcVQcsczn6Pfa6Dyv80Fai0ueoJU=
github.com/aws/aws-sdk-go-v2/credentials v1.6.1 h1:A39JYth2fFCx+omN/gib/jIppx3rRnt2r7UKPq7Mh5Y=
github.com/aws/aws-sdk-go-v2/credentials v1.6.1/go.mod h1:QyvQk1IYTqBWSi1T6UgT/W8DMxBVa5pVuLFSRLLhGf8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.0 h1:OpZjuUy8Jt3CA1WgJgBC5Bz+uOjE5Ppx4NFTRaooUuA=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.0/go.mod h1:5E1J3/TTYy6z909QNR0QnXGBpfESYGDqd3O0zqONghU=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0 h1:zY8cNmbBXt3pzjgWgdIbzpQ6qxoCwt+Nx9JbrAf2mbY=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0/go.mod h1:NO3Q5ZTTQtO2xIg2+xTXYDiT7knSejfeDm7WGDaOo0U=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0 h1:Z3aR/OXBnkYK9zXkNkfitHX6SmUBzSsx8VMHbH4Lvhw=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0/go.mod h1:anlUzBoEWglcUxUQwZA7HQOEVEnQALVZsizAapB2hq8=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.0 h1:c10Z7fWxtJCoyc8rv06jdh9xrKnu7bAJiRaKWvTb2mU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.0/go.mod h1:6oXGy4GLpypD3uCh8wcqztigGgmhLToMfjavgh+VySg=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0 h1:lPLbw4Gn59uoKqvOfSnkJr54XWk5Ak1NK20ZEiSWb3U=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0/go.mod h1:80NaCIH9YU3rzTTs/J/ECATjXuRqzo/wB6ukO6MZ0XY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0 h1:qGZWS/WgiFY+Zgad2u0gwBHpJxz6Ne401JE7iQI1nKs=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0/go.mod h1:Mq6AEc+oEjCUlBuLiK5YwW4shSOAKCQ3tXN0sQeYoBA=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.9.0 h1:0BOlTqnNnrEO04oYKzDxMMe68t107pmIotn18HtVonY=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.9.0/go.mod h1:xKCZ4YFSF2s4Hnb/J0TLeOsKuGzICzcElaOKNGrVnx4=
github.com/aws/aws-sdk-go-v2/service/s3 v1.19.0 h1:5mRAms4TjSTOGYsqKYte5kHr1PzpMJSyLThjF3J+hw0=
github.com/aws/aws-sdk-go-v2/service/s3 v1.19.0/go.mod h1:Gwz3aVctJe6mUY9T//bcALArPUaFmNAy2rTB9qN4No8=
github.com/aws/aws-sdk-go-v2/service/sagemaker v1.19.0 h1:IzWYAKM+1tnENJGfBhR29EMpO0NpDcwjdcn+Pce4w4g=
github.com/aws/aws-sdk-go-v2/service/sagemaker v1.19.0/go.mod h1:mRNg/Phr90VO7VXZ9z49YvKGVVwraKxomBS7DfbXliw=
github.com/aws/aws-sdk-go-v2/service/sso v1.6.0 h1:JDgKIUZOmLFu/Rv6zXLrVTWCmzA0jcTdvsT8iFIKrAI=
github.com/aws/aws-sdk-go-v2/service/sso v1.6.0/go.mod h1:Q/l0ON1annSU+mc0JybDy1Gy6dnJxIcWjphO6qJPzvM=
github.com/aws/aws-sdk-go-v2/service/sts v1.10.0 h1:1jh8J+JjYRp+QWKOsaZt7rGUgoyrqiiVwIm+w0ymeUw=
github.com/aws/aws-sdk-go-v2/service/sts v1.10.0/go.mod h1:jLKCFqS+1T4i7HDqCP9GM4Uk75YW1cS0o82LdxpMyOE=
github.com/aws/smithy-go v1.9.0 h1:c7FUdEqrQA1/UVKKCNDFQPNKGp4FQg3YW4Ck5SLTG58=
github.com/aws/smithy-go v1.9.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/aws/aws-sdk-go-v2 v1.16.5 h1:Ah9h1TZD9E2S1LzHpViBO3Jz9FPL5+rmflmb8hXirtI=
github.com/aws/aws-sdk-go-v2 v1.16.5/go.mod h1:Wh7MEsmEApyL5hrWzpDkba4gwAPc5/piwLVLFnCxp48=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.2 h1:LFOGNUQxc/8BlhA4FD+JdYjJKQK6tsz9Xiuh+GUTKAQ=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.2/go.mod h1:u/38zebMi809w7YFnqY/07Tw/FSs6DGhPD95Xiig7XQ=
github.com/aws/aws-sdk-go-v2/config v1.15.10 h1:0HSMRNGlR0/WlGbeKC9DbBphBwRIK5H4cKUbgqNTKcA=
github.com/aws/aws-sdk-go-v2/config v1.15.10/go.mod h1:XL4DzwzWdwXBzKdwMdpLkMIaGEQCYRQyzA4UnJaUnNk=
github.com/aws/aws-sdk-go-v2/credentials v1.12.5 h1:WNNCUTWA0vyMy5t8LfS4iB7QshsW0DsHS/VdhyCGZWM=
github.com/aws/aws-sdk-go-v2/credentials v1.12.5/go.mod h1:DOcdLlkqUiNGyXnjWgspC3eIAdXhj8q0pO1LiSvrTI4=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.6 h1:+NZzDh/RpcQTpo9xMFUgkseIam6PC+YJbdhbQp1NOXI=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.6/go.mod h1:ClLMcuQA/wcHPmOIfNzNI4Y1Q0oDbmEkbYhMFOzHDh8=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12 h1:Zt7DDk5V7SyQULUUwIKzsROtVzp/kVvcz15uQx/Tkow=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12/go.mod h1:Afj/U8svX6sJ77Q+FPWMzabJ9QjbwP32YlopgKALUpg=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6 h1:eeXdGVtXEe+2Jc49+/vAzna3FAQnUD4AagAw8tzbmfc=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6/go.mod h1:FwpAKI+FBPIELJIdmQzlLtRe8LQSOreMcM2wBsPMvvc=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.13 h1:L/l0WbIpIadRO7i44jZh1/XeXpNDX0sokFppb4ZnXUI=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.13/go.mod h1:hiM/y1XPp3DoEPhoVEYc/CZcS58dP6RKJRDFp99wdX0=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.3 h1:m1vDVDoNK4tZAoWtcetHopEdIeUlrNNpdLZ7cwZke6s=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.3/go.mod h1:annFthsb7FiHQd5X9wKDNst9OJvVFY0l0LjQ8zQniJA=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.2 h1:T/ywkX1ed+TsZVQccu/8rRJGxKZF/t0Ivgrb4MHTSeo=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.2/go.mod h1:RnloUnyZ4KN9JStGY1LuQ7Wzqh7V0f8FinmRdHYtuaA=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.7 h1:DYUAx8lWAhIzFiD284oq6RUPKppKk3cyqv/hyUkbWuA=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.7/go.mod h1:6tcs0yjwAW2Z9Yb3Z4X/2tm3u9jNox1dvXxVXTd73Zw=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6 h1:0ZxYAZ1cn7Swi/US55VKciCE6RhRHIwCKIWaMLdT6pg=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6/go.mod h1:DxAPjquoEHf3rUHh1b9+47RAaXB8/7cB6jkzCt/GOEI=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.6 h1:SSrqxZVhrO371eg/C8Fnj6kduzltKHj/mJl2swkTBGc=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.6/go.mod h1:TzDyqDka0783D93yVirkcysbibVRxjX5HFJEWms4kKA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.26.11 h1:Wt0512f6GfLiMd6a+NuOCC9r3/trmzHMTB697CBDUwg=
github.com/aws/aws-sdk-go-v2/service/s3 v1.26.11/go.mod h1:VMTprbiZWqW44viXgPSQhWdeZ8JTAeJwhO7OXpC/Rsg=
github.com/aws/aws-sdk-go-v2/service/sagemaker v1.32.1 h1:nOjJ5PevzE1ymPIYnJk6X/+EkeL2aDP1E4HpJvCwlAQ=
github.com/aws/aws-sdk-go-v2/service/sagemaker v1.32.1/go.mod h1:nF9ZnJeXLYw8PPRAtsBbG3aY1P8c5bUH2zqA/IAzaks=
github.com/aws/aws-sdk-go-v2/service/sso v1.11.8 h1:GNIdO14AHW5CgnzMml3Tg5Fy/+NqPQvnh1HsC1zpcPo=
github.com/aws/aws-sdk-go-v2/service/sso v1.11.8/go.mod h1:UqRD9bBt15P0ofRyDZX6CfsIqPpzeHOhZKWzgSuAzpo=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.7 h1:HLzjwQM9975FQWSF3uENDGHT1gFQm/q3QXu2BYIcI08=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.7/go.mod h1:lVxTdiiSHY3jb1aeg+BBFtDzZGSUCv6qaNOyEGCJ1AY=
github.com/aws/smithy-go v1.11.3 h1:DQixirEFM9IaKxX1olZ3ke3nvxRS2xMDteKIDWxozW8=
github.com/aws/smithy-go v1.11.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/cyrilix/robocar-base v0.1.4 h1:nfnjRwAcCfS7xGu6tW9rZhmc/HZIsuDJX5NFhgX5dWE=
github.com/cyrilix/robocar-base v0.1.4/go.mod h1:Tt04UmbGBiQtU0Cn3wFD0q7XoyokTwIlWYQxThKI+04=
github.com/cyrilix/robocar-protobuf/go v1.0.3 h1:iPHw2+7FVXG2C4+Th1m11hQ+2RpAQzlxKhc5M7XOa6Q=
github.com/cyrilix/robocar-protobuf/go v1.0.3/go.mod h1:xb95cK07lYXnKcHZKnGafmAgYRrqZWZgV9LMiJAp+gE=
github.com/cyrilix/robocar-base v0.1.7 h1:EVzZ0KjigSFpke5f3A/PybEH3WFUEIrYSc3z/dhOZ48=
github.com/cyrilix/robocar-base v0.1.7/go.mod h1:4E11HQSNy2NT8e7MW188y6ST9C0RzarKyn7sK/3V/Lk=
github.com/cyrilix/robocar-protobuf/go v1.0.5 h1:PX1At+pf6G7gJwT4LzJLQu3/LPFTTNNlZmZSYtnSELY=
github.com/cyrilix/robocar-protobuf/go v1.0.5/go.mod h1:Y3AE28K5V7EZxMXp/6A8RhkRz15VOfFy4CjST35FbtQ=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
github.com/eclipse/paho.mqtt.golang v1.3.5 h1:sWtmgNxYM9P2sP+xEItMozsR3w0cqZFlqnNN1bdl41Y=
github.com/eclipse/paho.mqtt.golang v1.3.5/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc=
github.com/eclipse/paho.mqtt.golang v1.4.1 h1:tUSpviiL5G3P9SZZJPC4ZULZJsxQKXxfENpMvdbAXAI=
github.com/eclipse/paho.mqtt.golang v1.4.1/go.mod h1:JGt0RsEwEX+Xa/agj90YJ9d9DH2b7upDZMK9HRbFvCA=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hybridgroup/mjpeg v0.0.0-20140228234708-4680f319790e/go.mod h1:eagM805MRKrioHYuU7iKLUyFPVKqVV6um5DAvCkUtXs=
@ -72,14 +75,14 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
gocv.io/x/gocv v0.28.0 h1:hweRS9Js60YEZPZzjhU5I+0E2ngazquLlO78zwnrFvY=
gocv.io/x/gocv v0.28.0/go.mod h1:oc6FvfYqfBp99p+yOEzs9tbYF9gOrAQSeL/dyIPefJU=
go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
gocv.io/x/gocv v0.31.0 h1:BHDtK8v+YPvoSPQTTiZB2fM/7BLg6511JqkruY2z6LQ=
gocv.io/x/gocv v0.31.0/go.mod h1:oc6FvfYqfBp99p+yOEzs9tbYF9gOrAQSeL/dyIPefJU=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 h1:hVwzHzIUGRjiF7EcUjqNxk3NCfkPxbDKRdnNE1Rpg0U=
@ -94,6 +97,7 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -111,11 +115,11 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,6 @@
# Lint rules to ignore
LINTIGNORESINGLEFIGHT='internal/sync/singleflight/singleflight.go:.+error should be the last type'
LINT_IGNORE_S3MANAGER_INPUT='feature/s3/manager/upload.go:.+struct field SSEKMSKeyId should be SSEKMSKeyID'
UNIT_TEST_TAGS=
BUILD_TAGS=-tags "example,codegen,integration,ec2env,perftest"
@ -39,6 +40,7 @@ ENDPOINT_PREFIX_JSON=${CODEGEN_RESOURCES_PATH}/endpoint-prefix.json
LICENSE_FILE=$(shell pwd)/LICENSE.txt
SMITHY_GO_VERSION ?=
PRE_RELEASE_VERSION ?=
RELEASE_MANIFEST_FILE ?=
RELEASE_CHGLOG_DESC_FILE ?=
@ -57,6 +59,12 @@ REPOTOOLS_CMD_EDIT_MODULE_DEPENDENCY = ${REPOTOOLS_MODULE}/cmd/editmoduledepende
REPOTOOLS_CALCULATE_RELEASE_VERBOSE ?= false
REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG=-v=${REPOTOOLS_CALCULATE_RELEASE_VERBOSE}
REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS ?=
ifneq ($(PRE_RELEASE_VERSION),)
REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION}
endif
.PHONY: all
all: generate unit
@ -65,13 +73,13 @@ all: generate unit
###################
.PHONY: generate smithy-generate smithy-build smithy-build-% smithy-clean smithy-go-publish-local format \
gen-config-asserts gen-repo-mod-replace gen-mod-replace-smithy gen-mod-dropreplace-smithy gen-aws-ptrs tidy-modules-% \
add-module-license-files sync-models sync-endpoints-model sync-endpoints.json clone-v1-models gen-endpoint-prefix.json \
add-module-license-files sync-models sync-endpoints-model sync-endpoints.json clone-v1-models gen-internal-codegen \
sync-api-models copy-attributevalue-feature min-go-version-% update-requires smithy-annotate-stable \
update-module-metadata download-modules-%
generate: smithy-generate update-requires gen-repo-mod-replace update-module-metadata smithy-annotate-stable \
gen-config-asserts copy-attributevalue-feature gen-mod-dropreplace-smithy min-go-version-. tidy-modules-. \
add-module-license-files gen-aws-ptrs format
gen-config-asserts gen-internal-codegen copy-attributevalue-feature gen-mod-dropreplace-smithy min-go-version-. \
tidy-modules-. add-module-license-files gen-aws-ptrs format
smithy-generate:
cd codegen && ./gradlew clean build -Plog-tests && ./gradlew clean
@ -109,6 +117,11 @@ gen-config-asserts:
&& go mod tidy \
&& go generate
gen-internal-codegen:
@echo "Generating internal/codegen"
cd internal/codegen \
&& go generate
gen-repo-mod-replace:
@echo "Generating go.mod replace for repo modules"
go run ${REPOTOOLS_CMD_MAKE_RELATIVE}
@ -151,7 +164,7 @@ add-module-license-files:
sync-models: sync-endpoints-model sync-api-models
sync-endpoints-model: sync-endpoints.json gen-endpoint-prefix.json
sync-endpoints-model: sync-endpoints.json
sync-endpoints.json:
[[ ! -z "${ENDPOINTS_MODEL}" ]] && cp ${ENDPOINTS_MODEL} ${ENDPOINTS_JSON} || echo "ENDPOINTS_MODEL not set, must not be empty"
@ -160,12 +173,6 @@ clone-v1-models:
rm -rf /tmp/aws-sdk-go-model-sync
git clone https://github.com/aws/aws-sdk-go.git --depth 1 /tmp/aws-sdk-go-model-sync
gen-endpoint-prefix.json: clone-v1-models
cd internal/repotools/cmd/endpointPrefix && \
go run . \
-m '/tmp/aws-sdk-go-model-sync/models/apis/*/*/api-2.json' \
-o ${ENDPOINT_PREFIX_JSON}
sync-api-models:
cd internal/repotools/cmd/syncAPIModels && \
go run . \
@ -398,7 +405,7 @@ ls-changes:
go run ${REPOTOOLS_CMD_CHANGELOG} ls
preview-release:
go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG}
go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG} ${REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS}
pre-release-validation:
@if [[ -z "${RELEASE_MANIFEST_FILE}" ]]; then \
@ -409,7 +416,7 @@ pre-release-validation:
fi
release: pre-release-validation
go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG}
go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG} ${REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS}
go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} -release ${RELEASE_MANIFEST_FILE}
go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} -release ${RELEASE_MANIFEST_FILE}
go run ${REPOTOOLS_CMD_GENERATE_CHANGELOG} -release ${RELEASE_MANIFEST_FILE} -o ${RELEASE_CHGLOG_DESC_FILE}
@ -441,6 +448,7 @@ lint:
@echo "go lint SDK and vendor packages"
@lint=`golint ./...`; \
dolint=`echo "$$lint" | grep -E -v \
-e ${LINT_IGNORE_S3MANAGER_INPUT} \
-e ${LINTIGNORESINGLEFIGHT}`; \
echo "$$dolint"; \
if [ "$$dolint" != "" ]; then exit 1; fi
@ -458,6 +466,21 @@ sdkv1check:
echo "$$sdkv1usage"; \
if [ "$$sdkv1usage" != "" ]; then exit 1; fi
list-deps: list-deps-.
list-deps-%:
@# command that uses the pattern to define the root path that the
@# module testing will start from. Strips off the "list-deps-" and
@# replaces all "_" with "/".
@#
@# Trim output to only include stdout for list of dependencies only.
@# make list-deps 2>&-
@#
@# e.g. list-deps-internal_protocoltest
@cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst list-deps-,,$@)) ${EACHMODULE_FLAGS} \
"go list -m all | grep -v 'github.com/aws/aws-sdk-go-v2'" | sort -u
###################
# Sandbox Testing #
###################

View File

@ -38,24 +38,52 @@ type Config struct {
// will prevent the SDK from modifying the HTTP client.
HTTPClient HTTPClient
// An endpoint resolver that can be used to provide or override an endpoint for the given
// service and region Please see the `aws.EndpointResolver` documentation on usage.
// An endpoint resolver that can be used to provide or override an endpoint
// for the given service and region.
//
// See the `aws.EndpointResolver` documentation for additional usage
// information.
//
// Deprecated: See Config.EndpointResolverWithOptions
EndpointResolver EndpointResolver
// An endpoint resolver that can be used to provide or override an endpoint for the given
// service and region Please see the `aws.EndpointResolverWithOptions` documentation on usage.
// An endpoint resolver that can be used to provide or override an endpoint
// for the given service and region.
//
// When EndpointResolverWithOptions is specified, it will be used by a
// service client rather than using EndpointResolver if also specified.
//
// See the `aws.EndpointResolverWithOptions` documentation for additional
// usage information.
EndpointResolverWithOptions EndpointResolverWithOptions
// Retryer is a function that provides a Retryer implementation. A Retryer guides how HTTP requests should be
// retried in case of recoverable failures. When nil the API client will use a default
// retryer.
// RetryMaxAttempts specifies the maximum number attempts an API client
// will call an operation that fails with a retryable error.
//
// In general, the provider function should return a new instance of a Retryer if you are attempting
// to provide a consistent Retryer configuration across all clients. This will ensure that each client will be
// provided a new instance of the Retryer implementation, and will avoid issues such as sharing the same retry token
// bucket across services.
// API Clients will only use this value to construct a retryer if the
// Config.Retryer member is not nil. This value will be ignored if
// Retryer is not nil.
RetryMaxAttempts int
// RetryMode specifies the retry model the API client will be created with.
//
// API Clients will only use this value to construct a retryer if the
// Config.Retryer member is not nil. This value will be ignored if
// Retryer is not nil.
RetryMode RetryMode
// Retryer is a function that provides a Retryer implementation. A Retryer
// guides how HTTP requests should be retried in case of recoverable
// failures. When nil the API client will use a default retryer.
//
// In general, the provider function should return a new instance of a
// Retryer if you are attempting to provide a consistent Retryer
// configuration across all clients. This will ensure that each client will
// be provided a new instance of the Retryer implementation, and will avoid
// issues such as sharing the same retry token bucket across services.
//
// If not nil, RetryMaxAttempts, and RetryMode will be ignored by API
// clients.
Retryer func() Retryer
// ConfigSources are the sources that were used to construct the Config.
@ -71,13 +99,26 @@ type Config struct {
// standard error.
Logger logging.Logger
// Configures the events that will be sent to the configured logger.
// This can be used to configure the logging of signing, retries, request, and responses
// of the SDK clients.
// Configures the events that will be sent to the configured logger. This
// can be used to configure the logging of signing, retries, request, and
// responses of the SDK clients.
//
// See the ClientLogMode type documentation for the complete set of logging modes and available
// configuration.
// See the ClientLogMode type documentation for the complete set of logging
// modes and available configuration.
ClientLogMode ClientLogMode
// The configured DefaultsMode. If not specified, service clients will
// default to legacy.
//
// Supported modes are: auto, cross-region, in-region, legacy, mobile,
// standard
DefaultsMode DefaultsMode
// The RuntimeEnvironment configuration, only populated if the DefaultsMode
// is set to DefaultsModeAuto and is initialized by
// `config.LoadDefaultConfig`. You should not populate this structure
// programmatically, or rely on the values here within your applications.
RuntimeEnvironment RuntimeEnvironment
}
// NewConfig returns a new Config pointer that can be chained with builder

View File

@ -2,6 +2,7 @@ package aws
import (
"context"
"fmt"
"sync/atomic"
"time"
@ -24,11 +25,13 @@ type CredentialsCacheOptions struct {
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
// ExpiryWindowJitterFrac provides a mechanism for randomizing the expiration of credentials
// within the configured ExpiryWindow by a random percentage. Valid values are between 0.0 and 1.0.
// ExpiryWindowJitterFrac provides a mechanism for randomizing the
// expiration of credentials within the configured ExpiryWindow by a random
// percentage. Valid values are between 0.0 and 1.0.
//
// As an example if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac is 0.5 then credentials will be set to
// expire between 30 to 60 seconds prior to their actual expiration time.
// As an example if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac
// is 0.5 then credentials will be set to expire between 30 to 60 seconds
// prior to their actual expiration time.
//
// If ExpiryWindow is 0 or less then ExpiryWindowJitterFrac is ignored.
// If ExpiryWindowJitterFrac is 0 then no randomization will be applied to the window.
@ -39,8 +42,19 @@ type CredentialsCacheOptions struct {
// CredentialsCache provides caching and concurrency safe credentials retrieval
// via the provider's retrieve method.
//
// CredentialsCache will look for optional interfaces on the Provider to adjust
// how the credential cache handles credentials caching.
//
// * HandleFailRefreshCredentialsCacheStrategy - Allows provider to handle
// credential refresh failures. This could return an updated Credentials
// value, or attempt another means of retrieving credentials.
//
// * AdjustExpiresByCredentialsCacheStrategy - Allows provider to adjust how
// credentials Expires is modified. This could modify how the Credentials
// Expires is adjusted based on the CredentialsCache ExpiryWindow option.
// Such as providing a floor not to reduce the Expires below.
type CredentialsCache struct {
// provider is the CredentialProvider implementation to be wrapped by the CredentialCache.
provider CredentialsProvider
options CredentialsCacheOptions
@ -48,8 +62,9 @@ type CredentialsCache struct {
sf singleflight.Group
}
// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider is expected to not be nil. A variadic
// list of one or more functions can be provided to modify the CredentialsCache configuration. This allows for
// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider
// is expected to not be nil. A variadic list of one or more functions can be
// provided to modify the CredentialsCache configuration. This allows for
// configuration of credential expiry window and jitter.
func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *CredentialsCacheOptions)) *CredentialsCache {
options := CredentialsCacheOptions{}
@ -81,8 +96,8 @@ func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *C
//
// Returns and error if the provider's retrieve method returns an error.
func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) {
if creds := p.getCreds(); creds != nil {
return *creds, nil
if creds, ok := p.getCreds(); ok && !creds.Expired() {
return creds, nil
}
resCh := p.sf.DoChan("", func() (interface{}, error) {
@ -97,39 +112,64 @@ func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) {
}
func (p *CredentialsCache) singleRetrieve(ctx context.Context) (interface{}, error) {
if creds := p.getCreds(); creds != nil {
return *creds, nil
currCreds, ok := p.getCreds()
if ok && !currCreds.Expired() {
return currCreds, nil
}
newCreds, err := p.provider.Retrieve(ctx)
if err != nil {
handleFailToRefresh := defaultHandleFailToRefresh
if cs, ok := p.provider.(HandleFailRefreshCredentialsCacheStrategy); ok {
handleFailToRefresh = cs.HandleFailToRefresh
}
newCreds, err = handleFailToRefresh(ctx, currCreds, err)
if err != nil {
return Credentials{}, fmt.Errorf("failed to refresh cached credentials, %w", err)
}
}
if newCreds.CanExpire && p.options.ExpiryWindow > 0 {
adjustExpiresBy := defaultAdjustExpiresBy
if cs, ok := p.provider.(AdjustExpiresByCredentialsCacheStrategy); ok {
adjustExpiresBy = cs.AdjustExpiresBy
}
creds, err := p.provider.Retrieve(ctx)
if err == nil {
if creds.CanExpire {
randFloat64, err := sdkrand.CryptoRandFloat64()
if err != nil {
return Credentials{}, err
}
jitter := time.Duration(randFloat64 * p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow))
creds.Expires = creds.Expires.Add(-(p.options.ExpiryWindow - jitter))
return Credentials{}, fmt.Errorf("failed to get random provider, %w", err)
}
p.creds.Store(&creds)
var jitter time.Duration
if p.options.ExpiryWindowJitterFrac > 0 {
jitter = time.Duration(randFloat64 *
p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow))
}
return creds, err
newCreds, err = adjustExpiresBy(newCreds, -(p.options.ExpiryWindow - jitter))
if err != nil {
return Credentials{}, fmt.Errorf("failed to adjust credentials expires, %w", err)
}
}
p.creds.Store(&newCreds)
return newCreds, nil
}
func (p *CredentialsCache) getCreds() *Credentials {
// getCreds returns the currently stored credentials and true. Returning false
// if no credentials were stored.
func (p *CredentialsCache) getCreds() (Credentials, bool) {
v := p.creds.Load()
if v == nil {
return nil
return Credentials{}, false
}
c := v.(*Credentials)
if c != nil && c.HasKeys() && !c.Expired() {
return c
if c == nil || !c.HasKeys() {
return Credentials{}, false
}
return nil
return *c, true
}
// Invalidate will invalidate the cached credentials. The next call to Retrieve
@ -137,3 +177,42 @@ func (p *CredentialsCache) getCreds() *Credentials {
func (p *CredentialsCache) Invalidate() {
p.creds.Store((*Credentials)(nil))
}
// HandleFailRefreshCredentialsCacheStrategy is an interface for
// CredentialsCache that allows a CredentialsProvider to customize how a
// failed credentials refresh is handled.
type HandleFailRefreshCredentialsCacheStrategy interface {
	// Given the previously cached Credentials, if any, and the refresh error,
	// may return a new or modified set of Credentials, or an error.
	//
	// Credential caches may use the default implementation if nil.
	HandleFailToRefresh(context.Context, Credentials, error) (Credentials, error)
}
// defaultHandleFailToRefresh is the fallback HandleFailToRefresh strategy:
// it discards any previously cached Credentials and returns the refresh
// error unchanged.
func defaultHandleFailToRefresh(ctx context.Context, _ Credentials, err error) (Credentials, error) {
	return Credentials{}, err
}
// AdjustExpiresByCredentialsCacheStrategy is an interface for CredentialsCache
// that allows a CredentialsProvider to intercept adjustments to the Credentials
// expiry based on the expectations and use cases of the CredentialsProvider.
//
// Credential caches may use the default implementation if nil.
type AdjustExpiresByCredentialsCacheStrategy interface {
	// Given a Credentials value and a duration offset, applies any mutations
	// and returns the potentially updated Credentials, or an error.
	AdjustExpiresBy(Credentials, time.Duration) (Credentials, error)
}
// defaultAdjustExpiresBy shifts the credentials' Expires time by dur and
// returns the updated value. Credentials that cannot expire (CanExpire is
// false) are returned unchanged.
func defaultAdjustExpiresBy(creds Credentials, dur time.Duration) (Credentials, error) {
	if creds.CanExpire {
		creds.Expires = creds.Expires.Add(dur)
	}
	return creds, nil
}

View File

@ -83,16 +83,20 @@ type Credentials struct {
// Source of the credentials
Source string
// Time the credentials will expire.
// States if the credentials can expire or not.
CanExpire bool
// The time the credentials will expire at. Should be ignored if CanExpire
// is false.
Expires time.Time
}
// Expired returns if the credentials have expired.
func (v Credentials) Expired() bool {
if v.CanExpire {
// Calling Round(0) on the current time will truncate the monotonic reading only. Ensures credential expiry
// time is always based on reported wall-clock time.
// Calling Round(0) on the current time will truncate the monotonic
// reading only. Ensures credential expiry time is always based on
// reported wall-clock time.
return !v.Expires.After(sdk.NowTime().Round(0))
}

View File

@ -0,0 +1,38 @@
package defaults
import (
"github.com/aws/aws-sdk-go-v2/aws"
"runtime"
"strings"
)
// getGOOS reports the running operating system (runtime.GOOS). Declared as a
// package variable so the value can be substituted — presumably to fake the
// OS in unit tests; confirm against this package's test files.
var getGOOS = func() string {
	return runtime.GOOS
}
// ResolveDefaultsModeAuto is used to determine the effective aws.DefaultsMode
// when the mode is set to aws.DefaultsModeAuto.
func ResolveDefaultsModeAuto(region string, environment aws.RuntimeEnvironment) aws.DefaultsMode {
	// Mobile platforms always resolve to the mobile defaults.
	if platform := getGOOS(); platform == "android" || platform == "ios" {
		return aws.DefaultsModeMobile
	}

	// Prefer the region reported by a recognized execution environment, then
	// fall back to the region discovered via EC2 instance metadata.
	currentRegion := ""
	if len(environment.EnvironmentIdentifier) > 0 {
		currentRegion = environment.Region
	}
	if currentRegion == "" && len(environment.EC2InstanceMetadataRegion) > 0 {
		currentRegion = environment.EC2InstanceMetadataRegion
	}

	switch {
	case region == "" || currentRegion == "":
		// Without both regions there is nothing to compare against.
		return aws.DefaultsModeStandard
	case strings.EqualFold(region, currentRegion):
		return aws.DefaultsModeInRegion
	default:
		return aws.DefaultsModeCrossRegion
	}
}

View File

@ -0,0 +1,43 @@
package defaults
import (
"time"
"github.com/aws/aws-sdk-go-v2/aws"
)
// Configuration is the set of SDK configuration options that are determined based
// on the configured DefaultsMode.
type Configuration struct {
	// RetryMode is the configuration's default retry mode API clients should
	// use for constructing a Retryer.
	RetryMode aws.RetryMode

	// ConnectTimeout is the maximum amount of time a dial will wait for
	// a connect to complete.
	//
	// See https://pkg.go.dev/net#Dialer.Timeout
	ConnectTimeout *time.Duration

	// TLSNegotiationTimeout specifies the maximum amount of time waiting to
	// wait for a TLS handshake.
	//
	// See https://pkg.go.dev/net/http#Transport.TLSHandshakeTimeout
	TLSNegotiationTimeout *time.Duration
}

// GetConnectTimeout returns the ConnectTimeout value, returns false if the value is not set.
func (c *Configuration) GetConnectTimeout() (time.Duration, bool) {
	if v := c.ConnectTimeout; v != nil {
		return *v, true
	}
	return 0, false
}

// GetTLSNegotiationTimeout returns the TLSNegotiationTimeout value, returns false if the value is not set.
func (c *Configuration) GetTLSNegotiationTimeout() (time.Duration, bool) {
	if v := c.TLSNegotiationTimeout; v != nil {
		return *v, true
	}
	return 0, false
}

View File

@ -0,0 +1,50 @@
// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsconfig. DO NOT EDIT.
package defaults
import (
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
"time"
)
// GetModeConfiguration returns the default Configuration descriptor for the given mode.
//
// Supports the following modes: cross-region, in-region, mobile, standard
func GetModeConfiguration(mode aws.DefaultsMode) (Configuration, error) {
var mv aws.DefaultsMode
mv.SetFromString(string(mode))
switch mv {
case aws.DefaultsModeCrossRegion:
settings := Configuration{
ConnectTimeout: aws.Duration(3100 * time.Millisecond),
RetryMode: aws.RetryMode("standard"),
TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond),
}
return settings, nil
case aws.DefaultsModeInRegion:
settings := Configuration{
ConnectTimeout: aws.Duration(1100 * time.Millisecond),
RetryMode: aws.RetryMode("standard"),
TLSNegotiationTimeout: aws.Duration(1100 * time.Millisecond),
}
return settings, nil
case aws.DefaultsModeMobile:
settings := Configuration{
ConnectTimeout: aws.Duration(30000 * time.Millisecond),
RetryMode: aws.RetryMode("standard"),
TLSNegotiationTimeout: aws.Duration(30000 * time.Millisecond),
}
return settings, nil
case aws.DefaultsModeStandard:
settings := Configuration{
ConnectTimeout: aws.Duration(3100 * time.Millisecond),
RetryMode: aws.RetryMode("standard"),
TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond),
}
return settings, nil
default:
return Configuration{}, fmt.Errorf("unsupported defaults mode: %v", mode)
}
}

View File

@ -0,0 +1,2 @@
// Package defaults provides recommended configuration values for AWS SDKs and CLIs.
package defaults

View File

@ -0,0 +1,95 @@
// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsmode. DO NOT EDIT.
package aws
import (
"strings"
)
// DefaultsMode is the SDK defaults mode setting.
type DefaultsMode string
// The DefaultsMode constants.
const (
// DefaultsModeAuto is an experimental mode that builds on the standard mode.
// The SDK will attempt to discover the execution environment to determine the
// appropriate settings automatically.
//
// Note that the auto detection is heuristics-based and does not guarantee 100%
// accuracy. STANDARD mode will be used if the execution environment cannot
// be determined. The auto detection might query EC2 Instance Metadata service
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html),
// which might introduce latency. Therefore we recommend choosing an explicit
// defaults_mode instead if startup latency is critical to your application
DefaultsModeAuto DefaultsMode = "auto"
// DefaultsModeCrossRegion builds on the standard mode and includes optimization
// tailored for applications which call AWS services in a different region
//
// Note that the default values vended from this mode might change as best practices
// may evolve. As a result, it is encouraged to perform tests when upgrading
// the SDK
DefaultsModeCrossRegion DefaultsMode = "cross-region"
// DefaultsModeInRegion builds on the standard mode and includes optimization
// tailored for applications which call AWS services from within the same AWS
// region
//
// Note that the default values vended from this mode might change as best practices
// may evolve. As a result, it is encouraged to perform tests when upgrading
// the SDK
DefaultsModeInRegion DefaultsMode = "in-region"
// DefaultsModeLegacy provides default settings that vary per SDK and were used
// prior to establishment of defaults_mode
DefaultsModeLegacy DefaultsMode = "legacy"
// DefaultsModeMobile builds on the standard mode and includes optimization
// tailored for mobile applications
//
// Note that the default values vended from this mode might change as best practices
// may evolve. As a result, it is encouraged to perform tests when upgrading
// the SDK
DefaultsModeMobile DefaultsMode = "mobile"
// DefaultsModeStandard provides the latest recommended default values that
// should be safe to run in most scenarios
//
// Note that the default values vended from this mode might change as best practices
// may evolve. As a result, it is encouraged to perform tests when upgrading
// the SDK
DefaultsModeStandard DefaultsMode = "standard"
)
// SetFromString sets the DefaultsMode value to one of the pre-defined constants that matches
// the provided string when compared using EqualFold. If the value does not match a known
// constant it will be set to as-is and the function will return false. As a special case, if the
// provided value is a zero-length string, the mode will be set to LegacyDefaultsMode.
func (d *DefaultsMode) SetFromString(v string) (ok bool) {
switch {
case strings.EqualFold(v, string(DefaultsModeAuto)):
*d = DefaultsModeAuto
ok = true
case strings.EqualFold(v, string(DefaultsModeCrossRegion)):
*d = DefaultsModeCrossRegion
ok = true
case strings.EqualFold(v, string(DefaultsModeInRegion)):
*d = DefaultsModeInRegion
ok = true
case strings.EqualFold(v, string(DefaultsModeLegacy)):
*d = DefaultsModeLegacy
ok = true
case strings.EqualFold(v, string(DefaultsModeMobile)):
*d = DefaultsModeMobile
ok = true
case strings.EqualFold(v, string(DefaultsModeStandard)):
*d = DefaultsModeStandard
ok = true
case len(v) == 0:
*d = DefaultsModeLegacy
ok = true
default:
*d = DefaultsMode(v)
}
return ok
}

View File

@ -160,22 +160,28 @@ func (e *EndpointNotFoundError) Unwrap() error {
// available. If the EndpointResolver returns an EndpointNotFoundError error,
// API clients will fallback to attempting to resolve the endpoint using its
// internal default endpoint resolver.
//
// Deprecated: See EndpointResolverWithOptions
type EndpointResolver interface {
ResolveEndpoint(service, region string) (Endpoint, error)
}
// EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface.
//
// Deprecated: See EndpointResolverWithOptionsFunc
type EndpointResolverFunc func(service, region string) (Endpoint, error)
// ResolveEndpoint calls the wrapped function and returns the results.
//
// Deprecated: See EndpointResolverWithOptions.ResolveEndpoint
func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) {
return e(service, region)
}
// EndpointResolverWithOptions is an endpoint resolver that can be used to provide or
// override an endpoint for the given service, region, and the service clients EndpointOptions. API clients will
// attempt to use the EndpointResolver first to resolve an endpoint if
// available. If the EndpointResolver returns an EndpointNotFoundError error,
// override an endpoint for the given service, region, and the service client's EndpointOptions. API clients will
// attempt to use the EndpointResolverWithOptions first to resolve an endpoint if
// available. If the EndpointResolverWithOptions returns an EndpointNotFoundError error,
// API clients will fallback to attempting to resolve the endpoint using its
// internal default endpoint resolver.
type EndpointResolverWithOptions interface {
@ -183,11 +189,11 @@ type EndpointResolverWithOptions interface {
}
// EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface.
type EndpointResolverWithOptionsFunc func(service, region string, options interface{}) (Endpoint, error)
type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error)
// ResolveEndpoint calls the wrapped function and returns the results.
func (e EndpointResolverWithOptionsFunc) ResolveEndpoint(service, region string, options interface{}) (Endpoint, error) {
return e(service, region, options)
func (e EndpointResolverWithOptionsFunc) ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) {
return e(service, region, options...)
}
// GetDisableHTTPS takes a service's EndpointResolverOptions and returns the DisableHTTPS value.

View File

@ -342,3 +342,24 @@ func ToTimeSlice(vs []*time.Time) []time.Time {
func ToTimeMap(vs map[string]*time.Time) map[string]time.Time {
return ptr.ToTimeMap(vs)
}
// ToDuration returns time.Duration value dereferenced if the passed
// in pointer was not nil. Returns a time.Duration zero value if the
// pointer was nil.
func ToDuration(p *time.Duration) (v time.Duration) {
return ptr.ToDuration(p)
}
// ToDurationSlice returns a slice of time.Duration values, that are
// dereferenced if the passed in pointer was not nil. Returns a time.Duration
// zero value if the pointer was nil.
func ToDurationSlice(vs []*time.Duration) []time.Duration {
return ptr.ToDurationSlice(vs)
}
// ToDurationMap returns a map of time.Duration values, that are
// dereferenced if the passed in pointer was not nil. The time.Duration
// zero value is used if the pointer was nil.
func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration {
return ptr.ToDurationMap(vs)
}

View File

@ -3,4 +3,4 @@
package aws
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.11.0"
const goModuleVersion = "1.16.5"

View File

@ -1,3 +1,27 @@
# v1.4.2 (2022-06-07)
* No change notes available for this release.
# v1.4.1 (2022-03-24)
* No change notes available for this release.
# v1.4.0 (2022-03-08)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.3.0 (2022-02-24)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.2.0 (2022-01-14)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.1.0 (2022-01-07)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.0.0 (2021-11-06)
* **Announcement**: Support has been added for AWS EventStream APIs for Kinesis, S3, and Transcribe Streaming. Support for the Lex Runtime V2 EventStream API will be added in a future release.

View File

@ -3,4 +3,4 @@
package eventstream
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.0.0"
const goModuleVersion = "1.4.2"

View File

@ -7,8 +7,9 @@ import (
// TokenBucket provides a concurrency safe utility for adding and removing
// tokens from the available token bucket.
type TokenBucket struct {
capacity uint
remainingTokens uint
maxCapacity uint
minCapacity uint
mu sync.Mutex
}
@ -16,8 +17,9 @@ type TokenBucket struct {
// specified.
func NewTokenBucket(i uint) *TokenBucket {
return &TokenBucket{
capacity: i,
remainingTokens: i,
maxCapacity: i,
minCapacity: 1,
}
}
@ -25,17 +27,18 @@ func NewTokenBucket(i uint) *TokenBucket {
// there are tokens available true will be returned along with the number of
// available tokens remaining. If amount requested is larger than the available
// capacity, false will be returned along with the available capacity. If the
// amount is less than the available capacity
// amount is less than the available capacity, the capacity will be reduced by
// that amount, and the remaining capacity and true will be returned.
func (t *TokenBucket) Retrieve(amount uint) (available uint, retrieved bool) {
t.mu.Lock()
defer t.mu.Unlock()
if amount > t.capacity {
return t.capacity, false
if amount > t.remainingTokens {
return t.remainingTokens, false
}
t.capacity -= amount
return t.capacity, true
t.remainingTokens -= amount
return t.remainingTokens, true
}
// Refund returns the amount of tokens back to the available token bucket, up
@ -44,8 +47,50 @@ func (t *TokenBucket) Refund(amount uint) {
t.mu.Lock()
defer t.mu.Unlock()
t.capacity += amount
if t.capacity > t.maxCapacity {
t.capacity = t.maxCapacity
}
// Capacity cannot exceed max capacity.
t.remainingTokens = uintMin(t.remainingTokens+amount, t.maxCapacity)
}
// Capacity returns the maximum capacity of tokens that the bucket could
// contain.
func (t *TokenBucket) Capacity() uint {
t.mu.Lock()
defer t.mu.Unlock()
return t.maxCapacity
}
// Remaining returns the number of tokens that remaining in the bucket.
func (t *TokenBucket) Remaining() uint {
t.mu.Lock()
defer t.mu.Unlock()
return t.remainingTokens
}
// Resize adjusts the size of the token bucket. Returns the capacity remaining.
func (t *TokenBucket) Resize(size uint) uint {
t.mu.Lock()
defer t.mu.Unlock()
t.maxCapacity = uintMax(size, t.minCapacity)
// Capacity needs to be capped at max capacity, if max size reduced.
t.remainingTokens = uintMin(t.remainingTokens, t.maxCapacity)
return t.remainingTokens
}
func uintMin(a, b uint) uint {
if a < b {
return a
}
return b
}
func uintMax(a, b uint) uint {
if a > b {
return a
}
return b
}

View File

@ -69,6 +69,11 @@ func (l *TokenRateLimit) AddTokens(v uint) error {
return nil
}
// Remaining returns the number of remaining tokens in the bucket.
func (l *TokenRateLimit) Remaining() uint {
return l.bucket.Remaining()
}
// QuotaExceededError provides the SDK error when the retries for a given
// token bucket have been exhausted.
type QuotaExceededError struct {

View File

@ -0,0 +1,156 @@
package retry
import (
"context"
"fmt"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
)
const (
// DefaultRequestCost is the cost of a single request from the adaptive
// rate limited token bucket.
DefaultRequestCost uint = 1
)
// DefaultThrottles provides the set of errors considered throttle errors that
// are checked by default.
var DefaultThrottles = []IsErrorThrottle{
ThrottleErrorCode{
Codes: DefaultThrottleErrorCodes,
},
}
// AdaptiveModeOptions provides the functional options for configuring the
// adaptive retry mode, and delay behavior.
type AdaptiveModeOptions struct {
// If the adaptive token bucket is empty, when an attempt will be made
// AdaptiveMode will sleep until a token is available. This can occur when
// attempts fail with throttle errors. Use this option to disable the sleep
// until token is available, and return error immediately.
FailOnNoAttemptTokens bool
// The cost of an attempt from the AdaptiveMode's adaptive token bucket.
RequestCost uint
// Set of strategies to determine if the attempt failed due to a throttle
// error.
//
// It is safe to append to this list in NewAdaptiveMode's functional options.
Throttles []IsErrorThrottle
// Set of options for standard retry mode that AdaptiveMode is built on top
// of. AdaptiveMode may apply its own defaults to Standard retry mode that
// are different than the defaults of NewStandard. Use these options to
// override the default options.
StandardOptions []func(*StandardOptions)
}
// AdaptiveMode provides an experimental retry strategy that expands on the
// Standard retry strategy, adding client attempt rate limits. The attempt rate
// limit is initially unrestricted, but becomes restricted when the attempt
// fails with for a throttle error. When restricted AdaptiveMode may need to
// sleep before an attempt is made, if too many throttles have been received.
// AdaptiveMode's sleep can be canceled with context cancel. Set
// AdaptiveModeOptions FailOnNoAttemptTokens to change the behavior from sleep,
// to fail fast.
//
// Eventually unrestricted attempt rate limit will be restored once attempts no
// longer are failing due to throttle errors.
type AdaptiveMode struct {
options AdaptiveModeOptions
throttles IsErrorThrottles
retryer aws.RetryerV2
rateLimit *adaptiveRateLimit
}
// NewAdaptiveMode returns an initialized AdaptiveMode retry strategy, applying
// any provided functional options over the default AdaptiveModeOptions.
func NewAdaptiveMode(optFns ...func(*AdaptiveModeOptions)) *AdaptiveMode {
	options := AdaptiveModeOptions{
		RequestCost: DefaultRequestCost,
		Throttles:   append([]IsErrorThrottle{}, DefaultThrottles...),
	}
	for _, optFn := range optFns {
		optFn(&options)
	}

	return &AdaptiveMode{
		options:   options,
		throttles: IsErrorThrottles(options.Throttles),
		retryer:   NewStandard(options.StandardOptions...),
		rateLimit: newAdaptiveRateLimit(),
	}
}
// IsErrorRetryable returns if the failed attempt is retryable. This check
// should determine if the error can be retried, or if the error is
// terminal.
func (a *AdaptiveMode) IsErrorRetryable(err error) bool {
return a.retryer.IsErrorRetryable(err)
}
// MaxAttempts returns the maximum number of attempts that can be made for
// a attempt before failing. A value of 0 implies that the attempt should
// be retried until it succeeds if the errors are retryable.
func (a *AdaptiveMode) MaxAttempts() int {
return a.retryer.MaxAttempts()
}
// RetryDelay returns the delay that should be used before retrying the
// attempt. Will return error if the if the delay could not be determined.
func (a *AdaptiveMode) RetryDelay(attempt int, opErr error) (
time.Duration, error,
) {
return a.retryer.RetryDelay(attempt, opErr)
}
// GetRetryToken attempts to deduct the retry cost from the retry token pool.
// Returning the token release function, or error.
func (a *AdaptiveMode) GetRetryToken(ctx context.Context, opErr error) (
releaseToken func(error) error, err error,
) {
return a.retryer.GetRetryToken(ctx, opErr)
}
// GetInitialToken returns the initial attempt token that can increment the
// retry token pool if the attempt is successful.
//
// Deprecated: This method does not provide a way to block using Context,
// nor can it return an error. Use RetryerV2, and GetAttemptToken instead. Only
// present to implement Retryer interface.
func (a *AdaptiveMode) GetInitialToken() (releaseToken func(error) error) {
return nopRelease
}
// GetAttemptToken returns the attempt token that can be used to rate limit
// attempt calls. Used by the SDK's retry package's Attempt middleware to get
// an attempt token prior to making the attempt, and to release the token
// after the attempt has been made.
//
// Blocks until a token is available from the adaptive rate limiter, retrying
// after each suggested wait period, unless FailOnNoAttemptTokens is set, in
// which case an empty bucket fails immediately. The wait can be canceled via
// the context.
func (a *AdaptiveMode) GetAttemptToken(ctx context.Context) (func(error) error, error) {
	for {
		acquiredToken, waitTryAgain := a.rateLimit.AcquireToken(a.options.RequestCost)
		if acquiredToken {
			break
		}

		if a.options.FailOnNoAttemptTokens {
			// Fixed grammar of the error message ("enables" -> "enabled").
			return nil, fmt.Errorf(
				"unable to get attempt token, and FailOnNoAttemptTokens enabled")
		}

		if err := sdk.SleepWithContext(ctx, waitTryAgain); err != nil {
			return nil, fmt.Errorf("failed to wait for token to be available, %w", err)
		}
	}

	return a.handleResponse, nil
}
// handleResponse informs the adaptive rate limiter whether the completed
// attempt was throttled, so it can adjust the client's send rate. Always
// returns nil.
func (a *AdaptiveMode) handleResponse(opErr error) error {
	wasThrottled := a.throttles.IsErrorThrottle(opErr).Bool()
	a.rateLimit.Update(wasThrottled)
	return nil
}

View File

@ -0,0 +1,158 @@
package retry
import (
"math"
"sync"
"time"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
)
// adaptiveRateLimit tracks the client's observed request rate and adapts a
// token-bucket fill rate in response to throttle errors, using a CUBIC-style
// increase/decrease scheme (see cubicSuccess/cubicThrottle).
type adaptiveRateLimit struct {
	// tokenBucketEnabled stays false until the first throttle is observed;
	// while false AcquireToken grants tokens unconditionally.
	tokenBucketEnabled bool

	smooth        float64 // smoothing factor for the measured tx rate average
	beta          float64 // multiplicative decrease factor applied on throttle
	scaleConstant float64 // cubic growth scale constant
	minFillRate   float64 // lower bound for fillRate

	fillRate         float64 // current bucket fill rate, tokens per second
	calculatedRate   float64 // last rate produced by the cubic functions
	lastRefilled     time.Time
	measuredTxRate   float64 // smoothed observed request rate
	lastTxRateBucket float64 // half-second time bucket of the last rate sample
	requestCount     int64   // requests counted in the current time bucket
	lastMaxRate      float64 // rate in effect when last throttled
	lastThrottleTime time.Time
	timeWindow       float64

	tokenBucket *adaptiveTokenBucket

	mu sync.Mutex
}
// newAdaptiveRateLimit returns an adaptiveRateLimit initialized with the
// tuning constants used by AdaptiveMode. The token bucket starts empty and
// rate limiting is disabled until the first throttle is observed.
func newAdaptiveRateLimit() *adaptiveRateLimit {
	now := sdk.NowTime()
	return &adaptiveRateLimit{
		smooth:        0.8,
		beta:          0.7,
		scaleConstant: 0.4,
		minFillRate:   0.5,

		lastTxRateBucket: math.Floor(timeFloat64Seconds(now)),
		lastThrottleTime: now,

		tokenBucket: newAdaptiveTokenBucket(0),
	}
}
// Enable turns client-side rate limiting on (true) or off (false).
func (a *adaptiveRateLimit) Enable(v bool) {
	a.mu.Lock()
	defer a.mu.Unlock()

	a.tokenBucketEnabled = v
}
// AcquireToken attempts to take amount tokens from the bucket. While rate
// limiting is disabled (no throttle seen yet) the token is granted
// immediately. Otherwise the bucket is first refilled for elapsed time; if
// too few tokens remain, false is returned along with how long the caller
// should wait before trying again.
func (a *adaptiveRateLimit) AcquireToken(amount uint) (
	tokenAcquired bool, waitTryAgain time.Duration,
) {
	a.mu.Lock()
	defer a.mu.Unlock()

	if !a.tokenBucketEnabled {
		return true, 0
	}

	a.tokenBucketRefill()

	available, ok := a.tokenBucket.Retrieve(float64(amount))
	if !ok {
		// Time for the deficit to be replenished at the current fill rate.
		waitDur := float64Seconds((float64(amount) - available) / a.fillRate)
		return false, waitDur
	}
	return true, 0
}
// Update feeds the outcome of an attempt into the rate limiter. On a
// throttle, the send rate is cut via cubicThrottle and rate limiting is
// enabled; on success the rate grows along the cubic curve. The resulting
// rate is capped at twice the measured transmission rate before being
// installed as the new bucket fill rate.
func (a *adaptiveRateLimit) Update(throttled bool) {
	a.mu.Lock()
	defer a.mu.Unlock()

	a.updateMeasuredRate()

	if throttled {
		rateToUse := a.measuredTxRate
		if a.tokenBucketEnabled {
			rateToUse = math.Min(a.measuredTxRate, a.fillRate)
		}

		a.lastMaxRate = rateToUse
		a.calculateTimeWindow()
		a.lastThrottleTime = sdk.NowTime()
		a.calculatedRate = a.cubicThrottle(rateToUse)
		a.tokenBucketEnabled = true
	} else {
		a.calculateTimeWindow()
		a.calculatedRate = a.cubicSuccess(sdk.NowTime())
	}

	newRate := math.Min(a.calculatedRate, 2*a.measuredTxRate)
	a.tokenBucketUpdateRate(newRate)
}
// cubicSuccess grows the calculated rate along a cubic curve anchored at
// lastMaxRate and the time of the last throttle.
func (a *adaptiveRateLimit) cubicSuccess(t time.Time) float64 {
	dt := secondsFloat64(t.Sub(a.lastThrottleTime))
	return (a.scaleConstant * math.Pow(dt-a.timeWindow, 3)) + a.lastMaxRate
}

// cubicThrottle applies the multiplicative decrease factor beta to the rate.
func (a *adaptiveRateLimit) cubicThrottle(rateToUse float64) float64 {
	return rateToUse * a.beta
}

// calculateTimeWindow computes the time offset at which the cubic curve in
// cubicSuccess returns to lastMaxRate.
func (a *adaptiveRateLimit) calculateTimeWindow() {
	a.timeWindow = math.Pow((a.lastMaxRate*(1.-a.beta))/a.scaleConstant, 1./3.)
}

// tokenBucketUpdateRate refills the bucket for time elapsed at the old fill
// rate, then installs the new fill rate (bounded below by minFillRate) and
// resizes the bucket capacity to the new rate.
func (a *adaptiveRateLimit) tokenBucketUpdateRate(newRPS float64) {
	// Refill based on the previous fill rate before the rate changes.
	a.tokenBucketRefill()

	a.fillRate = math.Max(newRPS, a.minFillRate)
	a.tokenBucket.Resize(newRPS)
}

// updateMeasuredRate samples the observed request rate in half-second
// buckets and folds each sample into measuredTxRate with exponential
// smoothing (factor smooth).
func (a *adaptiveRateLimit) updateMeasuredRate() {
	now := sdk.NowTime()
	timeBucket := math.Floor(timeFloat64Seconds(now)*2.) / 2.
	a.requestCount++

	if timeBucket > a.lastTxRateBucket {
		currentRate := float64(a.requestCount) / (timeBucket - a.lastTxRateBucket)
		a.measuredTxRate = (currentRate * a.smooth) + (a.measuredTxRate * (1. - a.smooth))
		a.requestCount = 0
		a.lastTxRateBucket = timeBucket
	}
}

// tokenBucketRefill adds tokens for the wall-clock time elapsed since the
// last refill, at the current fill rate. The first call only records the
// refill time.
func (a *adaptiveRateLimit) tokenBucketRefill() {
	now := sdk.NowTime()
	if a.lastRefilled.IsZero() {
		a.lastRefilled = now
		return
	}

	fillAmount := secondsFloat64(now.Sub(a.lastRefilled)) * a.fillRate
	a.tokenBucket.Refund(fillAmount)
	a.lastRefilled = now
}
func float64Seconds(v float64) time.Duration {
return time.Duration(v * float64(time.Second))
}
func secondsFloat64(v time.Duration) float64 {
return float64(v) / float64(time.Second)
}
func timeFloat64Seconds(v time.Time) float64 {
return float64(v.UnixNano()) / float64(time.Second)
}

View File

@ -0,0 +1,83 @@
package retry
import (
"math"
"sync"
)
// adaptiveTokenBucket provides a concurrency safe utility for adding and
// removing tokens from the available token bucket.
type adaptiveTokenBucket struct {
	remainingTokens float64
	maxCapacity     float64
	minCapacity     float64

	mu sync.Mutex
}

// newAdaptiveTokenBucket returns an initialized adaptiveTokenBucket with the
// capacity specified.
func newAdaptiveTokenBucket(i float64) *adaptiveTokenBucket {
	b := &adaptiveTokenBucket{minCapacity: 1}
	b.maxCapacity = i
	b.remainingTokens = i
	return b
}

// Retrieve attempts to reduce the available tokens by the amount requested.
// When enough tokens are available, they are removed and the new remaining
// count is returned with true. When the request exceeds what is available,
// nothing is removed and the current count is returned with false.
func (b *adaptiveTokenBucket) Retrieve(amount float64) (available float64, retrieved bool) {
	b.mu.Lock()
	defer b.mu.Unlock()

	if b.remainingTokens >= amount {
		b.remainingTokens -= amount
		return b.remainingTokens, true
	}
	return b.remainingTokens, false
}

// Refund returns the amount of tokens back to the available token bucket,
// never exceeding the bucket's maximum capacity.
func (b *adaptiveTokenBucket) Refund(amount float64) {
	b.mu.Lock()
	defer b.mu.Unlock()

	b.remainingTokens = math.Min(b.maxCapacity, b.remainingTokens+amount)
}

// Capacity returns the maximum capacity of tokens that the bucket could
// contain.
func (b *adaptiveTokenBucket) Capacity() float64 {
	b.mu.Lock()
	defer b.mu.Unlock()

	return b.maxCapacity
}

// Remaining returns the number of tokens currently left in the bucket.
func (b *adaptiveTokenBucket) Remaining() float64 {
	b.mu.Lock()
	defer b.mu.Unlock()

	return b.remainingTokens
}

// Resize adjusts the size of the token bucket, never shrinking below the
// minimum capacity. Returns the tokens remaining after the resize.
func (b *adaptiveTokenBucket) Resize(size float64) float64 {
	b.mu.Lock()
	defer b.mu.Unlock()

	b.maxCapacity = math.Max(b.minCapacity, size)

	// If the maximum shrank, clamp the remaining tokens to the new capacity.
	if b.remainingTokens > b.maxCapacity {
		b.remainingTokens = b.maxCapacity
	}
	return b.remainingTokens
}

View File

@ -2,9 +2,9 @@
//
// Retryer Interface and Implementations
//
// This packages defines Retryer interface that is used to either implement custom retry behavior
// or to extend the existing retry implementations provided by the SDK. This packages provides a single
// retry implementations: Standard.
// This package defines Retryer interface that is used to either implement custom retry behavior
// or to extend the existing retry implementations provided by the SDK. This package provides a single
// retry implementation: Standard.
//
// Standard
//
@ -33,7 +33,7 @@
// value.
//
// You can configure the standard retryer implementation to fit your applications by constructing a standard retryer
// using the NewStandard function, and providing one more functional arguments that mutate the StandardOptions
// using the NewStandard function, and providing one more functional argument that mutate the StandardOptions
// structure. StandardOptions provides the ability to modify the token bucket rate limiter, retryable error conditions,
// and the retry delay policy.
//
@ -71,7 +71,7 @@
// standard retryer.
//
// IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example,
// this can be used to extend the standard retryer to add additional logic ot determine if a
// this can be used to extend the standard retryer to add additional logic to determine if an
// error should be retried.
//
// IsErrorTimeoutFunc - Can be used to wrap a function to satisfy IsErrorTimeout interface. For example,

View File

@ -16,8 +16,8 @@ import (
"github.com/aws/smithy-go/transport/http"
)
// RequestCloner is a function that can take an input request type and clone the request
// for use in a subsequent retry attempt
// RequestCloner is a function that can take an input request type and clone
// the request for use in a subsequent retry attempt.
type RequestCloner func(interface{}) interface{}
type retryMetadata struct {
@ -27,20 +27,24 @@ type retryMetadata struct {
AttemptClockSkew time.Duration
}
// Attempt is a Smithy FinalizeMiddleware that handles retry attempts using the provided
// Retryer implementation
// Attempt is a Smithy Finalize middleware that handles retry attempts using
// the provided Retryer implementation.
type Attempt struct {
// Enable the logging of retry attempts performed by the SDK.
// This will include logging retry attempts, unretryable errors, and when max attempts are reached.
// Enable the logging of retry attempts performed by the SDK. This will
// include logging retry attempts, unretryable errors, and when max
// attempts are reached.
LogAttempts bool
retryer aws.Retryer
retryer aws.RetryerV2
requestCloner RequestCloner
}
// NewAttemptMiddleware returns a new Attempt retry middleware.
func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optFns ...func(*Attempt)) *Attempt {
m := &Attempt{retryer: retryer, requestCloner: requestCloner}
m := &Attempt{
retryer: wrapAsRetryerV2(retryer),
requestCloner: requestCloner,
}
for _, fn := range optFns {
fn(m)
}
@ -48,9 +52,7 @@ func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optF
}
// ID returns the middleware identifier
func (r *Attempt) ID() string {
return "Retry"
}
func (r *Attempt) ID() string { return "Retry" }
func (r Attempt) logf(logger logging.Logger, classification logging.Classification, format string, v ...interface{}) {
if !r.LogAttempts {
@ -59,8 +61,9 @@ func (r Attempt) logf(logger logging.Logger, classification logging.Classificati
logger.Logf(classification, format, v...)
}
// HandleFinalize utilizes the provider Retryer implementation to attempt retries over the next handler
func (r Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
// HandleFinalize utilizes the provider Retryer implementation to attempt
// retries over the next handler
func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error,
) {
var attemptNum int
@ -68,12 +71,14 @@ func (r Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInp
var attemptResults AttemptResults
maxAttempts := r.retryer.MaxAttempts()
releaseRetryToken := nopRelease
for {
attemptNum++
attemptInput := in
attemptInput.Request = r.requestCloner(attemptInput.Request)
// Record the metadata for the attempt being started.
attemptCtx := setRetryMetadata(ctx, retryMetadata{
AttemptNum: attemptNum,
AttemptTime: sdk.NowTime().UTC(),
@ -82,23 +87,20 @@ func (r Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInp
})
var attemptResult AttemptResult
out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next)
attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata)
out, attemptResult, err = r.handleAttempt(attemptCtx, attemptInput, next)
var ok bool
attemptClockSkew, ok = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata)
if !ok {
attemptClockSkew = 0
}
// AttemptResult Retried states that the attempt was not successful, and
// should be retried.
shouldRetry := attemptResult.Retried
// add attempt metadata to list of all attempt metadata
// Add attempt metadata to list of all attempt metadata
attemptResults.Results = append(attemptResults.Results, attemptResult)
if !shouldRetry {
// Ensure the last response's metadata is used as the bases for result
// metadata returned by the stack.
// metadata returned by the stack. The Slice of attempt results
// will be added to this cloned metadata.
metadata = attemptResult.ResponseMetadata.Clone()
break
@ -110,81 +112,132 @@ func (r Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInp
}
// handleAttempt handles an individual request attempt.
func (r Attempt) handleAttempt(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
out smithymiddle.FinalizeOutput, attemptResult AttemptResult, err error,
func (r *Attempt) handleAttempt(
ctx context.Context, in smithymiddle.FinalizeInput, releaseRetryToken func(error) error, next smithymiddle.FinalizeHandler,
) (
out smithymiddle.FinalizeOutput, attemptResult AttemptResult, _ func(error) error, err error,
) {
defer func() {
attemptResult.Err = err
}()
relRetryToken := r.retryer.GetInitialToken()
// Short circuit if this attempt never can succeed because the context is
// canceled. This reduces the chance of token pools being modified for
// attempts that will not be made
select {
case <-ctx.Done():
return out, attemptResult, nopRelease, ctx.Err()
default:
}
//------------------------------
// Get Attempt Token
//------------------------------
releaseAttemptToken, err := r.retryer.GetAttemptToken(ctx)
if err != nil {
return out, attemptResult, nopRelease, fmt.Errorf(
"failed to get retry Send token, %w", err)
}
//------------------------------
// Send Attempt
//------------------------------
logger := smithymiddle.GetLogger(ctx)
service, operation := awsmiddle.GetServiceID(ctx), awsmiddle.GetOperationName(ctx)
retryMetadata, _ := getRetryMetadata(ctx)
attemptNum := retryMetadata.AttemptNum
maxAttempts := retryMetadata.MaxAttempts
// Following attempts must ensure the request payload stream starts in a
// rewound state.
if attemptNum > 1 {
if rewindable, ok := in.Request.(interface{ RewindStream() error }); ok {
if rewindErr := rewindable.RewindStream(); rewindErr != nil {
err = fmt.Errorf("failed to rewind transport stream for retry, %w", rewindErr)
return out, attemptResult, err
return out, attemptResult, nopRelease, fmt.Errorf(
"failed to rewind transport stream for retry, %w", rewindErr)
}
}
r.logf(logger, logging.Debug, "retrying request %s/%s, attempt %d", service, operation, attemptNum)
r.logf(logger, logging.Debug, "retrying request %s/%s, attempt %d",
service, operation, attemptNum)
}
var metadata smithymiddle.Metadata
out, metadata, err = next.HandleFinalize(ctx, in)
attemptResult.ResponseMetadata = metadata
if releaseError := relRetryToken(err); releaseError != nil && err != nil {
err = fmt.Errorf("failed to release token after request error, %w", err)
return out, attemptResult, err
//------------------------------
// Bookkeeping
//------------------------------
// Release the retry token based on the state of the attempt's error (if any).
if releaseError := releaseRetryToken(err); releaseError != nil && err != nil {
return out, attemptResult, nopRelease, fmt.Errorf(
"failed to release retry token after request error, %w", err)
}
// Release the attempt token based on the state of the attempt's error (if any).
if releaseError := releaseAttemptToken(err); releaseError != nil && err != nil {
return out, attemptResult, nopRelease, fmt.Errorf(
"failed to release initial token after request error, %w", err)
}
// If there was no error making the attempt, nothing further to do. There
// will be nothing to retry.
if err == nil {
return out, attemptResult, err
return out, attemptResult, nopRelease, err
}
//------------------------------
// Is Retryable and Should Retry
//------------------------------
// If the attempt failed with an unretryable error, nothing further to do
// but return, and inform the caller about the terminal failure.
retryable := r.retryer.IsErrorRetryable(err)
if !retryable {
r.logf(logger, logging.Debug, "request failed with unretryable error %v", err)
return out, attemptResult, err
return out, attemptResult, nopRelease, err
}
// set retryable to true
attemptResult.Retryable = true
// Once the maximum number of attempts have been exhausted there is nothing
// further to do other than inform the caller about the terminal failure.
if maxAttempts > 0 && attemptNum >= maxAttempts {
r.logf(logger, logging.Debug, "max retry attempts exhausted, max %d", maxAttempts)
err = &MaxAttemptsError{
Attempt: attemptNum,
Err: err,
}
return out, attemptResult, err
return out, attemptResult, nopRelease, err
}
relRetryToken, reqErr := r.retryer.GetRetryToken(ctx, err)
if reqErr != nil {
return out, attemptResult, reqErr
//------------------------------
// Get Retry (aka Retry Quota) Token
//------------------------------
// Get a retry token that will be released after the attempt completes.
releaseRetryToken, retryTokenErr := r.retryer.GetRetryToken(ctx, err)
if retryTokenErr != nil {
return out, attemptResult, nopRelease, retryTokenErr
}
//------------------------------
// Retry Delay and Sleep
//------------------------------
// Get the retry delay before another attempt can be made, and sleep for
// that time. Potentially exit early if the sleep is canceled via the
// context.
retryDelay, reqErr := r.retryer.RetryDelay(attemptNum, err)
if reqErr != nil {
return out, attemptResult, reqErr
return out, attemptResult, releaseRetryToken, reqErr
}
if reqErr = sdk.SleepWithContext(ctx, retryDelay); reqErr != nil {
err = &aws.RequestCanceledError{Err: reqErr}
return out, attemptResult, err
return out, attemptResult, releaseRetryToken, err
}
// The request should be re-attempted.
attemptResult.Retried = true
return out, attemptResult, err
return out, attemptResult, releaseRetryToken, err
}
// MetricsHeader attaches SDK request metric header for retries to the transport
@ -195,7 +248,7 @@ func (r *MetricsHeader) ID() string {
return "RetryMetricsHeader"
}
// HandleFinalize attaches the sdk request metric header to the transport layer
// HandleFinalize attaches the SDK request metric header to the transport layer
func (r MetricsHeader) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error,
) {
@ -251,13 +304,14 @@ func setRetryMetadata(ctx context.Context, metadata retryMetadata) context.Conte
return middleware.WithStackValue(ctx, retryMetadataKey{}, metadata)
}
// AddRetryMiddlewaresOptions is the set of options that can be passed to AddRetryMiddlewares for configuring retry
// associated middleware.
// AddRetryMiddlewaresOptions is the set of options that can be passed to
// AddRetryMiddlewares for configuring retry associated middleware.
type AddRetryMiddlewaresOptions struct {
Retryer aws.Retryer
// Enable the logging of retry attempts performed by the SDK.
// This will include logging retry attempts, unretryable errors, and when max attempts are reached.
// Enable the logging of retry attempts performed by the SDK. This will
// include logging retry attempts, unretryable errors, and when max
// attempts are reached.
LogRetryAttempts bool
}

View File

@ -1,6 +1,7 @@
package retry
import (
"context"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
@ -17,13 +18,13 @@ func AddWithErrorCodes(r aws.Retryer, codes ...string) aws.Retryer {
}
return &withIsErrorRetryable{
Retryer: r,
RetryerV2: wrapAsRetryerV2(r),
Retryable: retryable,
}
}
type withIsErrorRetryable struct {
aws.Retryer
aws.RetryerV2
Retryable IsErrorRetryable
}
@ -31,20 +32,20 @@ func (r *withIsErrorRetryable) IsErrorRetryable(err error) bool {
if v := r.Retryable.IsErrorRetryable(err); v != aws.UnknownTernary {
return v.Bool()
}
return r.Retryer.IsErrorRetryable(err)
return r.RetryerV2.IsErrorRetryable(err)
}
// AddWithMaxAttempts returns a Retryer with MaxAttempts set to the value
// specified.
func AddWithMaxAttempts(r aws.Retryer, max int) aws.Retryer {
return &withMaxAttempts{
Retryer: r,
RetryerV2: wrapAsRetryerV2(r),
Max: max,
}
}
type withMaxAttempts struct {
aws.Retryer
aws.RetryerV2
Max int
}
@ -57,16 +58,33 @@ func (w *withMaxAttempts) MaxAttempts() int {
// delay.
func AddWithMaxBackoffDelay(r aws.Retryer, delay time.Duration) aws.Retryer {
return &withMaxBackoffDelay{
Retryer: r,
RetryerV2: wrapAsRetryerV2(r),
backoff: NewExponentialJitterBackoff(delay),
}
}
type withMaxBackoffDelay struct {
aws.Retryer
aws.RetryerV2
backoff *ExponentialJitterBackoff
}
func (r *withMaxBackoffDelay) RetryDelay(attempt int, err error) (time.Duration, error) {
return r.backoff.BackoffDelay(attempt, err)
}
// wrappedAsRetryerV2 adapts a legacy aws.Retryer to the aws.RetryerV2
// interface by embedding it and supplying the missing GetAttemptToken method.
type wrappedAsRetryerV2 struct {
	aws.Retryer
}

// wrapAsRetryerV2 returns r unchanged when it already implements
// aws.RetryerV2; otherwise it wraps r so the V2 interface is satisfied.
func wrapAsRetryerV2(r aws.Retryer) aws.RetryerV2 {
	v, ok := r.(aws.RetryerV2)
	if !ok {
		v = wrappedAsRetryerV2{Retryer: r}
	}
	return v
}

// GetAttemptToken satisfies aws.RetryerV2 by delegating to the wrapped
// retryer's deprecated GetInitialToken method; the context is ignored.
func (w wrappedAsRetryerV2) GetAttemptToken(context.Context) (func(error) error, error) {
	return w.Retryer.GetInitialToken(), nil
}

View File

@ -2,6 +2,7 @@ package retry
import (
"context"
"fmt"
"time"
"github.com/aws/aws-sdk-go-v2/aws/ratelimit"
@ -52,8 +53,11 @@ var DefaultRetryableHTTPStatusCodes = map[int]struct{}{
var DefaultRetryableErrorCodes = map[string]struct{}{
"RequestTimeout": {},
"RequestTimeoutException": {},
}
// Throttled status codes
// DefaultThrottleErrorCodes provides the set of API error codes that are
// considered throttle errors.
var DefaultThrottleErrorCodes = map[string]struct{}{
"Throttling": {},
"ThrottlingException": {},
"ThrottledException": {},
@ -82,33 +86,66 @@ var DefaultRetryables = []IsErrorRetryable{
RetryableErrorCode{
Codes: DefaultRetryableErrorCodes,
},
RetryableErrorCode{
Codes: DefaultThrottleErrorCodes,
},
}
// DefaultTimeouts provides the set of timeout checks that are used by default.
var DefaultTimeouts = []IsErrorTimeout{
TimeouterError{},
}
// StandardOptions provides the functional options for configuring the standard
// retryable, and delay behavior.
type StandardOptions struct {
// Maximum number of attempts that should be made.
MaxAttempts int
// MaxBackoff duration between retried attempts.
MaxBackoff time.Duration
// Provides the backoff strategy the retryer will use to determine the
// delay between retry attempts.
Backoff BackoffDelayer
// Set of strategies to determine if the attempt should be retried based on
// the error response received.
//
// It is safe to append to this list in NewStandard's functional options.
Retryables []IsErrorRetryable
// Set of strategies to determine if the attempt failed due to a timeout
// error.
//
// It is safe to append to this list in NewStandard's functional options.
Timeouts []IsErrorTimeout
// Provides the rate limiting strategy for rate limiting attempt retries
// across all attempts the retryer is being used with.
RateLimiter RateLimiter
// The cost to deduct from the RateLimiter's token bucket per retry.
RetryCost uint
// The cost to deduct from the RateLimiter's token bucket per retry caused
// by timeout error.
RetryTimeoutCost uint
// The cost to payback to the RateLimiter's token bucket for successful
// attempts.
NoRetryIncrement uint
}
// RateLimiter provides the interface for limiting the rate of request retries
// allowed by the retrier.
// RateLimiter provides the interface for limiting the rate of attempt retries
// allowed by the retryer.
type RateLimiter interface {
GetToken(ctx context.Context, cost uint) (releaseToken func() error, err error)
AddTokens(uint) error
}
// Standard is the standard retry pattern for the SDK. It uses a set of
// retryable checks to determine of the failed request should be retried, and
// retryable checks to determine if the failed attempt should be retried, and
// what retry delay should be used.
type Standard struct {
options StandardOptions
@ -124,7 +161,8 @@ func NewStandard(fnOpts ...func(*StandardOptions)) *Standard {
o := StandardOptions{
MaxAttempts: DefaultMaxAttempts,
MaxBackoff: DefaultMaxBackoff,
Retryables: DefaultRetryables,
Retryables: append([]IsErrorRetryable{}, DefaultRetryables...),
Timeouts: append([]IsErrorTimeout{}, DefaultTimeouts...),
RateLimiter: ratelimit.NewTokenRateLimit(DefaultRetryRateTokens),
RetryCost: DefaultRetryCost,
@ -134,23 +172,20 @@ func NewStandard(fnOpts ...func(*StandardOptions)) *Standard {
for _, fn := range fnOpts {
fn(&o)
}
if o.MaxAttempts <= 0 {
o.MaxAttempts = DefaultMaxAttempts
}
backoff := o.Backoff
if backoff == nil {
backoff = NewExponentialJitterBackoff(o.MaxBackoff)
}
rs := make([]IsErrorRetryable, len(o.Retryables))
copy(rs, o.Retryables)
ts := make([]IsErrorTimeout, len(o.Timeouts))
copy(ts, o.Timeouts)
return &Standard{
options: o,
backoff: backoff,
retryable: IsErrorRetryables(rs),
timeout: IsErrorTimeouts(ts),
retryable: IsErrorRetryables(o.Retryables),
timeout: IsErrorTimeouts(o.Timeouts),
}
}
@ -171,32 +206,47 @@ func (s *Standard) RetryDelay(attempt int, err error) (time.Duration, error) {
return s.backoff.BackoffDelay(attempt, err)
}
// GetInitialToken returns the initial request token that can increment the
// retry token pool if the request is successful.
func (s *Standard) GetInitialToken() func(error) error {
return releaseToken(s.incrementTokens).release
// GetAttemptToken returns the token to be released after the attempt completes.
// The release token will add NoRetryIncrement to the RateLimiter token pool if
// the attempt was successful. If the attempt failed, nothing will be done.
func (s *Standard) GetAttemptToken(context.Context) (func(error) error, error) {
return s.GetInitialToken(), nil
}
func (s *Standard) incrementTokens() error {
// GetInitialToken returns a token for adding the NoRetryIncrement to the
// RateLimiter token if the attempt completed successfully without error.
//
// InitialToken applies to the result of each attempt, including the first.
// Whereas the RetryToken applies to the result of subsequent attempts.
//
// Deprecated: use GetAttemptToken instead.
func (s *Standard) GetInitialToken() func(error) error {
return releaseToken(s.noRetryIncrement).release
}
func (s *Standard) noRetryIncrement() error {
return s.options.RateLimiter.AddTokens(s.options.NoRetryIncrement)
}
// GetRetryToken attempts to deduct the retry cost from the retry token pool.
// Returning the token release function, or error.
func (s *Standard) GetRetryToken(ctx context.Context, err error) (func(error) error, error) {
func (s *Standard) GetRetryToken(ctx context.Context, opErr error) (func(error) error, error) {
cost := s.options.RetryCost
if s.timeout.IsErrorTimeout(err).Bool() {
if s.timeout.IsErrorTimeout(opErr).Bool() {
cost = s.options.RetryTimeoutCost
}
fn, err := s.options.RateLimiter.GetToken(ctx, cost)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to get rate limit token, %w", err)
}
return releaseToken(fn).release, nil
}
func nopRelease(error) error { return nil }
type releaseToken func() error
func (f releaseToken) release(err error) error {

View File

@ -0,0 +1,60 @@
package retry
import (
"errors"
"github.com/aws/aws-sdk-go-v2/aws"
)
// IsErrorThrottle provides the interface of an implementation to determine if
// an error response from an operation is a throttling error.
type IsErrorThrottle interface {
	IsErrorThrottle(error) aws.Ternary
}
// IsErrorThrottles is a collection of checks to determine if the error is a
// throttle error. Iterates through the checks and returns the state of
// throttle if any check returns something other than unknown.
type IsErrorThrottles []IsErrorThrottle

// IsErrorThrottle returns if the error is a throttle error if any of the
// checks in the list return a value other than unknown.
func (r IsErrorThrottles) IsErrorThrottle(err error) aws.Ternary {
	// First non-unknown answer wins, so the order of the checks matters.
	for _, re := range r {
		if v := re.IsErrorThrottle(err); v != aws.UnknownTernary {
			return v
		}
	}
	return aws.UnknownTernary
}
// IsErrorThrottleFunc wraps a function with the IsErrorThrottle interface,
// allowing an ordinary function to be used wherever an IsErrorThrottle check
// is required.
type IsErrorThrottleFunc func(error) aws.Ternary

// IsErrorThrottle returns if the error is a throttle error.
func (fn IsErrorThrottleFunc) IsErrorThrottle(err error) aws.Ternary {
	return fn(err)
}
// ThrottleErrorCode determines if an attempt should be retried based on the
// API error code.
type ThrottleErrorCode struct {
	Codes map[string]struct{}
}

// IsErrorThrottle return if the error is a throttle error based on the error
// codes. Returns unknown if the error doesn't have a code or it is unknown.
func (r ThrottleErrorCode) IsErrorThrottle(err error) aws.Ternary {
	// Only errors exposing an API error code can be classified here.
	var coder interface{ ErrorCode() string }
	if errors.As(err, &coder) {
		if _, found := r.Codes[coder.ErrorCode()]; found {
			return aws.TrueTernary
		}
	}
	return aws.UnknownTernary
}

View File

@ -6,34 +6,94 @@ import (
"time"
)
// RetryMode provides the mode the API client will use to create a retryer
// based on.
type RetryMode string
const (
// RetryModeStandard model provides rate limited retry attempts with
// exponential backoff delay.
RetryModeStandard RetryMode = "standard"
// RetryModeAdaptive model provides attempt send rate limiting on throttle
// responses in addition to standard mode's retry rate limiting.
//
// Adaptive retry mode is experimental and is subject to change in the
// future.
RetryModeAdaptive RetryMode = "adaptive"
)
// ParseRetryMode attempts to parse a RetryMode from the given string.
// Returning error if the value is not a known RetryMode.
func ParseRetryMode(v string) (mode RetryMode, err error) {
switch v {
case "standard":
return RetryModeStandard, nil
case "adaptive":
return RetryModeAdaptive, nil
default:
return mode, fmt.Errorf("unknown RetryMode, %v", v)
}
}
func (m RetryMode) String() string { return string(m) }
// Retryer is an interface to determine if a given error from a
// request should be retried, and if so what backoff delay to apply. The
// attempt should be retried, and if so what backoff delay to apply. The
// default implementation used by most services is the retry package's Standard
// type. Which contains basic retry logic using exponential backoff.
type Retryer interface {
// IsErrorRetryable returns if the failed request is retryable. This check
// IsErrorRetryable returns if the failed attempt is retryable. This check
// should determine if the error can be retried, or if the error is
// terminal.
IsErrorRetryable(error) bool
// MaxAttempts returns the maximum number of attempts that can be made for
// a request before failing. A value of 0 implies that the request should
// an attempt before failing. A value of 0 implies that the attempt should
// be retried until it succeeds if the errors are retryable.
MaxAttempts() int
// RetryDelay returns the delay that should be used before retrying the
// request. Will return error if the if the delay could not be determined.
// attempt. Will return error if the delay could not be determined.
RetryDelay(attempt int, opErr error) (time.Duration, error)
// GetRetryToken attempts to deduct the retry cost from the retry token pool.
// Returning the token release function, or error.
GetRetryToken(ctx context.Context, opErr error) (releaseToken func(error) error, err error)
// GetInitalToken returns the initial request token that can increment the
// retry token pool if the request is successful.
// GetInitialToken returns the initial attempt token that can increment the
// retry token pool if the attempt is successful.
GetInitialToken() (releaseToken func(error) error)
}
// RetryerV2 is an interface to determine if a given error from a attempt
// should be retried, and if so what backoff delay to apply. The default
// implementation used by most services is the retry package's Standard type.
// Which contains basic retry logic using exponential backoff.
//
// RetryerV2 replaces the Retryer interface, deprecating the GetInitialToken
// method in favor of GetAttemptToken which takes a context, and can return an error.
//
// The SDK's retry package's Attempt middleware, and utilities will always
// wrap a Retryer as a RetryerV2. Delegating to GetInitialToken, only if
// GetAttemptToken is not implemented.
type RetryerV2 interface {
Retryer
// GetInitialToken returns the initial attempt token that can increment the
// retry token pool if the attempt is successful.
//
// Deprecated: This method does not provide a way to block using Context,
// nor can it return an error. Use RetryerV2, and GetAttemptToken instead.
GetInitialToken() (releaseToken func(error) error)
// GetAttemptToken returns the send token that can be used to rate limit
// attempt calls. Will be used by the SDK's retry package's Attempt
// middleware to get a send token prior to making the attempt and releasing
// the send token after the attempt has been made.
GetAttemptToken(context.Context) (func(error) error, error)
}
// NopRetryer provides a RequestRetryDecider implementation that will flag
// all attempt errors as not retryable, with a max attempts of 1.
type NopRetryer struct{}
@ -41,12 +101,12 @@ type NopRetryer struct{}
// IsErrorRetryable returns false for all error values.
func (NopRetryer) IsErrorRetryable(error) bool { return false }
// MaxAttempts always returns 1 for the original request attempt.
// MaxAttempts always returns 1 for the original attempt.
func (NopRetryer) MaxAttempts() int { return 1 }
// RetryDelay is not valid for the NopRetryer. Will always return error.
func (NopRetryer) RetryDelay(int, error) (time.Duration, error) {
return 0, fmt.Errorf("not retrying any request errors")
return 0, fmt.Errorf("not retrying any attempt errors")
}
// GetRetryToken returns a stub function that does nothing.
@ -59,4 +119,9 @@ func (NopRetryer) GetInitialToken() func(error) error {
return nopReleaseToken
}
// GetAttemptToken returns a stub function that does nothing.
func (NopRetryer) GetAttemptToken(context.Context) (func(error) error, error) {
	return nopReleaseToken, nil
}

// nopReleaseToken is the no-op release function handed out by the NopRetryer
// stubs.
func nopReleaseToken(error) error { return nil }

14
vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go generated vendored Normal file
View File

@ -0,0 +1,14 @@
package aws
// ExecutionEnvironmentID is the AWS execution environment runtime identifier.
type ExecutionEnvironmentID string

// RuntimeEnvironment is a collection of values that are determined at runtime
// based on the environment that the SDK is executing in. Some of these values
// may or may not be present based on the executing environment and certain SDK
// configuration properties that drive whether these values are populated.
type RuntimeEnvironment struct {
	// EnvironmentIdentifier is the AWS execution environment runtime
	// identifier.
	EnvironmentIdentifier ExecutionEnvironmentID
	// Region is the AWS Region the SDK resolved for this environment.
	Region string
	// EC2InstanceMetadataRegion is presumably the Region reported by the EC2
	// instance metadata service — confirm against the resolver that sets it.
	EC2InstanceMetadataRegion string
}

View File

@ -8,7 +8,7 @@ import (
const doubleSpace = " "
// StripExcessSpaces will rewrite the passed in slice's string values to not
// contain muliple side-by-side spaces.
// contain multiple side-by-side spaces.
func StripExcessSpaces(str string) string {
var j, k, l, m, spaces int
// Trim trailing spaces

View File

@ -443,7 +443,15 @@ func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, he
} else {
canonicalHeaders.WriteString(headers[i])
canonicalHeaders.WriteRune(colon)
canonicalHeaders.WriteString(strings.Join(signed[headers[i]], ","))
// Trim out leading, trailing, and dedup inner spaces from signed header values.
values := signed[headers[i]]
for j, v := range values {
cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v))
canonicalHeaders.WriteString(cleanedValue)
if j < len(values)-1 {
canonicalHeaders.WriteRune(',')
}
}
}
canonicalHeaders.WriteRune('\n')
}

View File

@ -278,3 +278,20 @@ func TimeSlice(vs []time.Time) []*time.Time {
func TimeMap(vs map[string]time.Time) map[string]*time.Time {
return ptr.TimeMap(vs)
}
// Duration returns a pointer value for the time.Duration value passed in.
// Delegates to the smithy-go ptr package.
func Duration(v time.Duration) *time.Duration {
	return ptr.Duration(v)
}

// DurationSlice returns a slice of time.Duration pointers from the values
// passed in. Delegates to the smithy-go ptr package.
func DurationSlice(vs []time.Duration) []*time.Duration {
	return ptr.DurationSlice(vs)
}

// DurationMap returns a map of time.Duration pointers from the values
// passed in. Delegates to the smithy-go ptr package.
func DurationMap(vs map[string]time.Duration) map[string]*time.Duration {
	return ptr.DurationMap(vs)
}

View File

@ -2,6 +2,7 @@ package http
import (
"crypto/tls"
"github.com/aws/aws-sdk-go-v2/aws"
"net"
"net/http"
"reflect"
@ -68,6 +69,14 @@ func (b *BuildableClient) Do(req *http.Request) (*http.Response, error) {
return b.client.Do(req)
}
// Freeze returns a frozen aws.HTTPClient implementation that is no longer a BuildableClient.
// Use this to prevent the SDK from applying DefaultMode configuration values to a buildable client.
func (b *BuildableClient) Freeze() aws.HTTPClient {
	// Clone first so the frozen client is independent of any further
	// mutation of this builder, then build the concrete client.
	cpy := b.clone()
	cpy.build()
	return cpy.client
}
func (b *BuildableClient) build() {
b.client = wrapWithLimitedRedirect(&http.Client{
Timeout: b.clientTimeout,

View File

@ -1,3 +1,90 @@
# v1.15.10 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.9 (2022-05-26)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.8 (2022-05-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.7 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.6 (2022-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.5 (2022-05-09)
* **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682)
# v1.15.4 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.3 (2022-03-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.2 (2022-03-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.1 (2022-03-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.0 (2022-03-08)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.14.0 (2022-02-24)
* **Feature**: Adds support for loading RetryMaxAttempts and RetryMode from the environment and shared configuration files. These parameters drive how the SDK's API client will initialize its default retryer, if a custom retryer has not been specified. See [config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) module and [aws.Config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config) for more information about and how to use these new options.
* **Feature**: Adds support for the `ca_bundle` parameter in shared config and credentials files. The usage of the file is the same as environment variable, `AWS_CA_BUNDLE`, but sourced from shared config. Fixes [#1589](https://github.com/aws/aws-sdk-go-v2/issues/1589)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.1 (2022-01-28)
* **Bug Fix**: Fixes LoadDefaultConfig handling of errors returned by passed in functional options. Previously errors returned from the LoadOptions passed into LoadDefaultConfig were incorrectly ignored. [#1562](https://github.com/aws/aws-sdk-go-v2/pull/1562). Thanks to [Pinglei Guo](https://github.com/pingleig) for submitting this PR.
* **Bug Fix**: Fixes the SDK's handling of `duration_sections` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for helping to reproduce this bug.
* **Bug Fix**: Updates `config` module to use os.UserHomeDir instead of hard coded environment variable for OS. [#1563](https://github.com/aws/aws-sdk-go-v2/pull/1563)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.0 (2022-01-14)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.0 (2022-01-07)
* **Feature**: Add load option for CredentialCache. Adds a new member to the LoadOptions struct, CredentialsCacheOptions. This member allows specifying a function that will be used to configure the CredentialsCache. The CredentialsCacheOptions will only be used if the configuration loader will wrap the underlying credential provider in the CredentialsCache.
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.1 (2021-12-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.0 (2021-12-02)
* **Feature**: Add support for specifying `EndpointResolverWithOptions` on `LoadOptions`, and associated `WithEndpointResolverWithOptions`.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.10.3 (2021-11-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.10.2 (2021-11-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.10.1 (2021-11-12)
* **Dependency Update**: Updated to the latest SDK module versions

View File

@ -2,6 +2,7 @@ package config
import (
"context"
"github.com/aws/aws-sdk-go-v2/aws"
)
@ -34,8 +35,11 @@ var defaultAWSConfigResolvers = []awsConfigResolver{
// Sets the endpoint resolving behavior the API Clients will use for making
// requests to. Clients default to their own clients this allows overrides
// to be specified.
// to be specified. The resolveEndpointResolver option is deprecated, but
// we still need to set it for backwards compatibility on config
// construction.
resolveEndpointResolver,
resolveEndpointResolverWithOptions,
// Sets the retry behavior API clients will use within their retry attempt
// middleware. Defaults to unset, allowing API clients to define their own
@ -51,6 +55,15 @@ var defaultAWSConfigResolvers = []awsConfigResolver{
// API client request pipeline middleware.
resolveAPIOptions,
// Resolves the DefaultsMode that should be used by SDK clients. If this
// mode is set to DefaultsModeAuto.
//
// Comes after HTTPClient and CustomCABundle to ensure the HTTP client is
// configured if provided before invoking IMDS if mode is auto. Comes
// before resolving credentials so that those subsequent clients use the
// configured auto mode.
resolveDefaultsModeOptions,
// Sets the resolved credentials the API clients will use for
// authentication. Provides the SDK's default credential chain.
//
@ -124,17 +137,10 @@ func (cs configs) ResolveAWSConfig(ctx context.Context, resolvers []awsConfigRes
for _, fn := range resolvers {
if err := fn(ctx, &cfg, cs); err != nil {
// TODO provide better error?
return aws.Config{}, err
}
}
var sources []interface{}
for _, s := range cs {
sources = append(sources, s)
}
cfg.ConfigSources = sources
return cfg, nil
}
@ -170,7 +176,9 @@ func (cs configs) ResolveConfig(f func(configs []interface{}) error) error {
func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error) (cfg aws.Config, err error) {
var options LoadOptions
for _, optFn := range optFns {
optFn(&options)
if err := optFn(&options); err != nil {
return aws.Config{}, err
}
}
// assign Load Options to configs

View File

@ -0,0 +1,47 @@
package config
import (
"context"
"os"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)
const execEnvVar = "AWS_EXECUTION_ENV"
// DefaultsModeOptions is the set of options that are used to configure
type DefaultsModeOptions struct {
// The SDK configuration defaults mode. Defaults to legacy if not specified.
//
// Supported modes are: auto, cross-region, in-region, legacy, mobile, standard
Mode aws.DefaultsMode
// The EC2 Instance Metadata Client that should be used when performing environment
// discovery when aws.DefaultsModeAuto is set.
//
// If not specified the SDK will construct a client if the instance metadata service has not been disabled by
// the AWS_EC2_METADATA_DISABLED environment variable.
IMDSClient *imds.Client
}
func resolveDefaultsModeRuntimeEnvironment(ctx context.Context, envConfig *EnvConfig, client *imds.Client) (aws.RuntimeEnvironment, error) {
getRegionOutput, err := client.GetRegion(ctx, &imds.GetRegionInput{})
// honor context timeouts, but if we couldn't talk to IMDS don't fail runtime environment introspection.
select {
case <-ctx.Done():
return aws.RuntimeEnvironment{}, err
default:
}
var imdsRegion string
if err == nil {
imdsRegion = getRegionOutput.Region
}
return aws.RuntimeEnvironment{
EnvironmentIdentifier: aws.ExecutionEnvironmentID(os.Getenv(execEnvVar)),
Region: envConfig.Region,
EC2InstanceMetadataRegion: imdsRegion,
}, nil
}

View File

@ -4,13 +4,14 @@ import (
"bytes"
"context"
"fmt"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"io"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)
// CredentialsSourceName provides a name of the provider when config is
@ -63,6 +64,11 @@ const (
awsUseDualStackEndpoint = "AWS_USE_DUALSTACK_ENDPOINT"
awsUseFIPSEndpoint = "AWS_USE_FIPS_ENDPOINT"
awsDefaultMode = "AWS_DEFAULTS_MODE"
awsRetryMaxAttempts = "AWS_MAX_ATTEMPTS"
awsRetryMode = "AWS_RETRY_MODE"
)
var (
@ -226,6 +232,22 @@ type EnvConfig struct {
//
// AWS_USE_FIPS_ENDPOINT=true
UseFIPSEndpoint aws.FIPSEndpointState
// Specifies the SDK Defaults Mode used by services.
//
// AWS_DEFAULTS_MODE=standard
DefaultsMode aws.DefaultsMode
// Specifies the maximum number attempts an API client will call an
// operation that fails with a retryable error.
//
// AWS_MAX_ATTEMPTS=3
RetryMaxAttempts int
// Specifies the retry model the API client will be created with.
//
// aws_retry_mode=standard
RetryMode aws.RetryMode
}
// loadEnvConfig reads configuration values from the OS's environment variables.
@ -292,9 +314,45 @@ func NewEnvConfig() (EnvConfig, error) {
return cfg, err
}
if err := setDefaultsModeFromEnvVal(&cfg.DefaultsMode, []string{awsDefaultMode}); err != nil {
return cfg, err
}
if err := setIntFromEnvVal(&cfg.RetryMaxAttempts, []string{awsRetryMaxAttempts}); err != nil {
return cfg, err
}
if err := setRetryModeFromEnvVal(&cfg.RetryMode, []string{awsRetryMode}); err != nil {
return cfg, err
}
return cfg, nil
}
func (c EnvConfig) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) {
if len(c.DefaultsMode) == 0 {
return "", false, nil
}
return c.DefaultsMode, true, nil
}
// GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified,
// and not 0.
func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) {
if c.RetryMaxAttempts == 0 {
return 0, false, nil
}
return c.RetryMaxAttempts, true, nil
}
// GetRetryMode returns the RetryMode of AWS_RETRY_MODE if was specified, and a
// valid value.
func (c EnvConfig) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
if len(c.RetryMode) == 0 {
return "", false, nil
}
return c.RetryMode, true, nil
}
func setEC2IMDSClientEnableState(state *imds.ClientEnableState, keys []string) {
for _, k := range keys {
value := os.Getenv(k)
@ -313,6 +371,31 @@ func setEC2IMDSClientEnableState(state *imds.ClientEnableState, keys []string) {
}
}
func setDefaultsModeFromEnvVal(mode *aws.DefaultsMode, keys []string) error {
for _, k := range keys {
if value := os.Getenv(k); len(value) > 0 {
if ok := mode.SetFromString(value); !ok {
return fmt.Errorf("invalid %s value: %s", k, value)
}
break
}
}
return nil
}
func setRetryModeFromEnvVal(mode *aws.RetryMode, keys []string) (err error) {
for _, k := range keys {
if value := os.Getenv(k); len(value) > 0 {
*mode, err = aws.ParseRetryMode(value)
if err != nil {
return fmt.Errorf("invalid %s value, %w", k, err)
}
break
}
}
return nil
}
func setEC2IMDSEndpointMode(mode *imds.EndpointModeState, keys []string) error {
for _, k := range keys {
value := os.Getenv(k)
@ -438,6 +521,21 @@ func setStringFromEnvVal(dst *string, keys []string) {
}
}
func setIntFromEnvVal(dst *int, keys []string) error {
for _, k := range keys {
if v := os.Getenv(k); len(v) > 0 {
i, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return fmt.Errorf("invalid value %s=%s, %w", k, v, err)
}
*dst = int(i)
break
}
}
return nil
}
func setBoolPtrFromEnvVal(dst **bool, keys []string) error {
for _, k := range keys {
value := os.Getenv(k)

View File

@ -3,4 +3,4 @@
package config
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.10.1"
const goModuleVersion = "1.15.10"

View File

@ -18,7 +18,8 @@ import (
// LoadOptionsFunc is a type alias for LoadOptions functional option
type LoadOptionsFunc func(*LoadOptions) error
// LoadOptions are discrete set of options that are valid for loading the configuration
// LoadOptions are discrete set of options that are valid for loading the
// configuration
type LoadOptions struct {
// Region is the region to send requests to.
@ -30,12 +31,36 @@ type LoadOptions struct {
// HTTPClient the SDK's API clients will use to invoke HTTP requests.
HTTPClient HTTPClient
// EndpointResolver that can be used to provide or override an endpoint for the given
// service and region Please see the `aws.EndpointResolver` documentation on usage.
// EndpointResolver that can be used to provide or override an endpoint for
// the given service and region.
//
// See the `aws.EndpointResolver` documentation on usage.
//
// Deprecated: See EndpointResolverWithOptions
EndpointResolver aws.EndpointResolver
// Retryer is a function that provides a Retryer implementation. A Retryer guides how HTTP requests should be
// retried in case of recoverable failures.
// EndpointResolverWithOptions that can be used to provide or override an
// endpoint for the given service and region.
//
// See the `aws.EndpointResolverWithOptions` documentation on usage.
EndpointResolverWithOptions aws.EndpointResolverWithOptions
// RetryMaxAttempts specifies the maximum number attempts an API client
// will call an operation that fails with a retryable error.
//
// This value will only be used if Retryer option is nil.
RetryMaxAttempts int
// RetryMode specifies the retry model the API client will be created with.
//
// This value will only be used if Retryer option is nil.
RetryMode aws.RetryMode
// Retryer is a function that provides a Retryer implementation. A Retryer
// guides how HTTP requests should be retried in case of recoverable
// failures.
//
// If not nil, RetryMaxAttempts, and RetryMode will be ignored.
Retryer func() aws.Retryer
// APIOptions provides the set of middleware mutations modify how the API
@ -46,51 +71,63 @@ type LoadOptions struct {
// Logger writer interface to write logging messages to.
Logger logging.Logger
// ClientLogMode is used to configure the events that will be sent to the configured logger.
// This can be used to configure the logging of signing, retries, request, and responses
// of the SDK clients.
// ClientLogMode is used to configure the events that will be sent to the
// configured logger. This can be used to configure the logging of signing,
// retries, request, and responses of the SDK clients.
//
// See the ClientLogMode type documentation for the complete set of logging modes and available
// configuration.
// See the ClientLogMode type documentation for the complete set of logging
// modes and available configuration.
ClientLogMode *aws.ClientLogMode
// SharedConfigProfile is the profile to be used when loading the SharedConfig
SharedConfigProfile string
// SharedConfigFiles is the slice of custom shared config files to use when loading the SharedConfig.
// A non-default profile used within config file must have name defined with prefix 'profile '.
// eg [profile xyz] indicates a profile with name 'xyz'.
// To read more on the format of the config file, please refer the documentation at
// SharedConfigFiles is the slice of custom shared config files to use when
// loading the SharedConfig. A non-default profile used within config file
// must have name defined with prefix 'profile '. eg [profile xyz]
// indicates a profile with name 'xyz'. To read more on the format of the
// config file, please refer the documentation at
// https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-config
//
// If duplicate profiles are provided within the same, or across multiple shared config files, the next parsed
// profile will override only the properties that conflict with the previously defined profile.
// Note that if duplicate profiles are provided within the SharedCredentialsFiles and SharedConfigFiles,
// the properties defined in shared credentials file take precedence.
// If duplicate profiles are provided within the same, or across multiple
// shared config files, the next parsed profile will override only the
// properties that conflict with the previously defined profile. Note that
// if duplicate profiles are provided within the SharedCredentialsFiles and
// SharedConfigFiles, the properties defined in shared credentials file
// take precedence.
SharedConfigFiles []string
// SharedCredentialsFile is the slice of custom shared credentials files to use when loading the SharedConfig.
// The profile name used within credentials file must not prefix 'profile '.
// eg [xyz] indicates a profile with name 'xyz'. Profile declared as [profile xyz] will be ignored.
// To read more on the format of the credentials file, please refer the documentation at
// SharedCredentialsFile is the slice of custom shared credentials files to
// use when loading the SharedConfig. The profile name used within
// credentials file must not prefix 'profile '. eg [xyz] indicates a
// profile with name 'xyz'. Profile declared as [profile xyz] will be
// ignored. To read more on the format of the credentials file, please
// refer the documentation at
// https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-creds
//
// If duplicate profiles are provided with a same, or across multiple shared credentials files, the next parsed
// profile will override only properties that conflict with the previously defined profile.
// Note that if duplicate profiles are provided within the SharedCredentialsFiles and SharedConfigFiles,
// the properties defined in shared credentials file take precedence.
// If duplicate profiles are provided with a same, or across multiple
// shared credentials files, the next parsed profile will override only
// properties that conflict with the previously defined profile. Note that
// if duplicate profiles are provided within the SharedCredentialsFiles and
// SharedConfigFiles, the properties defined in shared credentials file
// take precedence.
SharedCredentialsFiles []string
// CustomCABundle is CA bundle PEM bytes reader
CustomCABundle io.Reader
// DefaultRegion is the fall back region, used if a region was not resolved from other sources
// DefaultRegion is the fall back region, used if a region was not resolved
// from other sources
DefaultRegion string
// UseEC2IMDSRegion indicates if SDK should retrieve the region
// from the EC2 Metadata service
UseEC2IMDSRegion *UseEC2IMDSRegion
// CredentialsCacheOptions is a function for setting the
// aws.CredentialsCacheOptions
CredentialsCacheOptions func(*aws.CredentialsCacheOptions)
// ProcessCredentialOptions is a function for setting
// the processcreds.Options
ProcessCredentialOptions func(*processcreds.Options)
@ -132,10 +169,12 @@ type LoadOptions struct {
// AWS_EC2_METADATA_DISABLED=true
EC2IMDSClientEnableState imds.ClientEnableState
// Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
// Specifies the EC2 Instance Metadata Service default endpoint selection
// mode (IPv4 or IPv6)
EC2IMDSEndpointMode imds.EndpointModeState
// Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode.
// Specifies the EC2 Instance Metadata Service endpoint to use. If
// specified it overrides EC2IMDSEndpointMode.
EC2IMDSEndpoint string
// Specifies that SDK clients must resolve a dual-stack endpoint for
@ -145,6 +184,40 @@ type LoadOptions struct {
// Specifies that SDK clients must resolve a FIPS endpoint for
// services.
UseFIPSEndpoint aws.FIPSEndpointState
// Specifies the SDK configuration mode for defaults.
DefaultsModeOptions DefaultsModeOptions
}
func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) {
if len(o.DefaultsModeOptions.Mode) == 0 {
return "", false, nil
}
return o.DefaultsModeOptions.Mode, true, nil
}
// GetRetryMaxAttempts returns the RetryMaxAttempts if specified in the
// LoadOptions and not 0.
func (o LoadOptions) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) {
if o.RetryMaxAttempts == 0 {
return 0, false, nil
}
return o.RetryMaxAttempts, true, nil
}
// GetRetryMode returns the RetryMode specified in the LoadOptions.
func (o LoadOptions) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
if len(o.RetryMode) == 0 {
return "", false, nil
}
return o.RetryMode, true, nil
}
func (o LoadOptions) getDefaultsModeIMDSClient(ctx context.Context) (*imds.Client, bool, error) {
if o.DefaultsModeOptions.IMDSClient == nil {
return nil, false, nil
}
return o.DefaultsModeOptions.IMDSClient, true, nil
}
// getRegion returns Region from config's LoadOptions
@ -355,6 +428,29 @@ func WithCredentialsProvider(v aws.CredentialsProvider) LoadOptionsFunc {
}
}
// getCredentialsCacheOptionsProvider returns the wrapped function to set aws.CredentialsCacheOptions
func (o LoadOptions) getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error) {
if o.CredentialsCacheOptions == nil {
return nil, false, nil
}
return o.CredentialsCacheOptions, true, nil
}
// WithCredentialsCacheOptions is a helper function to construct functional
// options that sets a function to modify the aws.CredentialsCacheOptions the
// aws.CredentialsCache will be configured with, if the CredentialsCache is used
// by the configuration loader.
//
// If multiple WithCredentialsCacheOptions calls are made, the last call
// overrides the previous call values.
func WithCredentialsCacheOptions(v func(*aws.CredentialsCacheOptions)) LoadOptionsFunc {
return func(o *LoadOptions) error {
o.CredentialsCacheOptions = v
return nil
}
}
// getProcessCredentialOptions returns the wrapped function to set processcreds.Options
func (o LoadOptions) getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) {
if o.ProcessCredentialOptions == nil {
@ -505,6 +601,48 @@ func WithAPIOptions(v []func(*middleware.Stack) error) LoadOptionsFunc {
}
}
func (o LoadOptions) getRetryMaxAttempts(ctx context.Context) (int, bool, error) {
if o.RetryMaxAttempts == 0 {
return 0, false, nil
}
return o.RetryMaxAttempts, true, nil
}
// WithRetryMaxAttempts is a helper function to construct functional options that sets
// RetryMaxAttempts on LoadOptions. If RetryMaxAttempts is unset, the RetryMaxAttempts value is
// ignored. If multiple WithRetryMaxAttempts calls are made, the last call overrides
// the previous call values.
//
// Will be ignored of LoadOptions.Retryer or WithRetryer are used.
func WithRetryMaxAttempts(v int) LoadOptionsFunc {
return func(o *LoadOptions) error {
o.RetryMaxAttempts = v
return nil
}
}
func (o LoadOptions) getRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
if o.RetryMode == "" {
return "", false, nil
}
return o.RetryMode, true, nil
}
// WithRetryMode is a helper function to construct functional options that sets
// RetryMode on LoadOptions. If RetryMode is unset, the RetryMode value is
// ignored. If multiple WithRetryMode calls are made, the last call overrides
// the previous call values.
//
// Will be ignored of LoadOptions.Retryer or WithRetryer are used.
func WithRetryMode(v aws.RetryMode) LoadOptionsFunc {
return func(o *LoadOptions) error {
o.RetryMode = v
return nil
}
}
func (o LoadOptions) getRetryer(ctx context.Context) (func() aws.Retryer, bool, error) {
if o.Retryer == nil {
return nil, false, nil
@ -533,9 +671,11 @@ func (o LoadOptions) getEndpointResolver(ctx context.Context) (aws.EndpointResol
}
// WithEndpointResolver is a helper function to construct functional options
// that sets endpoint resolver on LoadOptions. The EndpointResolver is set to nil,
// that sets the EndpointResolver on LoadOptions. If the EndpointResolver is set to nil,
// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls
// are made, the last call overrides the previous call values.
//
// Deprecated: See WithEndpointResolverWithOptions
func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc {
return func(o *LoadOptions) error {
o.EndpointResolver = v
@ -543,6 +683,25 @@ func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc {
}
}
func (o LoadOptions) getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error) {
if o.EndpointResolverWithOptions == nil {
return nil, false, nil
}
return o.EndpointResolverWithOptions, true, nil
}
// WithEndpointResolverWithOptions is a helper function to construct functional options
// that sets the EndpointResolverWithOptions on LoadOptions. If the EndpointResolverWithOptions is set to nil,
// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls
// are made, the last call overrides the previous call values.
func WithEndpointResolverWithOptions(v aws.EndpointResolverWithOptions) LoadOptionsFunc {
return func(o *LoadOptions) error {
o.EndpointResolverWithOptions = v
return nil
}
}
func (o LoadOptions) getLogger(ctx context.Context) (logging.Logger, bool, error) {
if o.Logger == nil {
return nil, false, nil
@ -748,3 +907,20 @@ func (o LoadOptions) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndp
}
return o.UseFIPSEndpoint, true, nil
}
// WithDefaultsMode sets the SDK defaults configuration mode to the value provided.
//
// Zero or more functional options can be provided to provide configuration options for performing
// environment discovery when using aws.DefaultsModeAuto.
func WithDefaultsMode(mode aws.DefaultsMode, optFns ...func(options *DefaultsModeOptions)) LoadOptionsFunc {
do := DefaultsModeOptions{
Mode: mode,
}
for _, fn := range optFns {
fn(&do)
}
return func(options *LoadOptions) error {
options.DefaultsModeOptions = do
return nil
}
}

View File

@ -11,6 +11,7 @@ import (
"github.com/aws/aws-sdk-go-v2/credentials/processcreds"
"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
)
@ -162,6 +163,28 @@ func getCredentialsProvider(ctx context.Context, configs configs) (p aws.Credent
return
}
// credentialsCacheOptionsProvider is an interface for retrieving a function for setting
// the aws.CredentialsCacheOptions.
type credentialsCacheOptionsProvider interface {
getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error)
}
// getCredentialsCacheOptionsProvider is an interface for retrieving a function for setting
// the aws.CredentialsCacheOptions.
func getCredentialsCacheOptionsProvider(ctx context.Context, configs configs) (
f func(*aws.CredentialsCacheOptions), found bool, err error,
) {
for _, config := range configs {
if p, ok := config.(credentialsCacheOptionsProvider); ok {
f, found, err = p.getCredentialsCacheOptions(ctx)
if err != nil || found {
break
}
}
}
return
}
// processCredentialOptions is an interface for retrieving a function for setting
// the processcreds.Options.
type processCredentialOptions interface {
@ -336,6 +359,25 @@ func getEndpointResolver(ctx context.Context, configs configs) (f aws.EndpointRe
return
}
// endpointResolverWithOptionsProvider is an interface for retrieving an aws.EndpointResolverWithOptions from a configuration source
type endpointResolverWithOptionsProvider interface {
getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error)
}
// getEndpointResolver searches the provided config sources for a EndpointResolverFunc that can be used
// to configure the aws.Config.EndpointResolver value.
func getEndpointResolverWithOptions(ctx context.Context, configs configs) (f aws.EndpointResolverWithOptions, found bool, err error) {
for _, c := range configs {
if p, ok := c.(endpointResolverWithOptionsProvider); ok {
f, found, err = p.getEndpointResolverWithOptions(ctx)
if err != nil || found {
break
}
}
}
return
}
// loggerProvider is an interface for retrieving a logging.Logger from a configuration source.
type loggerProvider interface {
getLogger(ctx context.Context) (logging.Logger, bool, error)
@ -423,5 +465,69 @@ func getSSOProviderOptions(ctx context.Context, configs configs) (v func(options
}
}
}
return
return v, found, err
}
type defaultsModeIMDSClientProvider interface {
getDefaultsModeIMDSClient(context.Context) (*imds.Client, bool, error)
}
func getDefaultsModeIMDSClient(ctx context.Context, configs configs) (v *imds.Client, found bool, err error) {
for _, c := range configs {
if p, ok := c.(defaultsModeIMDSClientProvider); ok {
v, found, err = p.getDefaultsModeIMDSClient(ctx)
if err != nil || found {
break
}
}
}
return v, found, err
}
type defaultsModeProvider interface {
getDefaultsMode(context.Context) (aws.DefaultsMode, bool, error)
}
func getDefaultsMode(ctx context.Context, configs configs) (v aws.DefaultsMode, found bool, err error) {
for _, c := range configs {
if p, ok := c.(defaultsModeProvider); ok {
v, found, err = p.getDefaultsMode(ctx)
if err != nil || found {
break
}
}
}
return v, found, err
}
type retryMaxAttemptsProvider interface {
GetRetryMaxAttempts(context.Context) (int, bool, error)
}
func getRetryMaxAttempts(ctx context.Context, configs configs) (v int, found bool, err error) {
for _, c := range configs {
if p, ok := c.(retryMaxAttemptsProvider); ok {
v, found, err = p.GetRetryMaxAttempts(ctx)
if err != nil || found {
break
}
}
}
return v, found, err
}
type retryModeProvider interface {
GetRetryMode(context.Context) (aws.RetryMode, bool, error)
}
func getRetryMode(ctx context.Context, configs configs) (v aws.RetryMode, found bool, err error) {
for _, c := range configs {
if p, ok := c.(retryModeProvider); ok {
v, found, err = p.GetRetryMode(ctx)
if err != nil || found {
break
}
}
}
return v, found, err
}

View File

@ -11,6 +11,7 @@ import (
"github.com/aws/aws-sdk-go-v2/aws"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/smithy-go/logging"
)
@ -20,9 +21,15 @@ import (
// This should be used as the first resolver in the slice of resolvers when
// resolving external configuration.
func resolveDefaultAWSConfig(ctx context.Context, cfg *aws.Config, cfgs configs) error {
var sources []interface{}
for _, s := range cfgs {
sources = append(sources, s)
}
*cfg = aws.Config{
Credentials: aws.AnonymousCredentials{},
Logger: logging.NewStandardLogger(os.Stderr),
ConfigSources: sources,
}
return nil
}
@ -166,6 +173,22 @@ func resolveEndpointResolver(ctx context.Context, cfg *aws.Config, configs confi
return nil
}
// resolveEndpointResolver extracts the first instance of a EndpointResolverFunc from the config slice
// and sets the functions result on the aws.Config.EndpointResolver
func resolveEndpointResolverWithOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
endpointResolver, found, err := getEndpointResolverWithOptions(ctx, configs)
if err != nil {
return err
}
if !found {
return nil
}
cfg.EndpointResolverWithOptions = endpointResolver
return nil
}
func resolveLogger(ctx context.Context, cfg *aws.Config, configs configs) error {
logger, found, err := getLogger(ctx, configs)
if err != nil {
@ -199,13 +222,17 @@ func resolveRetryer(ctx context.Context, cfg *aws.Config, configs configs) error
if err != nil {
return err
}
if !found {
if found {
cfg.Retryer = retryer
return nil
}
cfg.Retryer = retryer
return nil
// Only load the retry options if a custom retryer has not be specified.
if err = resolveRetryMaxAttempts(ctx, cfg, configs); err != nil {
return err
}
return resolveRetryMode(ctx, cfg, configs)
}
func resolveEC2IMDSRegion(ctx context.Context, cfg *aws.Config, configs configs) error {
@ -225,3 +252,56 @@ func resolveEC2IMDSRegion(ctx context.Context, cfg *aws.Config, configs configs)
return nil
}
func resolveDefaultsModeOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
defaultsMode, found, err := getDefaultsMode(ctx, configs)
if err != nil {
return err
}
if !found {
defaultsMode = aws.DefaultsModeLegacy
}
var environment aws.RuntimeEnvironment
if defaultsMode == aws.DefaultsModeAuto {
envConfig, _, _ := getAWSConfigSources(configs)
client, found, err := getDefaultsModeIMDSClient(ctx, configs)
if err != nil {
return err
}
if !found {
client = imds.NewFromConfig(*cfg)
}
environment, err = resolveDefaultsModeRuntimeEnvironment(ctx, envConfig, client)
if err != nil {
return err
}
}
cfg.DefaultsMode = defaultsMode
cfg.RuntimeEnvironment = environment
return nil
}
func resolveRetryMaxAttempts(ctx context.Context, cfg *aws.Config, configs configs) error {
maxAttempts, found, err := getRetryMaxAttempts(ctx, configs)
if err != nil || !found {
return err
}
cfg.RetryMaxAttempts = maxAttempts
return nil
}
func resolveRetryMode(ctx context.Context, cfg *aws.Config, configs configs) error {
retryMode, found, err := getRetryMode(ctx, configs)
if err != nil || !found {
return err
}
cfg.RetryMode = retryMode
return nil
}

View File

@ -59,8 +59,8 @@ func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) e
//
// Config providers used:
// * credentialsProviderProvider
func resolveCredentialProvider(ctx context.Context, cfg *aws.Config, cfgs configs) (bool, error) {
credProvider, found, err := getCredentialsProvider(ctx, cfgs)
func resolveCredentialProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) {
credProvider, found, err := getCredentialsProvider(ctx, configs)
if err != nil {
return false, err
}
@ -68,7 +68,10 @@ func resolveCredentialProvider(ctx context.Context, cfg *aws.Config, cfgs config
return false, nil
}
cfg.Credentials = wrapWithCredentialsCache(credProvider)
cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, credProvider)
if err != nil {
return false, err
}
return true, nil
}
@ -105,7 +108,10 @@ func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs config
}
// Wrap the resolved provider in a cache so the SDK will cache credentials.
cfg.Credentials = wrapWithCredentialsCache(cfg.Credentials)
cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, cfg.Credentials)
if err != nil {
return err
}
return nil
}
@ -248,9 +254,12 @@ func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToke
provider := endpointcreds.New(url, optFns...)
cfg.Credentials = wrapWithCredentialsCache(provider, func(options *aws.CredentialsCacheOptions) {
cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider, func(options *aws.CredentialsCacheOptions) {
options.ExpiryWindow = 5 * time.Minute
})
if err != nil {
return err
}
return nil
}
@ -296,9 +305,10 @@ func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs con
provider := ec2rolecreds.New(optFns...)
cfg.Credentials = wrapWithCredentialsCache(provider, func(options *aws.CredentialsCacheOptions) {
options.ExpiryWindow = 5 * time.Minute
})
cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider)
if err != nil {
return err
}
return nil
}
@ -430,12 +440,31 @@ func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *Shared
return nil
}
// wrapWithCredentialsCache will wrap provider with an aws.CredentialsCache with the provided options if the provider is not already a aws.CredentialsCache.
func wrapWithCredentialsCache(provider aws.CredentialsProvider, optFns ...func(options *aws.CredentialsCacheOptions)) aws.CredentialsProvider {
// wrapWithCredentialsCache will wrap provider with an aws.CredentialsCache
// with the provided options if the provider is not already a
// aws.CredentialsCache.
func wrapWithCredentialsCache(
ctx context.Context,
cfgs configs,
provider aws.CredentialsProvider,
optFns ...func(options *aws.CredentialsCacheOptions),
) (aws.CredentialsProvider, error) {
_, ok := provider.(*aws.CredentialsCache)
if ok {
return provider
return provider, nil
}
return aws.NewCredentialsCache(provider, optFns...)
credCacheOptions, found, err := getCredentialsCacheOptionsProvider(ctx, cfgs)
if err != nil {
return nil, err
}
// force allocation of a new slice if the additional options are
// needed, to prevent overwriting the passed in slice of options.
optFns = optFns[:len(optFns):len(optFns)]
if found {
optFns = append(optFns, credCacheOptions)
}
return aws.NewCredentialsCache(provider, optFns...), nil
}

View File

@ -1,17 +1,19 @@
package config
import (
"bytes"
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/aws-sdk-go-v2/internal/ini"
"github.com/aws/smithy-go/logging"
)
@ -76,6 +78,14 @@ const (
s3DisableMultiRegionAccessPointsKey = `s3_disable_multiregion_access_points`
useFIPSEndpointKey = "use_fips_endpoint"
defaultsModeKey = "defaults_mode"
// Retry options
retryMaxAttemptsKey = "max_attempts"
retryModeKey = "retry_mode"
caBundleKey = "ca_bundle"
)
// defaultSharedConfigProfile allows for swapping the default profile for testing
@ -166,12 +176,14 @@ type SharedConfig struct {
// s3_use_arn_region=true
S3UseARNRegion *bool
// Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
// Specifies the EC2 Instance Metadata Service default endpoint selection
// mode (IPv4 or IPv6)
//
// ec2_metadata_service_endpoint_mode=IPv6
EC2IMDSEndpointMode imds.EndpointModeState
// Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode.
// Specifies the EC2 Instance Metadata Service endpoint to use. If
// specified it overrides EC2IMDSEndpointMode.
//
// ec2_metadata_service_endpoint=http://fd00:ec2::254
EC2IMDSEndpoint string
@ -193,6 +205,65 @@ type SharedConfig struct {
//
// use_fips_endpoint=true
UseFIPSEndpoint aws.FIPSEndpointState
// Specifies which defaults mode should be used by services.
//
// defaults_mode=standard
DefaultsMode aws.DefaultsMode
// Specifies the maximum number attempts an API client will call an
// operation that fails with a retryable error.
//
// max_attempts=3
RetryMaxAttempts int
// Specifies the retry model the API client will be created with.
//
// retry_mode=standard
RetryMode aws.RetryMode
// Sets the path to a custom Credentials Authority (CA) Bundle PEM file
// that the SDK will use instead of the system's root CA bundle. Only use
// this if you want to configure the SDK to use a custom set of CAs.
//
// Enabling this option will attempt to merge the Transport into the SDK's
// HTTP client. If the client's Transport is not a http.Transport an error
// will be returned. If the Transport's TLS config is set this option will
// cause the SDK to overwrite the Transport's TLS config's RootCAs value.
//
// Setting a custom HTTPClient in the aws.Config options will override this
// setting. To use this option and custom HTTP client, the HTTP client
// needs to be provided when creating the config. Not the service client.
//
// ca_bundle=$HOME/my_custom_ca_bundle
CustomCABundle string
}
func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) {
if len(c.DefaultsMode) == 0 {
return "", false, nil
}
return c.DefaultsMode, true, nil
}
// GetRetryMaxAttempts returns the maximum number of attempts an API client
// created Retryer should attempt an operation call before failing.
func (c SharedConfig) GetRetryMaxAttempts(ctx context.Context) (value int, ok bool, err error) {
if c.RetryMaxAttempts == 0 {
return 0, false, nil
}
return c.RetryMaxAttempts, true, nil
}
// GetRetryMode returns the model the API client should create its Retryer in.
func (c SharedConfig) GetRetryMode(ctx context.Context) (value aws.RetryMode, ok bool, err error) {
if len(c.RetryMode) == 0 {
return "", false, nil
}
return c.RetryMode, true, nil
}
// GetS3UseARNRegion returns if the S3 service should allow ARNs to direct the region
@ -275,6 +346,19 @@ func (c SharedConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEnd
return c.UseFIPSEndpoint, true, nil
}
// GetCustomCABundle returns the custom CA bundle's PEM bytes if the file was
func (c SharedConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
if len(c.CustomCABundle) == 0 {
return nil, false, nil
}
b, err := ioutil.ReadFile(c.CustomCABundle)
if err != nil {
return nil, false, err
}
return bytes.NewReader(b), true, nil
}
// loadSharedConfigIgnoreNotExist is an alias for loadSharedConfig with the
// addition of ignoring when none of the files exist or when the profile
// is not found in any of the files.
@ -549,12 +633,8 @@ func mergeSections(dst, src ini.Sections) error {
secretKey := srcSection.String(secretAccessKey)
if dstSection.Has(accessKeyIDKey) {
dstSection.Logs = append(dstSection.Logs,
fmt.Sprintf("For profile: %v, overriding credentials value for aws access key id, "+
"and aws secret access key, defined in %v, with values found in a duplicate profile "+
"defined at file %v. \n",
sectionName, dstSection.SourceFile[accessKeyIDKey],
srcSection.SourceFile[accessKeyIDKey]))
dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, accessKeyIDKey,
dstSection.SourceFile[accessKeyIDKey], srcSection.SourceFile[accessKeyIDKey]))
}
// update access key
@ -572,24 +652,8 @@ func mergeSections(dst, src ini.Sections) error {
dstSection.UpdateValue(secretAccessKey, v)
// update session token
if srcSection.Has(sessionTokenKey) {
sessionKey := srcSection.String(sessionTokenKey)
val, e := ini.NewStringValue(sessionKey)
if e != nil {
return fmt.Errorf("error merging session key, %w", e)
}
if dstSection.Has(sessionTokenKey) {
dstSection.Logs = append(dstSection.Logs,
fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
"with a %v value found in a duplicate profile defined at file %v. \n",
sectionName, sessionTokenKey, dstSection.SourceFile[sessionTokenKey],
sessionTokenKey, srcSection.SourceFile[sessionTokenKey]))
}
dstSection.UpdateValue(sessionTokenKey, val)
dstSection.UpdateSourceFile(sessionTokenKey, srcSection.SourceFile[sessionTokenKey])
if err = mergeStringKey(&srcSection, &dstSection, sectionName, sessionTokenKey); err != nil {
return err
}
// update source file to reflect where the static creds came from
@ -597,244 +661,40 @@ func mergeSections(dst, src ini.Sections) error {
dstSection.UpdateSourceFile(secretAccessKey, srcSection.SourceFile[secretAccessKey])
}
if srcSection.Has(roleArnKey) {
key := srcSection.String(roleArnKey)
val, err := ini.NewStringValue(key)
if err != nil {
return fmt.Errorf("error merging roleArnKey, %w", err)
stringKeys := []string{
roleArnKey,
sourceProfileKey,
credentialSourceKey,
externalIDKey,
mfaSerialKey,
roleSessionNameKey,
regionKey,
enableEndpointDiscoveryKey,
credentialProcessKey,
webIdentityTokenFileKey,
s3UseARNRegionKey,
s3DisableMultiRegionAccessPointsKey,
ec2MetadataServiceEndpointModeKey,
ec2MetadataServiceEndpointKey,
useDualStackEndpoint,
useFIPSEndpointKey,
defaultsModeKey,
retryModeKey,
}
for i := range stringKeys {
if err := mergeStringKey(&srcSection, &dstSection, sectionName, stringKeys[i]); err != nil {
return err
}
}
if dstSection.Has(roleArnKey) {
dstSection.Logs = append(dstSection.Logs,
fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
"with a %v value found in a duplicate profile defined at file %v. \n",
sectionName, roleArnKey, dstSection.SourceFile[roleArnKey],
roleArnKey, srcSection.SourceFile[roleArnKey]))
intKeys := []string{
roleDurationSecondsKey,
retryMaxAttemptsKey,
}
dstSection.UpdateValue(roleArnKey, val)
dstSection.UpdateSourceFile(roleArnKey, srcSection.SourceFile[roleArnKey])
for i := range intKeys {
if err := mergeIntKey(&srcSection, &dstSection, sectionName, intKeys[i]); err != nil {
return err
}
if srcSection.Has(sourceProfileKey) {
key := srcSection.String(sourceProfileKey)
val, err := ini.NewStringValue(key)
if err != nil {
return fmt.Errorf("error merging sourceProfileKey, %w", err)
}
if dstSection.Has(sourceProfileKey) {
dstSection.Logs = append(dstSection.Logs,
fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
"with a %v value found in a duplicate profile defined at file %v. \n",
sectionName, sourceProfileKey, dstSection.SourceFile[sourceProfileKey],
sourceProfileKey, srcSection.SourceFile[sourceProfileKey]))
}
dstSection.UpdateValue(sourceProfileKey, val)
dstSection.UpdateSourceFile(sourceProfileKey, srcSection.SourceFile[sourceProfileKey])
}
if srcSection.Has(credentialSourceKey) {
key := srcSection.String(credentialSourceKey)
val, err := ini.NewStringValue(key)
if err != nil {
return fmt.Errorf("error merging credentialSourceKey, %w", err)
}
if dstSection.Has(credentialSourceKey) {
dstSection.Logs = append(dstSection.Logs,
fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
"with a %v value found in a duplicate profile defined at file %v. \n",
sectionName, credentialSourceKey, dstSection.SourceFile[credentialSourceKey],
credentialSourceKey, srcSection.SourceFile[credentialSourceKey]))
}
dstSection.UpdateValue(credentialSourceKey, val)
dstSection.UpdateSourceFile(credentialSourceKey, srcSection.SourceFile[credentialSourceKey])
}
if srcSection.Has(externalIDKey) {
key := srcSection.String(externalIDKey)
val, err := ini.NewStringValue(key)
if err != nil {
return fmt.Errorf("error merging externalIDKey, %w", err)
}
if dstSection.Has(externalIDKey) {
dstSection.Logs = append(dstSection.Logs,
fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
"with a %v value found in a duplicate profile defined at file %v. \n",
sectionName, externalIDKey, dstSection.SourceFile[externalIDKey],
externalIDKey, srcSection.SourceFile[externalIDKey]))
}
dstSection.UpdateValue(externalIDKey, val)
dstSection.UpdateSourceFile(externalIDKey, srcSection.SourceFile[externalIDKey])
}
if srcSection.Has(mfaSerialKey) {
key := srcSection.String(mfaSerialKey)
val, err := ini.NewStringValue(key)
if err != nil {
return fmt.Errorf("error merging mfaSerialKey, %w", err)
}
if dstSection.Has(mfaSerialKey) {
dstSection.Logs = append(dstSection.Logs,
fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
"with a %v value found in a duplicate profile defined at file %v. \n",
sectionName, mfaSerialKey, dstSection.SourceFile[mfaSerialKey],
mfaSerialKey, srcSection.SourceFile[mfaSerialKey]))
}
dstSection.UpdateValue(mfaSerialKey, val)
dstSection.UpdateSourceFile(mfaSerialKey, srcSection.SourceFile[mfaSerialKey])
}
if srcSection.Has(roleSessionNameKey) {
key := srcSection.String(roleSessionNameKey)
val, err := ini.NewStringValue(key)
if err != nil {
return fmt.Errorf("error merging roleSessionNameKey, %w", err)
}
if dstSection.Has(roleSessionNameKey) {
dstSection.Logs = append(dstSection.Logs,
fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
"with a %v value found in a duplicate profile defined at file %v. \n",
sectionName, roleSessionNameKey, dstSection.SourceFile[roleSessionNameKey],
roleSessionNameKey, srcSection.SourceFile[roleSessionNameKey]))
}
dstSection.UpdateValue(roleSessionNameKey, val)
dstSection.UpdateSourceFile(roleSessionNameKey, srcSection.SourceFile[roleSessionNameKey])
}
// role duration seconds key update
if srcSection.Has(roleDurationSecondsKey) {
roleDurationSeconds := srcSection.Int(roleDurationSecondsKey)
v, err := ini.NewIntValue(roleDurationSeconds)
if err != nil {
return fmt.Errorf("error merging role duration seconds key, %w", err)
}
dstSection.UpdateValue(roleDurationSecondsKey, v)
dstSection.UpdateSourceFile(roleDurationSecondsKey, srcSection.SourceFile[roleDurationSecondsKey])
}
if srcSection.Has(regionKey) {
key := srcSection.String(regionKey)
val, err := ini.NewStringValue(key)
if err != nil {
return fmt.Errorf("error merging regionKey, %w", err)
}
if dstSection.Has(regionKey) {
dstSection.Logs = append(dstSection.Logs,
fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
"with a %v value found in a duplicate profile defined at file %v. \n",
sectionName, regionKey, dstSection.SourceFile[regionKey],
regionKey, srcSection.SourceFile[regionKey]))
}
dstSection.UpdateValue(regionKey, val)
dstSection.UpdateSourceFile(regionKey, srcSection.SourceFile[regionKey])
}
if srcSection.Has(enableEndpointDiscoveryKey) {
key := srcSection.String(enableEndpointDiscoveryKey)
val, err := ini.NewStringValue(key)
if err != nil {
return fmt.Errorf("error merging enableEndpointDiscoveryKey, %w", err)
}
if dstSection.Has(enableEndpointDiscoveryKey) {
dstSection.Logs = append(dstSection.Logs,
fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
"with a %v value found in a duplicate profile defined at file %v. \n",
sectionName, enableEndpointDiscoveryKey, dstSection.SourceFile[enableEndpointDiscoveryKey],
enableEndpointDiscoveryKey, srcSection.SourceFile[enableEndpointDiscoveryKey]))
}
dstSection.UpdateValue(enableEndpointDiscoveryKey, val)
dstSection.UpdateSourceFile(enableEndpointDiscoveryKey, srcSection.SourceFile[enableEndpointDiscoveryKey])
}
if srcSection.Has(credentialProcessKey) {
key := srcSection.String(credentialProcessKey)
val, err := ini.NewStringValue(key)
if err != nil {
return fmt.Errorf("error merging credentialProcessKey, %w", err)
}
if dstSection.Has(credentialProcessKey) {
dstSection.Logs = append(dstSection.Logs,
fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
"with a %v value found in a duplicate profile defined at file %v. \n",
sectionName, credentialProcessKey, dstSection.SourceFile[credentialProcessKey],
credentialProcessKey, srcSection.SourceFile[credentialProcessKey]))
}
dstSection.UpdateValue(credentialProcessKey, val)
dstSection.UpdateSourceFile(credentialProcessKey, srcSection.SourceFile[credentialProcessKey])
}
if srcSection.Has(webIdentityTokenFileKey) {
key := srcSection.String(webIdentityTokenFileKey)
val, err := ini.NewStringValue(key)
if err != nil {
return fmt.Errorf("error merging webIdentityTokenFileKey, %w", err)
}
if dstSection.Has(webIdentityTokenFileKey) {
dstSection.Logs = append(dstSection.Logs,
fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
"with a %v value found in a duplicate profile defined at file %v. \n",
sectionName, webIdentityTokenFileKey, dstSection.SourceFile[webIdentityTokenFileKey],
webIdentityTokenFileKey, srcSection.SourceFile[webIdentityTokenFileKey]))
}
dstSection.UpdateValue(webIdentityTokenFileKey, val)
dstSection.UpdateSourceFile(webIdentityTokenFileKey, srcSection.SourceFile[webIdentityTokenFileKey])
}
if srcSection.Has(s3UseARNRegionKey) {
key := srcSection.String(s3UseARNRegionKey)
val, err := ini.NewStringValue(key)
if err != nil {
return fmt.Errorf("error merging s3UseARNRegionKey, %w", err)
}
if dstSection.Has(s3UseARNRegionKey) {
dstSection.Logs = append(dstSection.Logs,
fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
"with a %v value found in a duplicate profile defined at file %v. \n",
sectionName, s3UseARNRegionKey, dstSection.SourceFile[s3UseARNRegionKey],
s3UseARNRegionKey, srcSection.SourceFile[s3UseARNRegionKey]))
}
dstSection.UpdateValue(s3UseARNRegionKey, val)
dstSection.UpdateSourceFile(s3UseARNRegionKey, srcSection.SourceFile[s3UseARNRegionKey])
}
if srcSection.Has(s3DisableMultiRegionAccessPointsKey) {
key := srcSection.String(s3DisableMultiRegionAccessPointsKey)
val, err := ini.NewStringValue(key)
if err != nil {
return fmt.Errorf("error merging s3DisableMultiRegionAccessPointsKey, %w", err)
}
if dstSection.Has(s3DisableMultiRegionAccessPointsKey) {
dstSection.Logs = append(dstSection.Logs,
fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
"with a %v value found in a duplicate profile defined at file %v. \n",
sectionName, s3DisableMultiRegionAccessPointsKey, dstSection.SourceFile[s3DisableMultiRegionAccessPointsKey],
s3DisableMultiRegionAccessPointsKey, srcSection.SourceFile[s3DisableMultiRegionAccessPointsKey]))
}
dstSection.UpdateValue(s3DisableMultiRegionAccessPointsKey, val)
dstSection.UpdateSourceFile(s3DisableMultiRegionAccessPointsKey, srcSection.SourceFile[s3DisableMultiRegionAccessPointsKey])
}
// set srcSection on dst srcSection
@ -844,6 +704,51 @@ func mergeSections(dst, src ini.Sections) error {
return nil
}
func mergeStringKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error {
if srcSection.Has(key) {
srcValue := srcSection.String(key)
val, err := ini.NewStringValue(srcValue)
if err != nil {
return fmt.Errorf("error merging %s, %w", key, err)
}
if dstSection.Has(key) {
dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key,
dstSection.SourceFile[key], srcSection.SourceFile[key]))
}
dstSection.UpdateValue(key, val)
dstSection.UpdateSourceFile(key, srcSection.SourceFile[key])
}
return nil
}
func mergeIntKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error {
if srcSection.Has(key) {
srcValue := srcSection.Int(key)
v, err := ini.NewIntValue(srcValue)
if err != nil {
return fmt.Errorf("error merging %s, %w", key, err)
}
if dstSection.Has(key) {
dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key,
dstSection.SourceFile[key], srcSection.SourceFile[key]))
}
dstSection.UpdateValue(key, v)
dstSection.UpdateSourceFile(key, srcSection.SourceFile[key])
}
return nil
}
func newMergeKeyLogMessage(sectionName, key, dstSourceFile, srcSourceFile string) string {
return fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
"with a %v value found in a duplicate profile defined at file %v. \n",
sectionName, key, dstSourceFile, key, srcSourceFile)
}
// Returns an error if all of the files fail to load. If at least one file is
// successfully loaded and contains the profile, no error will be returned.
func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile string,
@ -991,6 +896,19 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er
updateUseDualStackEndpoint(&c.UseDualStackEndpoint, section, useDualStackEndpoint)
updateUseFIPSEndpoint(&c.UseFIPSEndpoint, section, useFIPSEndpointKey)
if err := updateDefaultsMode(&c.DefaultsMode, section, defaultsModeKey); err != nil {
return fmt.Errorf("failed to load %s from shared config, %w", defaultsModeKey, err)
}
if err := updateInt(&c.RetryMaxAttempts, section, retryMaxAttemptsKey); err != nil {
return fmt.Errorf("failed to load %s from shared config, %w", retryMaxAttemptsKey, err)
}
if err := updateRetryMode(&c.RetryMode, section, retryModeKey); err != nil {
return fmt.Errorf("failed to load %s from shared config, %w", retryModeKey, err)
}
updateString(&c.CustomCABundle, section, caBundleKey)
// Shared Credentials
creds := aws.Credentials{
AccessKeyID: section.String(accessKeyIDKey),
@ -1006,6 +924,28 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er
return nil
}
func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error {
if !section.Has(key) {
return nil
}
value := section.String(key)
if ok := mode.SetFromString(value); !ok {
return fmt.Errorf("invalid value: %s", value)
}
return nil
}
func updateRetryMode(mode *aws.RetryMode, section ini.Section, key string) (err error) {
if !section.Has(key) {
return nil
}
value := section.String(key)
if *mode, err = aws.ParseRetryMode(value); err != nil {
return err
}
return nil
}
func updateEC2MetadataServiceEndpointMode(endpointMode *imds.EndpointModeState, section ini.Section, key string) error {
if !section.Has(key) {
return nil
@ -1204,12 +1144,9 @@ func (e CredentialRequiresARNError) Error() string {
}
func userHomeDir() string {
if runtime.GOOS == "windows" { // Windows
return os.Getenv("USERPROFILE")
}
// *nix
return os.Getenv("HOME")
// Ignore errors since we only care about Windows and *nix.
homedir, _ := os.UserHomeDir()
return homedir
}
func oneOrNone(bs ...bool) bool {
@ -1236,6 +1173,24 @@ func updateString(dst *string, section ini.Section, key string) {
*dst = section.String(key)
}
// updateInt will only update the dst with the value in the section key, key
// is present in the section.
//
// Down casts the INI integer value from a int64 to an int, which could be
// different bit size depending on platform.
func updateInt(dst *int, section ini.Section, key string) error {
if !section.Has(key) {
return nil
}
if vt, _ := section.ValueType(key); vt != ini.IntegerType {
return fmt.Errorf("invalid value %s=%s, expect integer",
key, section.String(key))
}
*dst = int(section.Int(key))
return nil
}
// updateBool will only update the dst with the value in the section key, key
// is present in the section.
func updateBool(dst *bool, section ini.Section, key string) {

View File

@ -1,3 +1,78 @@
# v1.12.5 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.4 (2022-05-26)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.3 (2022-05-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.2 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.1 (2022-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.0 (2022-04-25)
* **Feature**: Adds Duration and Policy options that can be used when creating stscreds.WebIdentityRoleProvider credentials provider.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.2 (2022-03-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.1 (2022-03-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.0 (2022-03-23)
* **Feature**: Update `ec2rolecreds` package's `Provider` to implememnt support for CredentialsCache new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.10.0 (2022-03-08)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.0 (2022-02-24)
* **Feature**: Adds support for `SourceIdentity` to `stscreds.AssumeRoleProvider` [#1588](https://github.com/aws/aws-sdk-go-v2/pull/1588). Fixes [#1575](https://github.com/aws/aws-sdk-go-v2/issues/1575)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.8.0 (2022-01-14)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.7.0 (2022-01-07)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.6.5 (2021-12-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.6.4 (2021-12-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.6.3 (2021-11-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.6.2 (2021-11-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.6.1 (2021-11-12)
* **Dependency Update**: Updated to the latest SDK module versions

View File

@ -5,13 +5,18 @@ import (
"context"
"encoding/json"
"fmt"
"math"
"path"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
"github.com/aws/smithy-go"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
)
// ProviderName provides a name of EC2Role provider
@ -26,14 +31,10 @@ type GetMetadataAPIClient interface {
// A Provider retrieves credentials from the EC2 service, and keeps track if
// those credentials are expired.
//
// The New function must be used to create the Provider.
// The New function must be used to create the with a custom EC2 IMDS client.
//
// p := &ec2rolecreds.New(ec2rolecreds.Options{
// Client: imds.New(imds.Options{}),
//
// // Expire the credentials 10 minutes before IAM states they should.
// // Proactively refreshing the credentials.
// ExpiryWindow: 10 * time.Minute
// p := &ec2rolecreds.New(func(o *ec2rolecreds.Options{
// o.Client = imds.New(imds.Options{/* custom options */})
// })
type Provider struct {
options Options
@ -66,9 +67,8 @@ func New(optFns ...func(*Options)) *Provider {
}
}
// Retrieve retrieves credentials from the EC2 service.
// Error will be returned if the request fails, or unable to extract
// the desired credentials.
// Retrieve retrieves credentials from the EC2 service. Error will be returned
// if the request fails, or unable to extract the desired credentials.
func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
credsList, err := requestCredList(ctx, p.options.Client)
if err != nil {
@ -96,10 +96,65 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
Expires: roleCreds.Expiration,
}
// Cap role credentials Expires to 1 hour so they can be refreshed more
// often. Jitter will be applied credentials cache if being used.
if anHour := sdk.NowTime().Add(1 * time.Hour); creds.Expires.After(anHour) {
creds.Expires = anHour
}
return creds, nil
}
// A ec2RoleCredRespBody provides the shape for unmarshaling credential
// HandleFailToRefresh will extend the credentials Expires time if it it is
// expired. If the credentials will not expire within the minimum time, they
// will be returned.
//
// If the credentials cannot expire, the original error will be returned.
func (p *Provider) HandleFailToRefresh(ctx context.Context, prevCreds aws.Credentials, err error) (
aws.Credentials, error,
) {
if !prevCreds.CanExpire {
return aws.Credentials{}, err
}
if prevCreds.Expires.After(sdk.NowTime().Add(5 * time.Minute)) {
return prevCreds, nil
}
newCreds := prevCreds
randFloat64, err := sdkrand.CryptoRandFloat64()
if err != nil {
return aws.Credentials{}, fmt.Errorf("failed to get random float, %w", err)
}
// Random distribution of [5,15) minutes.
expireOffset := time.Duration(randFloat64*float64(10*time.Minute)) + 5*time.Minute
newCreds.Expires = sdk.NowTime().Add(expireOffset)
logger := middleware.GetLogger(ctx)
logger.Logf(logging.Warn, "Attempting credential expiration extension due to a credential service availability issue. A refresh of these credentials will be attempted again in %v minutes.", math.Floor(expireOffset.Minutes()))
return newCreds, nil
}
// AdjustExpiresBy will adds the passed in duration to the passed in
// credential's Expires time, unless the time until Expires is less than 15
// minutes. Returns the credentials, even if not updated.
func (p *Provider) AdjustExpiresBy(creds aws.Credentials, dur time.Duration) (
aws.Credentials, error,
) {
if !creds.CanExpire {
return creds, nil
}
if creds.Expires.Before(sdk.NowTime().Add(15 * time.Minute)) {
return creds, nil
}
creds.Expires = creds.Expires.Add(dur)
return creds, nil
}
// ec2RoleCredRespBody provides the shape for unmarshaling credential
// request responses.
type ec2RoleCredRespBody struct {
// Success State

View File

@ -3,4 +3,4 @@
package credentials
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.6.1"
const goModuleVersion = "1.12.5"

View File

@ -136,8 +136,13 @@ type AssumeRoleAPIClient interface {
AssumeRole(ctx context.Context, params *sts.AssumeRoleInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleOutput, error)
}
// DefaultDuration is the default amount of time in minutes that the credentials
// will be valid for.
// DefaultDuration is the default amount of time in minutes that the
// credentials will be valid for. This value is only used by AssumeRoleProvider
// for specifying the default expiry duration of an assume role.
//
// Other providers such as WebIdentityRoleProvider do not use this value, and
// instead rely on STS API's default parameter handing to assign a default
// value.
var DefaultDuration = time.Duration(15) * time.Minute
// AssumeRoleProvider retrieves temporary credentials from the STS service, and
@ -208,6 +213,18 @@ type AssumeRoleOptions struct {
// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
SerialNumber *string
// The source identity specified by the principal that is calling the AssumeRole
// operation. You can require users to specify a source identity when they assume a
// role. You do this by using the sts:SourceIdentity condition key in a role trust
// policy. You can use source identity information in CloudTrail logs to determine
// who took actions with a role. You can use the aws:SourceIdentity condition key
// to further control access to Amazon Web Services resources based on the value of
// source identity. For more information about using source identity, see Monitor
// and control actions taken with assumed roles
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
// in the IAM User Guide.
SourceIdentity *string
// Async method of providing MFA token code for assuming an IAM role with MFA.
// The value returned by the function will be used as the TokenCode in the Retrieve
// call. See StdinTokenProvider for a provider that prompts and reads from stdin.
@ -266,6 +283,7 @@ func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, err
RoleArn: aws.String(p.options.RoleARN),
RoleSessionName: aws.String(p.options.RoleSessionName),
ExternalId: p.options.ExternalID,
SourceIdentity: p.options.SourceIdentity,
Tags: p.options.Tags,
TransitiveTagKeys: p.options.TransitiveTagKeys,
}

View File

@ -5,6 +5,7 @@ import (
"fmt"
"io/ioutil"
"strconv"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/retry"
@ -45,6 +46,19 @@ type WebIdentityRoleOptions struct {
// Session name, if you wish to uniquely identify this session.
RoleSessionName string
// Expiry duration of the STS credentials. STS will assign a default expiry
// duration if this value is unset. This is different from the Duration
// option of AssumeRoleProvider, which automatically assigns 15 minutes if
// Duration is unset.
//
// See the STS AssumeRoleWithWebIdentity API reference guide for more
// information on defaults.
// https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html
Duration time.Duration
// An IAM policy in JSON format that you want to use as an inline session policy.
Policy *string
// The Amazon Resource Names (ARNs) of the IAM managed policies that you
// want to use as managed session policies. The policies must exist in the
// same account as the role.
@ -100,12 +114,21 @@ func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials
// uses unix time in nanoseconds to uniquely identify sessions.
sessionName = strconv.FormatInt(sdk.NowTime().UnixNano(), 10)
}
resp, err := p.options.Client.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityInput{
input := &sts.AssumeRoleWithWebIdentityInput{
PolicyArns: p.options.PolicyARNs,
RoleArn: &p.options.RoleARN,
RoleSessionName: &sessionName,
WebIdentityToken: aws.String(string(b)),
}, func(options *sts.Options) {
}
if p.options.Duration != 0 {
// If set use the value, otherwise STS will assign a default expiration duration.
input.DurationSeconds = aws.Int32(int32(p.options.Duration / time.Second))
}
if p.options.Policy != nil {
input.Policy = p.options.Policy
}
resp, err := p.options.Client.AssumeRoleWithWebIdentity(ctx, input, func(options *sts.Options) {
options.Retryer = retry.AddWithErrorCodes(options.Retryer, invalidIdentityTokenExceptionCode)
})
if err != nil {

View File

@ -1,3 +1,55 @@
# v1.12.6 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.5 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.4 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.3 (2022-03-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.2 (2022-03-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.1 (2022-03-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.0 (2022-03-08)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.0 (2022-02-24)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.10.0 (2022-01-14)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.0 (2022-01-07)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.8.2 (2021-12-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.8.1 (2021-11-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.8.0 (2021-11-06)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version

View File

@ -3,4 +3,4 @@
package imds
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.8.0"
const goModuleVersion = "1.12.6"

View File

@ -1,3 +1,51 @@
# v1.1.12 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.11 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.10 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.9 (2022-03-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.8 (2022-03-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.7 (2022-03-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.6 (2022-03-08)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.5 (2022-02-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.4 (2022-01-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.3 (2022-01-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.2 (2021-12-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.1 (2021-11-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.0 (2021-11-06)
* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.

View File

@ -3,4 +3,4 @@
package configsources
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.1.0"
const goModuleVersion = "1.1.12"

View File

@ -1,3 +1,55 @@
# v2.4.6 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.4.5 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.4.4 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.4.3 (2022-03-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.4.2 (2022-03-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.4.1 (2022-03-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.4.0 (2022-03-08)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v2.3.0 (2022-02-24)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v2.2.0 (2022-01-14)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v2.1.0 (2022-01-07)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v2.0.2 (2021-12-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.0.1 (2021-11-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.0.0 (2021-11-06)
* **Release**: Endpoint Variant Model Support

View File

@ -3,4 +3,4 @@
package endpoints
// goModuleVersion is the tagged release for this module
const goModuleVersion = "2.0.0"
const goModuleVersion = "2.4.6"

View File

@ -1,3 +1,56 @@
# v1.3.13 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.12 (2022-05-17)
* **Bug Fix**: Removes the fuzz testing files from the module, as they are invalid and not used.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.11 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.10 (2022-03-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.9 (2022-03-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.8 (2022-03-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.7 (2022-03-08)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.6 (2022-02-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.5 (2022-01-28)
* **Bug Fix**: Fixes the SDK's handling of `duration_sections` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for help reproduce this bug.
# v1.3.4 (2022-01-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.3 (2022-01-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.2 (2021-12-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.1 (2021-11-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.0 (2021-11-06)
* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.

View File

@ -1,18 +0,0 @@
//go:build gofuzz
// +build gofuzz
package ini
import (
"bytes"
)
// Fuzz is the go-fuzz entry point for the INI parser. It feeds the raw
// fuzz input to Parse and reports 1 when the input is parseable and 0
// when it is rejected, guiding the fuzzer toward valid inputs.
func Fuzz(data []byte) int {
	r := bytes.NewReader(data)
	if _, err := Parse(r); err != nil {
		return 0
	}
	return 1
}

View File

@ -3,4 +3,4 @@
package ini
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.3.0"
const goModuleVersion = "1.3.13"

View File

@ -216,22 +216,8 @@ func NewStringValue(str string) (Value, error) {
// NewIntValue returns a Value type generated using an int64 input.
func NewIntValue(i int64) (Value, error) {
return newValue(IntegerType, 10, []rune{rune(i)})
}
// Append will append values and change the type to a string
// type.
func (v *Value) Append(tok Token) {
r := tok.Raw()
if v.Type != QuotedStringType {
v.Type = StringType
r = tok.raw[1 : len(tok.raw)-1]
}
if tok.Type() != TokenLit {
v.raw = append(v.raw, tok.Raw()...)
} else {
v.raw = append(v.raw, r...)
}
v := strconv.FormatInt(i, 10)
return newValue(IntegerType, 10, []rune(v))
}
func (v Value) String() string {

View File

@ -29,5 +29,5 @@ func Float64(reader io.Reader) (float64, error) {
// CryptoRandFloat64 returns a random float64 obtained from the crypto rand
// source.
func CryptoRandFloat64() (float64, error) {
return Float64(rand.Reader)
return Float64(Reader)
}

View File

@ -14,7 +14,7 @@ distribution.
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
@ -25,3 +25,4 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -0,0 +1,7 @@
// Package singleflight provides a duplicate function call suppression
// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight
// package. The package is forked, because the package is a part of the unstable
// and unversioned golang.org/x/sync module.
//
// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight
package singleflight

View File

@ -2,11 +2,44 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package singleflight provides a duplicate function call suppression
// mechanism.
package singleflight
import "sync"
import (
"bytes"
"errors"
"fmt"
"runtime"
"runtime/debug"
"sync"
)
// errGoexit reports that the user-supplied function terminated its
// goroutine via runtime.Goexit instead of returning or panicking.
var errGoexit = errors.New("runtime.Goexit was called")

// panicError carries an arbitrary value recovered from a panic together
// with the stack trace captured while the given function was running.
type panicError struct {
	value interface{}
	stack []byte
}

// Error implements the error interface, rendering the panic value
// followed by the captured stack trace.
func (p *panicError) Error() string {
	return fmt.Sprintf("%v\n\n%s", p.value, p.stack)
}

// newPanicError wraps a recovered panic value in a *panicError,
// attaching the stack trace of the current goroutine.
func newPanicError(v interface{}) error {
	stack := debug.Stack()

	// Drop the leading "goroutine N [status]:" line: by the time the
	// panic reaches Do the goroutine may no longer exist and its status
	// will have changed, so the line would be misleading.
	if nl := bytes.IndexByte(stack, '\n'); nl >= 0 {
		stack = stack[nl+1:]
	}
	return &panicError{value: v, stack: stack}
}
// call is an in-flight or completed singleflight.Do call
type call struct {
@ -57,6 +90,12 @@ func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, e
c.dups++
g.mu.Unlock()
c.wg.Wait()
if e, ok := c.err.(*panicError); ok {
panic(e)
} else if c.err == errGoexit {
runtime.Goexit()
}
return c.val, c.err, true
}
c := new(call)
@ -70,6 +109,8 @@ func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, e
// DoChan is like Do but returns a channel that will receive the
// results when they are ready.
//
// The returned channel will not be closed.
func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result {
ch := make(chan Result, 1)
g.mu.Lock()
@ -94,17 +135,66 @@ func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result
// doCall handles the single call for a key.
func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) {
c.val, c.err = fn()
c.wg.Done()
normalReturn := false
recovered := false
// use double-defer to distinguish panic from runtime.Goexit,
// more details see https://golang.org/cl/134395
defer func() {
// the given function invoked runtime.Goexit
if !normalReturn && !recovered {
c.err = errGoexit
}
c.wg.Done()
g.mu.Lock()
defer g.mu.Unlock()
if !c.forgotten {
delete(g.m, key)
}
if e, ok := c.err.(*panicError); ok {
// In order to prevent the waiting channels from being blocked forever,
// needs to ensure that this panic cannot be recovered.
if len(c.chans) > 0 {
go panic(e)
select {} // Keep this goroutine around so that it will appear in the crash dump.
} else {
panic(e)
}
} else if c.err == errGoexit {
// Already in the process of goexit, no need to call again
} else {
// Normal return
for _, ch := range c.chans {
ch <- Result{c.val, c.err, c.dups > 0}
}
g.mu.Unlock()
}
}()
func() {
defer func() {
if !normalReturn {
// Ideally, we would wait to take a stack trace until we've determined
// whether this is a panic or a runtime.Goexit.
//
// Unfortunately, the only way we can distinguish the two is to see
// whether the recover stopped the goroutine from terminating, and by
// the time we know that, the part of the stack trace relevant to the
// panic has been discarded.
if r := recover(); r != nil {
c.err = newPanicError(r)
}
}
}()
c.val, c.err = fn()
normalReturn = true
}()
if !normalReturn {
recovered = true
}
}
// Forget tells the singleflight to forget about a key. Future calls

View File

@ -0,0 +1,16 @@
# v1.0.3 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.2 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.1 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.0 (2022-04-07)
* **Release**: New internal v4a signing module location.

View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,6 @@
// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
package v4a
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.0.3"

View File

@ -1,5 +1,3 @@
// TODO(GOSDK-1220): This signer has removed the conceptual knowledge of UNSIGNED-PAYLOAD and X-Amz-Content-Sha256
package v4a
import (
@ -22,8 +20,8 @@ import (
"strings"
"time"
signerCrypto "github.com/aws/aws-sdk-go-v2/service/s3/internal/v4a/internal/crypto"
v4Internal "github.com/aws/aws-sdk-go-v2/service/s3/internal/v4a/internal/v4"
signerCrypto "github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto"
v4Internal "github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4"
"github.com/aws/smithy-go/encoding/httpbinding"
"github.com/aws/smithy-go/logging"
)
@ -440,7 +438,15 @@ func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, he
} else {
canonicalHeaders.WriteString(headers[i])
canonicalHeaders.WriteRune(colon)
canonicalHeaders.WriteString(strings.Join(signed[headers[i]], ","))
// Trim out leading, trailing, and dedup inner spaces from signed header values.
values := signed[headers[i]]
for j, v := range values {
cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v))
canonicalHeaders.WriteString(cleanedValue)
if j < len(values)-1 {
canonicalHeaders.WriteRune(',')
}
}
}
canonicalHeaders.WriteRune('\n')
}

View File

@ -1,4 +1,4 @@
#1/usr/bin/env bash
#!/usr/bin/env bash
PROJECT_DIR=""
SDK_SOURCE_DIR=$(cd `dirname $0` && pwd)
@ -30,7 +30,7 @@ while getopts "hs:d:" options; do
done
if [ "$PROJECT_DIR" != "" ]; then
cd $PROJECT_DIR || exit
cd "$PROJECT_DIR" || exit
fi
go mod graph | awk '{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/aws-sdk-go-v2" | while read x; do

View File

@ -1,14 +1,25 @@
[dependencies]
"github.com/aws/smithy-go" = "v1.9.0"
"github.com/google/go-cmp" = "v0.5.6"
"github.com/aws/aws-sdk-go" = "v1.44.28"
"github.com/aws/smithy-go" = "v1.11.3"
"github.com/google/go-cmp" = "v0.5.8"
"github.com/jmespath/go-jmespath" = "v0.4.0"
"golang.org/x/net" = "v0.0.0-20220127200216-cd36cc0744dd"
[modules]
[modules."."]
metadata_package = "aws"
[modules.codegen]
no_tag = true
[modules."example/service/dynamodb/createTable"]
no_tag = true
[modules."example/service/dynamodb/scanItems"]
no_tag = true
[modules."example/service/s3/listObjects"]
no_tag = true
@ -18,6 +29,9 @@
[modules."feature/ec2/imds/internal/configtesting"]
no_tag = true
[modules."internal/codegen"]
no_tag = true
[modules."internal/configsources/configtesting"]
no_tag = true

View File

@ -1,3 +1,27 @@
# v1.9.2 (2022-06-07)
* No change notes available for this release.
# v1.9.1 (2022-03-24)
* No change notes available for this release.
# v1.9.0 (2022-03-08)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.8.0 (2022-02-24)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.7.0 (2022-01-14)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.6.0 (2022-01-07)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.5.0 (2021-11-06)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version

View File

@ -3,4 +3,4 @@
package acceptencoding
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.5.0"
const goModuleVersion = "1.9.2"

View File

@ -0,0 +1,40 @@
# v1.1.7 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.6 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.5 (2022-04-27)
* **Bug Fix**: Fixes a bug that could cause the SigV4 payload hash to be incorrectly encoded, leading to signing errors.
# v1.1.4 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.3 (2022-03-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.2 (2022-03-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.1 (2022-03-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.0 (2022-03-08)
* **Feature**: Updates the SDK's checksum validation logic to require opt-in to output response payload validation. The SDK was always preforming output response payload checksum validation, not respecting the output validation model option. Fixes [#1606](https://github.com/aws/aws-sdk-go-v2/issues/1606)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.0 (2022-02-24)
* **Release**: New module for computing checksums
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions

View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,323 @@
package checksum
import (
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"fmt"
"hash"
"hash/crc32"
"io"
"strings"
"sync"
)
// Algorithm represents the checksum algorithms supported
type Algorithm string

// Enumeration values for supported checksum Algorithms.
const (
	// AlgorithmCRC32C represents CRC32C hash algorithm
	AlgorithmCRC32C Algorithm = "CRC32C"

	// AlgorithmCRC32 represents CRC32 hash algorithm
	AlgorithmCRC32 Algorithm = "CRC32"

	// AlgorithmSHA1 represents SHA1 hash algorithm
	AlgorithmSHA1 Algorithm = "SHA1"

	// AlgorithmSHA256 represents SHA256 hash algorithm
	AlgorithmSHA256 Algorithm = "SHA256"
)

// supportedAlgorithms is the priority-ordered list of algorithms this package
// can compute and validate. ParseAlgorithm and FilterSupportedAlgorithms
// match against this list.
var supportedAlgorithms = []Algorithm{
	AlgorithmCRC32C,
	AlgorithmCRC32,
	AlgorithmSHA1,
	AlgorithmSHA256,
}
func (a Algorithm) String() string { return string(a) }
// ParseAlgorithm attempts to parse the provided value into a checksum
// algorithm, matching without case. Returns the algorithm matched, or an
// error if the algorithm wasn't matched.
func ParseAlgorithm(v string) (Algorithm, error) {
	for _, candidate := range supportedAlgorithms {
		if strings.EqualFold(v, string(candidate)) {
			return candidate, nil
		}
	}
	return "", fmt.Errorf("unknown checksum algorithm, %v", v)
}
// FilterSupportedAlgorithms filters the set of algorithms, returning a slice
// of algorithms that are supported. Matching is case-insensitive, and
// duplicates in the input are collapsed to a single entry.
func FilterSupportedAlgorithms(vs []string) []Algorithm {
	seen := make(map[Algorithm]struct{}, len(supportedAlgorithms))
	matched := make([]Algorithm, 0, len(supportedAlgorithms))
	for _, v := range vs {
		for _, candidate := range supportedAlgorithms {
			// Skip values that are not supported algorithms.
			if !strings.EqualFold(v, string(candidate)) {
				continue
			}
			// Skip algorithms already collected.
			if _, dup := seen[candidate]; dup {
				continue
			}
			matched = append(matched, candidate)
			seen[candidate] = struct{}{}
		}
	}
	return matched
}
// NewAlgorithmHash returns a hash.Hash for the checksum algorithm. An error
// is returned if the algorithm is unknown.
func NewAlgorithmHash(v Algorithm) (hash.Hash, error) {
	switch v {
	case AlgorithmCRC32:
		return crc32.NewIEEE(), nil
	case AlgorithmCRC32C:
		return crc32.New(crc32.MakeTable(crc32.Castagnoli)), nil
	case AlgorithmSHA1:
		return sha1.New(), nil
	case AlgorithmSHA256:
		return sha256.New(), nil
	}
	return nil, fmt.Errorf("unknown checksum algorithm, %v", v)
}
// AlgorithmChecksumLength returns the length of the algorithm's checksum in
// bytes. If the algorithm is not known, an error is returned.
func AlgorithmChecksumLength(v Algorithm) (int, error) {
	switch v {
	// Both CRC32 variants produce a 32-bit (4 byte) checksum.
	case AlgorithmCRC32, AlgorithmCRC32C:
		return crc32.Size, nil
	case AlgorithmSHA1:
		return sha1.Size, nil
	case AlgorithmSHA256:
		return sha256.Size, nil
	}
	return 0, fmt.Errorf("unknown checksum algorithm, %v", v)
}
// awsChecksumHeaderPrefix is the prefix shared by all per-algorithm checksum
// headers, e.g. "x-amz-checksum-sha256".
const awsChecksumHeaderPrefix = "x-amz-checksum-"

// AlgorithmHTTPHeader returns the HTTP header for the algorithm's hash.
func AlgorithmHTTPHeader(v Algorithm) string {
	suffix := strings.ToLower(string(v))
	return awsChecksumHeaderPrefix + suffix
}
// base64EncodeHashSum computes base64 encoded checksum of a given running
// hash. The running hash must already have content written to it. Returns the
// byte slice of checksum and an error
func base64EncodeHashSum(h hash.Hash) []byte {
sum := h.Sum(nil)
sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
base64.StdEncoding.Encode(sum64, sum)
return sum64
}
// hexEncodeHashSum computes hex encoded checksum of a given running hash. The
// running hash must already have content written to it. Returns the byte slice
// of checksum and an error
func hexEncodeHashSum(h hash.Hash) []byte {
sum := h.Sum(nil)
sumHex := make([]byte, hex.EncodedLen(len(sum)))
hex.Encode(sumHex, sum)
return sumHex
}
// computeMD5Checksum computes the base64 encoded MD5 checksum of an
// io.Reader's contents. Returns the checksum bytes, or an error if the
// reader could not be fully consumed.
func computeMD5Checksum(r io.Reader) ([]byte, error) {
	hasher := md5.New()

	// Any copy error is attributed to reading the body.
	if _, err := io.Copy(hasher, r); err != nil {
		return nil, fmt.Errorf("failed compute MD5 hash of reader, %w", err)
	}

	return base64EncodeHashSum(hasher), nil
}
// computeChecksumReader provides a reader wrapping an underlying io.Reader to
// compute the checksum of the stream's bytes.
type computeChecksumReader struct {
	// stream tees reads through hasher so the checksum accumulates as the
	// stream is consumed.
	stream    io.Reader
	algorithm Algorithm
	hasher    hash.Hash
	// base64ChecksumLen is the base64-encoded length of the algorithm's
	// checksum, pre-computed at construction time.
	base64ChecksumLen int

	// mux guards lockedChecksum and lockedErr, which are written exactly
	// once: when the stream returns EOF or a read error.
	mux            sync.RWMutex
	lockedChecksum string
	lockedErr      error
}
// newComputeChecksumReader returns a computeChecksumReader for the stream and
// algorithm specified. Returns an error if unable to create the reader, or
// the algorithm is unknown.
func newComputeChecksumReader(stream io.Reader, algorithm Algorithm) (*computeChecksumReader, error) {
	hasher, err := NewAlgorithmHash(algorithm)
	if err != nil {
		return nil, err
	}

	rawLen, err := AlgorithmChecksumLength(algorithm)
	if err != nil {
		return nil, err
	}

	r := &computeChecksumReader{
		// Tee reads through the hasher so the checksum accumulates as
		// the caller consumes the stream.
		stream:            io.TeeReader(stream, hasher),
		algorithm:         algorithm,
		hasher:            hasher,
		base64ChecksumLen: base64.StdEncoding.EncodedLen(rawLen),
	}
	return r, nil
}
// Read wraps the underlying reader. When the underlying reader returns EOF,
// the checksum of the reader will be computed, and can be retrieved with
// Base64Checksum.
func (r *computeChecksumReader) Read(p []byte) (int, error) {
	n, err := r.stream.Read(p)
	if err == nil {
		return n, nil
	} else if err != io.EOF {
		// Non-EOF read failure: record the error under the lock so
		// Base64Checksum reports it instead of a checksum.
		r.mux.Lock()
		defer r.mux.Unlock()

		r.lockedErr = err

		return n, err
	}

	// EOF reached: the tee hasher has now seen the full stream, so the
	// checksum can be finalized and published under the lock.
	b := base64EncodeHashSum(r.hasher)

	r.mux.Lock()
	defer r.mux.Unlock()

	r.lockedChecksum = string(b)

	return n, err
}
// Algorithm returns the checksum algorithm this reader computes.
func (r *computeChecksumReader) Algorithm() Algorithm {
	return r.algorithm
}
// Base64ChecksumLength returns the base64 encoded length of the checksum for
// the algorithm. The value is pre-computed at construction time.
func (r *computeChecksumReader) Base64ChecksumLength() int {
	return r.base64ChecksumLen
}
// Base64Checksum returns the base64 checksum for the algorithm, or an error
// if the underlying reader returned a non-EOF error.
//
// Safe to call concurrently, but will return an error until after the
// underlying reader returns EOF.
func (r *computeChecksumReader) Base64Checksum() (string, error) {
	r.mux.RLock()
	defer r.mux.RUnlock()

	switch {
	case r.lockedErr != nil:
		// The stream failed with a non-EOF error; no checksum exists.
		return "", r.lockedErr
	case r.lockedChecksum == "":
		// The stream has not been fully consumed yet.
		return "", fmt.Errorf(
			"checksum not available yet, called before reader returns EOF",
		)
	}

	return r.lockedChecksum, nil
}
// validateChecksumReader implements io.ReadCloser interface. The wrapper
// performs checksum validation when the underlying reader has been fully read.
type validateChecksumReader struct {
	// originalBody is retained solely so Close can be forwarded to it.
	originalBody io.ReadCloser
	// body tees reads through hasher so the checksum accumulates as the
	// body is consumed.
	body      io.Reader
	hasher    hash.Hash
	algorithm Algorithm
	// expectChecksum is the base64 checksum the fully-read body must match.
	expectChecksum string
}
// newValidateChecksumReader returns a configured io.ReadCloser that performs
// checksum validation when the underlying reader has been fully read.
func newValidateChecksumReader(
	body io.ReadCloser,
	algorithm Algorithm,
	expectChecksum string,
) (*validateChecksumReader, error) {
	hasher, err := NewAlgorithmHash(algorithm)
	if err != nil {
		return nil, err
	}

	reader := &validateChecksumReader{
		originalBody: body,
		// Tee reads through the hasher so the checksum accumulates as
		// the body is consumed.
		body:           io.TeeReader(body, hasher),
		hasher:         hasher,
		algorithm:      algorithm,
		expectChecksum: expectChecksum,
	}
	return reader, nil
}
// Read attempts to read from the underlying stream while also updating the
// running hash. If the underlying stream returns with an EOF error, the
// checksum of the stream will be collected, and compared against the expected
// checksum. If the checksums do not match, an error will be returned.
//
// If a non-EOF error occurs when reading the underlying stream, that error
// will be returned and the checksum for the stream will be discarded.
func (c *validateChecksumReader) Read(p []byte) (n int, err error) {
	n, err = c.body.Read(p)
	if err != io.EOF {
		return n, err
	}

	// The stream is fully consumed; verify the computed checksum before
	// surfacing EOF to the caller.
	if validateErr := c.validateChecksum(); validateErr != nil {
		return n, validateErr
	}
	return n, io.EOF
}
// Close closes the underlying reader, returning any error that occurred in
// the underlying reader. No checksum validation is performed on Close; only
// a fully-read body (EOF observed in Read) is validated.
func (c *validateChecksumReader) Close() (err error) {
	return c.originalBody.Close()
}
// validateChecksum compares the base64 checksum of all bytes read so far
// against the expected checksum, case-insensitively. Returns a
// validationError on mismatch, nil otherwise.
func (c *validateChecksumReader) validateChecksum() error {
	actual := string(base64EncodeHashSum(c.hasher))
	if strings.EqualFold(c.expectChecksum, actual) {
		return nil
	}
	return validationError{
		Algorithm: c.algorithm,
		Expect:    c.expectChecksum,
		Actual:    actual,
	}
}
// validationError describes a checksum mismatch between the expected and
// actually computed value for a given algorithm.
type validationError struct {
	Algorithm Algorithm
	Expect    string
	Actual    string
}

// Error implements the error interface, reporting the algorithm and both
// checksum values.
func (v validationError) Error() string {
	return fmt.Sprintf("checksum did not match: algorithm %v, expect %v, actual %v",
		v.Algorithm, v.Expect, v.Actual)
}

View File

@ -0,0 +1,389 @@
package checksum
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
)
const (
	// crlf is the line terminator for chunk headers, payloads and trailers.
	crlf = "\r\n"

	// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
	defaultChunkLength = 1024 * 64

	awsTrailerHeaderName                 = "x-amz-trailer"
	decodedContentLengthHeaderName       = "x-amz-decoded-content-length"
	contentEncodingHeaderName            = "content-encoding"
	awsChunkedContentEncodingHeaderValue = "aws-chunked"

	// trailerKeyValueSeparator separates a trailer name from its value.
	trailerKeyValueSeparator = ":"
)

var (
	crlfBytes = []byte(crlf)
	// finalChunkBytes is the zero-length chunk header that terminates the
	// chunk stream.
	finalChunkBytes = []byte("0" + crlf)
)
// awsChunkedEncodingOptions configures how a payload stream is wrapped in
// aws-chunked content encoding.
type awsChunkedEncodingOptions struct {
	// The total size of the stream. For unsigned encoding this implies that
	// there will only be a single chunk containing the underlying payload,
	// unless ChunkLength is also specified.
	StreamLength int64

	// Set of trailer key:value pairs that will be appended to the end of the
	// payload after the end chunk has been written.
	Trailers map[string]awsChunkedTrailerValue

	// The maximum size of each chunk to be sent. Default value of -1, signals
	// that optimal chunk length will be used automatically. ChunkSize must be
	// at least 8KB.
	//
	// If ChunkLength and StreamLength are both specified, the stream will be
	// broken up into ChunkLength chunks. The encoded length of the aws-chunked
	// encoding can still be determined as long as all trailers, if any, have a
	// fixed length.
	ChunkLength int
}

// awsChunkedTrailerValue describes a single trailer appended after the final
// chunk, with lazy retrieval of its value.
type awsChunkedTrailerValue struct {
	// Function to retrieve the value of the trailer. Will only be called after
	// the underlying stream returns EOF error.
	Get func() (string, error)

	// If the length of the value can be pre-determined, and is constant
	// specify the length. A value of -1 means the length is unknown, or
	// cannot be pre-determined.
	Length int
}
// awsChunkedEncoding provides a reader that wraps the payload such that
// payload is read as a single aws-chunk payload. This reader can only be used
// if the content length of payload is known. Content-Length is used as size of
// the single payload chunk. The final chunk and trailing checksum is appended
// at the end.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
//
// Here is the aws-chunked payload stream as read from the awsChunkedEncoding
// if original request stream is "Hello world", and checksum hash used is SHA256
//
//	<b>\r\n
//	Hello world\r\n
//	0\r\n
//	x-amz-checksum-sha256:ZOyIygCyaOW6GjVnihtTFtIS9PNmskdyMlNKiuyjfzw=\r\n
//	\r\n
type awsChunkedEncoding struct {
	options awsChunkedEncodingOptions

	// encodedStream is the fully composed aws-chunked stream: chunk reader,
	// then trailers, then the final CRLF terminator.
	encodedStream io.Reader
	// trailerEncodedLength is the pre-computed encoded trailer length, or -1
	// if any trailer has an unknown length.
	trailerEncodedLength int
}
// newUnsignedAWSChunkedEncoding returns a new awsChunkedEncoding configured
// for unsigned aws-chunked content encoding. Any additional trailers that need
// to be appended after the end chunk must be included as via Trailer
// callbacks.
func newUnsignedAWSChunkedEncoding(
	stream io.Reader,
	optFns ...func(*awsChunkedEncodingOptions),
) *awsChunkedEncoding {
	options := awsChunkedEncodingOptions{
		Trailers:     map[string]awsChunkedTrailerValue{},
		StreamLength: -1,
		ChunkLength:  -1,
	}
	for _, fn := range optFns {
		fn(&options)
	}

	var chunkReader io.Reader
	// A buffered multi-chunk reader is needed when an explicit chunk length
	// was requested, or when the stream length is unknown. Otherwise the
	// whole stream is emitted as a single chunk of known size.
	if options.ChunkLength != -1 || options.StreamLength == -1 {
		if options.ChunkLength == -1 {
			options.ChunkLength = defaultChunkLength
		}
		chunkReader = newBufferedAWSChunkReader(stream, options.ChunkLength)
	} else {
		chunkReader = newUnsignedChunkReader(stream, options.StreamLength)
	}

	trailerReader := newAWSChunkedTrailerReader(options.Trailers)

	return &awsChunkedEncoding{
		options: options,
		// Compose the stream: chunks, then trailers, then the final CRLF
		// that terminates the aws-chunked encoding.
		encodedStream: io.MultiReader(chunkReader,
			trailerReader,
			bytes.NewBuffer(crlfBytes),
		),
		trailerEncodedLength: trailerReader.EncodedLength(),
	}
}
// EncodedLength returns the final length of the aws-chunked content encoded
// stream if it can be determined without reading the underlying stream or lazy
// header values, otherwise -1 is returned.
func (e *awsChunkedEncoding) EncodedLength() int64 {
	var length int64
	// Unknown stream length or unknown trailer length makes the total
	// unknowable.
	if e.options.StreamLength == -1 || e.trailerEncodedLength == -1 {
		return -1
	}

	if e.options.StreamLength != 0 {
		// If the stream length is known, and there is no chunk length specified,
		// only a single chunk will be used. Otherwise the stream length needs to
		// include the multiple chunk padding content.
		if e.options.ChunkLength == -1 {
			length += getUnsignedChunkBytesLength(e.options.StreamLength)

		} else {
			// Compute chunk header and payload length
			numChunks := e.options.StreamLength / int64(e.options.ChunkLength)
			length += numChunks * getUnsignedChunkBytesLength(int64(e.options.ChunkLength))
			// A final, shorter chunk carries any remainder bytes.
			if remainder := e.options.StreamLength % int64(e.options.ChunkLength); remainder != 0 {
				length += getUnsignedChunkBytesLength(remainder)
			}
		}
	}

	// End chunk
	length += int64(len(finalChunkBytes))

	// Trailers
	length += int64(e.trailerEncodedLength)

	// Encoding terminator
	length += int64(len(crlf))

	return length
}
// getUnsignedChunkBytesLength returns the total encoded length of a single
// unsigned aws-chunked chunk: hex payload length, CRLF, the payload itself,
// and the trailing CRLF.
func getUnsignedChunkBytesLength(payloadLength int64) int64 {
	headerLen := int64(len(strconv.FormatInt(payloadLength, 16)))
	return headerLen + int64(len(crlf)) + payloadLength + int64(len(crlf))
}
// HTTPHeaders returns the set of headers that must be included the request for
// aws-chunked to work. This includes the content-encoding: aws-chunked header.
//
// If there are multiple layered content encoding, the aws-chunked encoding
// must be appended to the previous layers the stream's encoding. The best way
// to do this is to append all header values returned to the HTTP request's set
// of headers.
func (e *awsChunkedEncoding) HTTPHeaders() map[string][]string {
	headers := map[string][]string{
		contentEncodingHeaderName: {awsChunkedContentEncodingHeaderValue},
	}

	if len(e.options.Trailers) == 0 {
		return headers
	}

	// Advertise each trailer name, lowercased, via the x-amz-trailer header.
	names := make([]string, 0, len(e.options.Trailers))
	for name := range e.options.Trailers {
		names = append(names, strings.ToLower(name))
	}
	headers[awsTrailerHeaderName] = names

	return headers
}
// Read reads the next bytes of the aws-chunked encoded stream, delegating to
// the composed chunk/trailer/terminator multi-reader.
func (e *awsChunkedEncoding) Read(b []byte) (n int, err error) {
	return e.encodedStream.Read(b)
}
// awsChunkedTrailerReader provides a lazy reader for reading of aws-chunked
// content encoded trailers. The trailer values will not be retrieved until the
// reader is read from.
type awsChunkedTrailerReader struct {
	// reader is built lazily on first Read; nil until then.
	reader   *bytes.Buffer
	trailers map[string]awsChunkedTrailerValue
	// trailerEncodedLength is the pre-computed encoded length, or -1 if any
	// trailer's length is unknown.
	trailerEncodedLength int
}

// newAWSChunkedTrailerReader returns an initialized awsChunkedTrailerReader to
// lazy reading aws-chunk content encoded trailers.
func newAWSChunkedTrailerReader(trailers map[string]awsChunkedTrailerValue) *awsChunkedTrailerReader {
	return &awsChunkedTrailerReader{
		trailers:             trailers,
		trailerEncodedLength: trailerEncodedLength(trailers),
	}
}
// trailerEncodedLength sums the encoded length of every trailer
// (name + separator + value + CRLF). Returns -1 if any trailer's value
// length is unknown.
func trailerEncodedLength(trailers map[string]awsChunkedTrailerValue) (length int) {
	for name, trailer := range trailers {
		if trailer.Length == -1 {
			return -1
		}
		length += len(name) + len(trailerKeyValueSeparator) + trailer.Length + len(crlf)
	}
	return length
}
// EncodedLength returns the length of the encoded trailers if the length could
// be determined without retrieving the header values. Returns -1 if length is
// unknown.
func (r *awsChunkedTrailerReader) EncodedLength() (length int) {
	return r.trailerEncodedLength
}
// Read populates the passed in byte slice with bytes from the encoded
// trailers. Will lazy read header values first time Read is called.
func (r *awsChunkedTrailerReader) Read(p []byte) (int, error) {
	// No trailers at all: nothing to emit.
	if r.trailerEncodedLength == 0 {
		return 0, io.EOF
	}

	if r.reader == nil {
		trailerLen := r.trailerEncodedLength
		if r.trailerEncodedLength == -1 {
			trailerLen = 0
		}
		r.reader = bytes.NewBuffer(make([]byte, 0, trailerLen))
		for name, trailer := range r.trailers {
			r.reader.WriteString(name)
			r.reader.WriteString(trailerKeyValueSeparator)
			v, err := trailer.Get()
			if err != nil {
				// NOTE(review): at this point r.reader is non-nil and
				// partially populated; a subsequent Read would serve that
				// partial content instead of retrying — confirm callers do
				// not retry after an error.
				return 0, fmt.Errorf("failed to get trailer value, %w", err)
			}
			r.reader.WriteString(v)
			r.reader.WriteString(crlf)
		}
	}

	return r.reader.Read(p)
}
// newUnsignedChunkReader returns an io.Reader encoding the underlying reader
// as unsigned aws-chunked chunks. The returned reader will also include the
// end chunk, but not the aws-chunked final `crlf` segment so trailers can be
// added.
//
// If the payload size is -1 for unknown length the content will be buffered in
// defaultChunkLength chunks before wrapped in aws-chunked chunk encoding.
func newUnsignedChunkReader(reader io.Reader, payloadSize int64) io.Reader {
	// Unknown size: fall back to buffered, multi-chunk encoding.
	if payloadSize == -1 {
		return newBufferedAWSChunkReader(reader, defaultChunkLength)
	}

	var trailer bytes.Buffer
	if payloadSize == 0 {
		// Empty payload: only the end chunk is emitted.
		trailer.Write(finalChunkBytes)
		return &trailer
	}

	// Non-empty payload: terminate it with CRLF, then the end chunk.
	trailer.WriteString(crlf)
	trailer.Write(finalChunkBytes)

	// Chunk header: the payload size in hex, followed by CRLF.
	var head bytes.Buffer
	head.WriteString(strconv.FormatInt(payloadSize, 16))
	head.WriteString(crlf)

	return io.MultiReader(&head, reader, &trailer)
}
// Provides a buffered aws-chunked chunk encoder of an underlying io.Reader.
// Will include end chunk, but not the aws-chunked final `crlf` segment so
// trailers can be added.
//
// Note does not implement support for chunk extensions, e.g. chunk signing.
type bufferedAWSChunkReader struct {
	reader    io.Reader
	chunkSize int
	// chunkSizeStr is chunkSize pre-formatted in hex, reused for every
	// full-size chunk header.
	chunkSizeStr string

	// headerBuffer and chunkBuffer are reused across chunks to avoid
	// re-allocating per chunk.
	headerBuffer *bytes.Buffer
	chunkBuffer  *bytes.Buffer

	// multiReader serves the current chunk; multiReaderLen tracks its
	// remaining unread bytes.
	multiReader    io.Reader
	multiReaderLen int
	// endChunkDone is set once the terminating zero-length chunk has been
	// emitted.
	endChunkDone bool
}
// newBufferedAWSChunkReader returns an bufferedAWSChunkReader for reading
// aws-chunked encoded chunks.
func newBufferedAWSChunkReader(reader io.Reader, chunkSize int) *bufferedAWSChunkReader {
	return &bufferedAWSChunkReader{
		reader:    reader,
		chunkSize: chunkSize,
		// Pre-format the hex chunk size once; it is the header for every
		// full chunk.
		chunkSizeStr: strconv.FormatInt(int64(chunkSize), 16),
		headerBuffer: bytes.NewBuffer(make([]byte, 0, 64)),
		// Reserve room for the chunk payload plus its trailing CRLF.
		chunkBuffer: bytes.NewBuffer(make([]byte, 0, chunkSize+len(crlf))),
	}
}
// Read attempts to read from the underlying io.Reader writing aws-chunked
// chunk encoded bytes to p. When the underlying io.Reader has been completed
// read the end chunk will be available. Once the end chunk is read, the reader
// will return EOF.
func (r *bufferedAWSChunkReader) Read(p []byte) (n int, err error) {
	// The end chunk has been fully served; the encoded stream is done.
	if r.multiReaderLen == 0 && r.endChunkDone {
		return 0, io.EOF
	}
	// Current chunk exhausted (or first call): build the next chunk.
	if r.multiReader == nil || r.multiReaderLen == 0 {
		r.multiReader, r.multiReaderLen, err = r.newMultiReader()
		if err != nil {
			return 0, err
		}
	}

	n, err = r.multiReader.Read(p)
	r.multiReaderLen -= n

	if err == io.EOF && !r.endChunkDone {
		// Edge case handling when the multi-reader has been completely read,
		// and returned an EOF, make sure that EOF only gets returned if the
		// end chunk was included in the multi-reader. Otherwise, the next call
		// to read will initialize the next chunk's multi-reader.
		err = nil
	}

	return n, err
}
// newMultiReader returns a new io.Reader for wrapping the next chunk. Will
// return an error if the underlying reader can not be read from. Will never
// return io.EOF.
func (r *bufferedAWSChunkReader) newMultiReader() (io.Reader, int, error) {
	// io.Copy eats the io.EOF returned by io.LimitReader. Any error that
	// occurs here is due to an actual read error.
	n, err := io.Copy(r.chunkBuffer, io.LimitReader(r.reader, int64(r.chunkSize)))
	if err != nil {
		return nil, 0, err
	}
	if n == 0 {
		// Early exit writing out only the end chunk. This does not include
		// aws-chunk's final `crlf` so that trailers can still be added by
		// upstream reader.
		r.headerBuffer.Reset()
		r.headerBuffer.WriteString("0")
		r.headerBuffer.WriteString(crlf)
		r.endChunkDone = true

		return r.headerBuffer, r.headerBuffer.Len(), nil
	}
	// Terminate the chunk payload with CRLF.
	r.chunkBuffer.WriteString(crlf)

	// A short (final) chunk needs its own hex length; full chunks reuse the
	// pre-formatted size string.
	chunkSizeStr := r.chunkSizeStr
	if int(n) != r.chunkSize {
		chunkSizeStr = strconv.FormatInt(n, 16)
	}

	r.headerBuffer.Reset()
	r.headerBuffer.WriteString(chunkSizeStr)
	r.headerBuffer.WriteString(crlf)

	return io.MultiReader(
		r.headerBuffer,
		r.chunkBuffer,
	), r.headerBuffer.Len() + r.chunkBuffer.Len(), nil
}

View File

@ -0,0 +1,6 @@
// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.

package checksum

// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.1.7"

View File

@ -0,0 +1,185 @@
package checksum
import (
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// InputMiddlewareOptions provides the options for the request
// checksum middleware setup.
type InputMiddlewareOptions struct {
	// GetAlgorithm is a function to get the checksum algorithm of the
	// input payload from the input parameters.
	//
	// Given the input parameter value, the function must return the algorithm
	// and true, or false if no algorithm is specified.
	GetAlgorithm func(interface{}) (string, bool)

	// Forces the middleware to compute the input payload's checksum. The
	// request will fail if the algorithm is not specified or unable to compute
	// the checksum.
	RequireChecksum bool

	// Enables support for wrapping the serialized input payload with a
	// content-encoding: aws-chunked wrapper, and including a trailer for the
	// algorithm's checksum value.
	//
	// The checksum will not be computed, nor added as trailing checksum, if
	// the Algorithm's header is already set on the request.
	EnableTrailingChecksum bool

	// Enables support for computing the SHA256 checksum of input payloads
	// along with the algorithm specified checksum. Prevents downstream
	// middleware handlers (computePayloadSHA256) re-reading the payload.
	//
	// The SHA256 payload checksum will only be used for computed for requests
	// that are not TLS, or do not enable trailing checksums.
	//
	// The SHA256 payload hash will not be computed, if the Algorithm's header
	// is already set on the request.
	EnableComputeSHA256PayloadHash bool

	// Enables support for setting the aws-chunked decoded content length
	// header for the decoded length of the underlying stream. Will only be set
	// when used with trailing checksums, and aws-chunked content-encoding.
	EnableDecodedContentLengthHeader bool
}
// AddInputMiddleware adds the middleware for performing checksum computing
// of request payloads, and checksum validation of response payloads.
func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) (err error) {
	// TODO ensure this works correctly with presigned URLs

	// Middleware stack:
	// * (OK)(Initialize) --none--
	// * (OK)(Serialize) EndpointResolver
	// * (OK)(Build) ComputeContentLength
	// * (AD)(Build) Header ComputeInputPayloadChecksum
	//    * SIGNED Payload - If HTTP && not support trailing checksum
	//    * UNSIGNED Payload - If HTTPS && not support trailing checksum
	// * (RM)(Build) ContentChecksum - OK to remove
	// * (OK)(Build) ComputePayloadHash
	//    * v4.dynamicPayloadSigningMiddleware
	//    * v4.computePayloadSHA256
	//    * v4.unsignedPayload
	//   (OK)(Build) Set computedPayloadHash header
	// * (OK)(Finalize) Retry
	// * (AD)(Finalize) Trailer ComputeInputPayloadChecksum,
	//    * Requires HTTPS && support trailing checksum
	//    * UNSIGNED Payload
	//    * Finalize run if HTTPS && support trailing checksum
	// * (OK)(Finalize) Signing
	// * (OK)(Deserialize) --none--

	// Initial checksum configuration look up middleware
	err = stack.Initialize.Add(&setupInputContext{
		GetAlgorithm: options.GetAlgorithm,
	}, middleware.Before)
	if err != nil {
		return err
	}

	// Remove smithy-go's ContentChecksum middleware; this middleware replaces
	// it. The result is deliberately ignored: it is fine if the middleware
	// was not present.
	stack.Build.Remove("ContentChecksum")

	// Create the compute checksum middleware that will be added as both a
	// build and finalize handler.
	inputChecksum := &computeInputPayloadChecksum{
		RequireChecksum:                  options.RequireChecksum,
		EnableTrailingChecksum:           options.EnableTrailingChecksum,
		EnableComputePayloadHash:         options.EnableComputeSHA256PayloadHash,
		EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader,
	}

	// Insert header checksum after ComputeContentLength middleware, must also
	// be before the computePayloadHash middleware handlers.
	err = stack.Build.Insert(inputChecksum,
		(*smithyhttp.ComputeContentLength)(nil).ID(),
		middleware.After)
	if err != nil {
		return err
	}

	// If trailing checksum is not supported no need for finalize handler to be added.
	if options.EnableTrailingChecksum {
		err = stack.Finalize.Insert(inputChecksum, "Retry", middleware.After)
		if err != nil {
			return err
		}
	}

	return nil
}
// RemoveInputMiddleware Removes the compute input payload checksum middleware
// handlers from the stack.
func RemoveInputMiddleware(stack *middleware.Stack) {
	stack.Initialize.Remove((*setupInputContext)(nil).ID())

	// The compute checksum middleware is registered in both the Build and
	// Finalize steps; remove it from each.
	checksumID := (*computeInputPayloadChecksum)(nil).ID()
	stack.Build.Remove(checksumID)
	stack.Finalize.Remove(checksumID)
}
// OutputMiddlewareOptions provides options for configuring output checksum
// validation middleware.
type OutputMiddlewareOptions struct {
	// GetValidationMode is a function to get the checksum validation
	// mode of the output payload from the input parameters.
	//
	// Given the input parameter value, the function must return the validation
	// mode and true, or false if no mode is specified.
	GetValidationMode func(interface{}) (string, bool)

	// The set of checksum algorithms that should be used for response payload
	// checksum validation. The algorithm(s) used will be a union of the
	// output's returned algorithms and this set.
	//
	// Only the first algorithm in the union is currently used.
	ValidationAlgorithms []string

	// If set the middleware will ignore output multipart checksums. Otherwise
	// a checksum format error will be returned by the middleware.
	IgnoreMultipartValidation bool

	// When set the middleware will log when output does not have checksum or
	// algorithm to validate.
	LogValidationSkipped bool

	// When set the middleware will log when the output contains a multipart
	// checksum that was skipped and not validated.
	LogMultipartValidationSkipped bool
}
// AddOutputMiddleware adds the middleware for validating response payload's
// checksum.
func AddOutputMiddleware(stack *middleware.Stack, options OutputMiddlewareOptions) error {
	setup := &setupOutputContext{
		GetValidationMode: options.GetValidationMode,
	}
	if err := stack.Initialize.Add(setup, middleware.Before); err != nil {
		return err
	}

	validator := &validateOutputPayloadChecksum{
		// Resolve a supported priority order list of algorithms to validate.
		Algorithms:                    FilterSupportedAlgorithms(options.ValidationAlgorithms),
		IgnoreMultipartValidation:     options.IgnoreMultipartValidation,
		LogMultipartValidationSkipped: options.LogMultipartValidationSkipped,
		LogValidationSkipped:          options.LogValidationSkipped,
	}

	return stack.Deserialize.Add(validator, middleware.After)
}
// RemoveOutputMiddleware Removes the output payload checksum validation
// middleware handlers from the stack.
func RemoveOutputMiddleware(stack *middleware.Stack) {
	stack.Initialize.Remove((*setupOutputContext)(nil).ID())
	stack.Deserialize.Remove((*validateOutputPayloadChecksum)(nil).ID())
}

View File

@ -0,0 +1,480 @@
package checksum
import (
"context"
"crypto/sha256"
"fmt"
"hash"
"io"
"strconv"
"strings"
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
const (
	// contentMD5Header is the canonical (textproto) form of the Content-MD5
	// header used for the legacy MD5 checksum fallback.
	contentMD5Header = "Content-Md5"
	// streamingUnsignedPayloadTrailerPayloadHash is the SigV4 payload-hash
	// sentinel for unsigned streaming payloads carrying trailing checksums.
	streamingUnsignedPayloadTrailerPayloadHash = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
)
// computedInputChecksumsKey is the metadata key for recording the algorithm the
// checksum was computed for and the checksum value.
type computedInputChecksumsKey struct{}
// GetComputedInputChecksums returns the map of checksum algorithm to their
// computed value stored in the middleware Metadata. Returns false if no values
// were stored in the Metadata.
func GetComputedInputChecksums(m middleware.Metadata) (map[string]string, bool) {
	checksums, ok := m.Get(computedInputChecksumsKey{}).(map[string]string)
	if !ok {
		return nil, false
	}
	return checksums, true
}
// SetComputedInputChecksums stores the map of checksum algorithm to their
// computed value in the middleware Metadata, replacing any previously stored
// map.
func SetComputedInputChecksums(m *middleware.Metadata, checksums map[string]string) {
	m.Set(computedInputChecksumsKey{}, checksums)
}
// computeInputPayloadChecksum middleware computes payload checksum. It runs in
// both the Build step (header checksums) and the Finalize step (trailing
// checksums); which path is taken depends on protocol, stream, and options.
type computeInputPayloadChecksum struct {
	// Enables support for wrapping the serialized input payload with a
	// content-encoding: aws-check wrapper, and including a trailer for the
	// algorithm's checksum value.
	//
	// The checksum will not be computed, nor added as trailing checksum, if
	// the Algorithm's header is already set on the request.
	EnableTrailingChecksum bool

	// States that a checksum is required to be included for the operation. If
	// Input does not specify a checksum, fallback to built in MD5 checksum is
	// used.
	//
	// Replaces smithy-go's ContentChecksum middleware.
	RequireChecksum bool

	// Enables support for computing the SHA256 checksum of input payloads
	// along with the algorithm specified checksum. Prevents downstream
	// middleware handlers (computePayloadSHA256) re-reading the payload.
	//
	// The SHA256 payload hash will only be used for computed for requests
	// that are not TLS, or do not enable trailing checksums.
	//
	// The SHA256 payload hash will not be computed, if the Algorithm's header
	// is already set on the request.
	EnableComputePayloadHash bool

	// Enables support for setting the aws-chunked decoded content length
	// header for the decoded length of the underlying stream. Will only be set
	// when used with trailing checksums, and aws-chunked content-encoding.
	EnableDecodedContentLengthHeader bool

	// buildHandlerRun records that HandleBuild executed, allowing
	// HandleFinalize to detect a build handler removed from the stack.
	buildHandlerRun bool
	// deferToFinalizeHandler is set by HandleBuild when the checksum must be
	// added as a trailing checksum by HandleFinalize instead of as a header.
	deferToFinalizeHandler bool
}
// ID provides the middleware's unique identifier within the stack.
func (m *computeInputPayloadChecksum) ID() string {
	const id = "AWSChecksum:ComputeInputPayloadChecksum"
	return id
}
// computeInputHeaderChecksumError describes a failure while computing or
// setting a request header checksum. Err, when non-nil, carries the
// underlying cause.
type computeInputHeaderChecksumError struct {
	Msg string
	Err error
}
// Error implements the error interface, combining the fixed prefix, the
// message, and the wrapped cause when present.
func (e computeInputHeaderChecksumError) Error() string {
	const intro = "compute input header checksum failed"

	if e.Err == nil {
		return fmt.Sprintf("%s, %s", intro, e.Msg)
	}
	return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err)
}
func (e computeInputHeaderChecksumError) Unwrap() error { return e.Err }
// HandleBuild handles computing the payload's checksum, in the following cases:
//   - Is HTTP, not HTTPS
//   - RequireChecksum is true, and no checksums were specified via the Input
//   - Trailing checksums are not supported
//
// The build handler must be inserted in the stack before ContentPayloadHash
// and after ComputeContentLength.
func (m *computeInputPayloadChecksum) HandleBuild(
	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
) (
	out middleware.BuildOutput, metadata middleware.Metadata, err error,
) {
	// Mark that this handler ran so HandleFinalize can detect mismatched
	// stack configuration.
	m.buildHandlerRun = true

	req, ok := in.Request.(*smithyhttp.Request)
	if !ok {
		return out, metadata, computeInputHeaderChecksumError{
			Msg: fmt.Sprintf("unknown request type %T", req),
		}
	}

	var algorithm Algorithm
	var checksum string
	// Deferred so that every successful exit path below records the computed
	// checksum in the operation metadata.
	defer func() {
		if algorithm == "" || checksum == "" || err != nil {
			return
		}

		// Record the checksum and algorithm that was computed
		SetComputedInputChecksums(&metadata, map[string]string{
			string(algorithm): checksum,
		})
	}()

	// If no algorithm was specified, and the operation requires a checksum,
	// fallback to the legacy content MD5 checksum.
	algorithm, ok, err = getInputAlgorithm(ctx)
	if err != nil {
		return out, metadata, err
	} else if !ok {
		if m.RequireChecksum {
			checksum, err = setMD5Checksum(ctx, req)
			if err != nil {
				return out, metadata, computeInputHeaderChecksumError{
					Msg: "failed to compute stream's MD5 checksum",
					Err: err,
				}
			}
			algorithm = Algorithm("MD5")
		}
		return next.HandleBuild(ctx, in)
	}

	// If the checksum header is already set nothing to do.
	checksumHeader := AlgorithmHTTPHeader(algorithm)
	if checksum = req.Header.Get(checksumHeader); checksum != "" {
		return next.HandleBuild(ctx, in)
	}

	computePayloadHash := m.EnableComputePayloadHash
	if v := v4.GetPayloadHash(ctx); v != "" {
		// A payload hash is already on the context; don't redo the work.
		computePayloadHash = false
	}

	stream := req.GetStream()
	streamLength, err := getRequestStreamLength(req)
	if err != nil {
		return out, metadata, computeInputHeaderChecksumError{
			Msg: "failed to determine stream length",
			Err: err,
		}
	}

	// If trailing checksums are supported, the request is HTTPS, and the
	// stream is not nil or empty, there is nothing to do in the build stage.
	// The checksum will be added to the request as a trailing checksum in the
	// finalize handler.
	//
	// Nil and empty streams will always be handled as a request header,
	// regardless if the operation supports trailing checksums or not.
	if strings.EqualFold(req.URL.Scheme, "https") {
		if stream != nil && streamLength != 0 && m.EnableTrailingChecksum {
			if m.EnableComputePayloadHash {
				// payload hash is set as header in Build middleware handler,
				// ContentSHA256Header.
				ctx = v4.SetPayloadHash(ctx, streamingUnsignedPayloadTrailerPayloadHash)
			}

			m.deferToFinalizeHandler = true
			return next.HandleBuild(ctx, in)
		}

		// If trailing checksums are not enabled but protocol is still HTTPS
		// disabling computing the payload hash. Downstream middleware handler
		// (computePayloadSHA256) will set the payload hash to unsigned payload,
		// if signing was used.
		computePayloadHash = false
	}

	// Only seekable streams are supported for non-trailing checksums, because
	// the stream needs to be rewound before the handler can continue.
	if stream != nil && !req.IsStreamSeekable() {
		return out, metadata, computeInputHeaderChecksumError{
			Msg: "unseekable stream is not supported without TLS and trailing checksum",
		}
	}

	var sha256Checksum string
	checksum, sha256Checksum, err = computeStreamChecksum(
		algorithm, stream, computePayloadHash)
	if err != nil {
		return out, metadata, computeInputHeaderChecksumError{
			Msg: "failed to compute stream checksum",
			Err: err,
		}
	}

	// The stream was consumed to compute the checksum; rewind it so the
	// transport can re-read it when sending the request.
	if err := req.RewindStream(); err != nil {
		return out, metadata, computeInputHeaderChecksumError{
			Msg: "failed to rewind stream",
			Err: err,
		}
	}

	req.Header.Set(checksumHeader, checksum)

	if computePayloadHash {
		ctx = v4.SetPayloadHash(ctx, sha256Checksum)
	}

	return next.HandleBuild(ctx, in)
}
// computeInputTrailingChecksumError describes a failure while computing or
// attaching a trailing checksum in the finalize step. Err, when non-nil,
// carries the underlying cause.
type computeInputTrailingChecksumError struct {
	Msg string
	Err error
}
// Error implements the error interface, combining the fixed prefix, the
// message, and the wrapped cause when present.
func (e computeInputTrailingChecksumError) Error() string {
	const intro = "compute input trailing checksum failed"

	if e.Err == nil {
		return fmt.Sprintf("%s, %s", intro, e.Msg)
	}
	return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err)
}
func (e computeInputTrailingChecksumError) Unwrap() error { return e.Err }
// HandleFinalize handles computing the payload's checksum, in the following cases:
//   - Is HTTPS, not HTTP
//   - A checksum was specified via the Input
//   - Trailing checksums are supported.
//
// The finalize handler must be inserted in the stack before Signing, and after Retry.
func (m *computeInputPayloadChecksum) HandleFinalize(
	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
) (
	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
) {
	if !m.deferToFinalizeHandler {
		if !m.buildHandlerRun {
			return out, metadata, computeInputTrailingChecksumError{
				Msg: "build handler was removed without also removing finalize handler",
			}
		}
		return next.HandleFinalize(ctx, in)
	}

	req, ok := in.Request.(*smithyhttp.Request)
	if !ok {
		return out, metadata, computeInputTrailingChecksumError{
			Msg: fmt.Sprintf("unknown request type %T", req),
		}
	}

	// Trailing checksums are only supported when TLS is enabled.
	if !strings.EqualFold(req.URL.Scheme, "https") {
		return out, metadata, computeInputTrailingChecksumError{
			Msg: "HTTPS required",
		}
	}

	// If no algorithm was specified, there is nothing to do.
	algorithm, ok, err := getInputAlgorithm(ctx)
	if err != nil {
		return out, metadata, computeInputTrailingChecksumError{
			Msg: "failed to get algorithm",
			Err: err,
		}
	} else if !ok {
		return out, metadata, computeInputTrailingChecksumError{
			Msg: "no algorithm specified",
		}
	}

	// If the checksum header is already set before finalize could run, there
	// is nothing to do.
	checksumHeader := AlgorithmHTTPHeader(algorithm)
	if req.Header.Get(checksumHeader) != "" {
		return next.HandleFinalize(ctx, in)
	}

	stream := req.GetStream()
	streamLength, err := getRequestStreamLength(req)
	if err != nil {
		return out, metadata, computeInputTrailingChecksumError{
			Msg: "failed to determine stream length",
			Err: err,
		}
	}

	if stream == nil || streamLength == 0 {
		// Nil and empty streams are handled by the Build handler. They are not
		// supported by the trailing checksums finalize handler. There is no
		// benefit to sending them as trailers compared to headers.
		return out, metadata, computeInputTrailingChecksumError{
			Msg: "nil or empty streams are not supported",
		}
	}

	checksumReader, err := newComputeChecksumReader(stream, algorithm)
	if err != nil {
		// NOTE(review): "created" typo in this message kept for
		// byte-compatibility with callers matching on error text.
		return out, metadata, computeInputTrailingChecksumError{
			Msg: "failed to created checksum reader",
			Err: err,
		}
	}

	// Wrap the stream in aws-chunked encoding, declaring the checksum as a
	// trailer whose value is produced once the body has been fully read.
	awsChunkedReader := newUnsignedAWSChunkedEncoding(checksumReader,
		func(o *awsChunkedEncodingOptions) {
			o.Trailers[AlgorithmHTTPHeader(checksumReader.Algorithm())] = awsChunkedTrailerValue{
				Get:    checksumReader.Base64Checksum,
				Length: checksumReader.Base64ChecksumLength(),
			}
			o.StreamLength = streamLength
		})

	for key, values := range awsChunkedReader.HTTPHeaders() {
		for _, value := range values {
			req.Header.Add(key, value)
		}
	}

	// Setting the stream on the request will create a copy. The content length
	// is not updated until after the request is copied to prevent impacting
	// upstream middleware.
	req, err = req.SetStream(awsChunkedReader)
	if err != nil {
		return out, metadata, computeInputTrailingChecksumError{
			Msg: "failed updating request to trailing checksum wrapped stream",
			Err: err,
		}
	}
	req.ContentLength = awsChunkedReader.EncodedLength()
	in.Request = req

	// Add decoded content length header if original stream's content length is known.
	if streamLength != -1 && m.EnableDecodedContentLengthHeader {
		req.Header.Set(decodedContentLengthHeaderName, strconv.FormatInt(streamLength, 10))
	}

	out, metadata, err = next.HandleFinalize(ctx, in)
	if err == nil {
		// The trailer has been sent; the reader now knows the final checksum.
		checksum, err := checksumReader.Base64Checksum()
		if err != nil {
			return out, metadata, fmt.Errorf("failed to get computed checksum, %w", err)
		}

		// Record the checksum and algorithm that was computed
		SetComputedInputChecksums(&metadata, map[string]string{
			string(algorithm): checksum,
		})
	}

	return out, metadata, err
}
// getInputAlgorithm returns the request checksum algorithm stored on the
// context, reporting false when none was specified. An error is returned for
// an unparsable algorithm name.
func getInputAlgorithm(ctx context.Context) (Algorithm, bool, error) {
	name := getContextInputAlgorithm(ctx)
	if len(name) == 0 {
		return "", false, nil
	}

	parsed, err := ParseAlgorithm(name)
	if err != nil {
		return "", false, fmt.Errorf(
			"failed to parse algorithm, %w", err)
	}
	return parsed, true, nil
}
// computeStreamChecksum fully reads stream, returning its base64-encoded
// checksum for algorithm and, when computePayloadHash is set, the hex-encoded
// SHA256 payload hash for request signing. When the checksum algorithm is
// itself SHA256 the single hasher serves both outputs.
func computeStreamChecksum(algorithm Algorithm, stream io.Reader, computePayloadHash bool) (
	checksum string, sha256Checksum string, err error,
) {
	hasher, err := NewAlgorithmHash(algorithm)
	if err != nil {
		return "", "", fmt.Errorf(
			"failed to get hasher for checksum algorithm, %w", err)
	}

	// When a separate SHA256 payload hash is needed, tee the stream into a
	// second hasher so the body is only read once.
	var payloadHasher hash.Hash
	sink := io.Writer(hasher)
	needSeparateSHA256 := computePayloadHash && algorithm != AlgorithmSHA256
	if needSeparateSHA256 {
		payloadHasher = sha256.New()
		sink = io.MultiWriter(hasher, payloadHasher)
	}

	if stream != nil {
		if _, err = io.Copy(sink, stream); err != nil {
			return "", "", fmt.Errorf(
				"failed to read stream to compute hash, %w", err)
		}
	}

	checksum = string(base64EncodeHashSum(hasher))
	if computePayloadHash {
		if needSeparateSHA256 {
			sha256Checksum = string(hexEncodeHashSum(payloadHasher))
		} else {
			sha256Checksum = string(hexEncodeHashSum(hasher))
		}
	}

	return checksum, sha256Checksum, nil
}
// getRequestStreamLength returns the length of the request's stream, or -1
// when the length cannot be determined.
func getRequestStreamLength(req *smithyhttp.Request) (int64, error) {
	if req.ContentLength > 0 {
		return req.ContentLength, nil
	}

	length, ok, err := req.StreamLength()
	if err != nil {
		return 0, fmt.Errorf("failed getting request stream's length, %w", err)
	}
	if ok {
		return length, nil
	}
	return -1, nil
}
// setMD5Checksum computes the MD5 of the request payload and sets it to the
// Content-MD5 header. Returning the MD5 base64 encoded string or error.
//
// If the MD5 is already set as the Content-MD5 header, that value will be
// returned, and nothing else will be done.
//
// If the payload is empty, no MD5 will be computed. No error will be returned.
// Empty payloads do not have an MD5 value.
//
// Replaces the smithy-go middleware for httpChecksum trait.
func setMD5Checksum(ctx context.Context, req *smithyhttp.Request) (string, error) {
	// Respect a caller-provided Content-MD5.
	if existing := req.Header.Get(contentMD5Header); len(existing) != 0 {
		return existing, nil
	}

	stream := req.GetStream()
	if stream == nil {
		return "", nil
	}

	// The stream is consumed to hash it and must be replayable afterwards.
	if !req.IsStreamSeekable() {
		return "", fmt.Errorf(
			"unseekable stream is not supported for computing md5 checksum")
	}

	sum, err := computeMD5Checksum(stream)
	if err != nil {
		return "", err
	}
	if err := req.RewindStream(); err != nil {
		return "", fmt.Errorf("failed to rewind stream after computing MD5 checksum, %w", err)
	}

	// set the 'Content-MD5' header
	checksum := string(sum)
	req.Header.Set(contentMD5Header, checksum)
	return checksum, nil
}

View File

@ -0,0 +1,117 @@
package checksum
import (
"context"
"github.com/aws/smithy-go/middleware"
)
// setupInputContext is the initial middleware that looks up the input
// used to configure checksum behavior. This middleware must be executed before
// input validation step or any other checksum middleware.
type setupInputContext struct {
	// GetAlgorithm is a function to get the checksum algorithm of the
	// input payload from the input parameters.
	//
	// Given the input parameter value, the function must return the algorithm
	// and true, or false if no algorithm is specified.
	GetAlgorithm func(interface{}) (string, bool)
}
// ID provides the middleware's unique identifier within the stack.
func (m *setupInputContext) ID() string {
	const id = "AWSChecksum:SetupInputContext"
	return id
}
// HandleInitialize captures the input's checksum algorithm, if one was
// provided, and stores it on the context for downstream checksum handlers.
func (m *setupInputContext) HandleInitialize(
	ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
) (
	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
	if m.GetAlgorithm == nil {
		return next.HandleInitialize(ctx, in)
	}

	// Check if the input specified a checksum algorithm.
	if algorithm, ok := m.GetAlgorithm(in.Parameters); ok && len(algorithm) != 0 {
		ctx = setContextInputAlgorithm(ctx, algorithm)
	}
	return next.HandleInitialize(ctx, in)
}
// inputAlgorithmKey is the context key under which the request checksum
// algorithm is stored for, and retrieved by, the checksum middlewares.
type inputAlgorithmKey struct{}
// setContextInputAlgorithm records the request checksum algorithm on the
// context, scoped to the middleware stack's values.
func setContextInputAlgorithm(ctx context.Context, algorithm string) context.Context {
	return middleware.WithStackValue(ctx, inputAlgorithmKey{}, algorithm)
}
// getContextInputAlgorithm returns the checksum algorithm recorded on the
// context, or the empty string when none was specified.
//
// Scoped to stack values.
func getContextInputAlgorithm(ctx context.Context) string {
	algorithm, _ := middleware.GetStackValue(ctx, inputAlgorithmKey{}).(string)
	return algorithm
}
// setupOutputContext is the initialize middleware that captures the response
// checksum validation mode from the operation input, making it available on
// the context to the deserialize validation middleware.
type setupOutputContext struct {
	// GetValidationMode is a function to get the checksum validation
	// mode of the output payload from the input parameters.
	//
	// Given the input parameter value, the function must return the validation
	// mode and true, or false if no mode is specified.
	GetValidationMode func(interface{}) (string, bool)
}
// ID provides the middleware's unique identifier within the stack.
func (m *setupOutputContext) ID() string {
	const id = "AWSChecksum:SetupOutputContext"
	return id
}
// HandleInitialize captures the output checksum validation mode, if one was
// provided, and stores it on the context for the deserialize validator.
func (m *setupOutputContext) HandleInitialize(
	ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
) (
	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
	if m.GetValidationMode == nil {
		return next.HandleInitialize(ctx, in)
	}

	// Check if the input specified a validation mode.
	if mode, ok := m.GetValidationMode(in.Parameters); ok && len(mode) != 0 {
		ctx = setContextOutputValidationMode(ctx, mode)
	}
	return next.HandleInitialize(ctx, in)
}
// outputValidationModeKey is the context key under which the response
// checksum validation mode is stored for, and retrieved by, the middlewares.
type outputValidationModeKey struct{}
// setContextOutputValidationMode records the response checksum validation
// mode on the context, scoped to the middleware stack's values.
func setContextOutputValidationMode(ctx context.Context, mode string) context.Context {
	return middleware.WithStackValue(ctx, outputValidationModeKey{}, mode)
}
// getContextOutputValidationMode returns the response checksum validation
// mode recorded on the context, or the empty string when none was specified.
//
// Scoped to stack values.
func getContextOutputValidationMode(ctx context.Context) string {
	mode, _ := middleware.GetStackValue(ctx, outputValidationModeKey{}).(string)
	return mode
}

View File

@ -0,0 +1,131 @@
package checksum
import (
"context"
"fmt"
"strings"
"github.com/aws/smithy-go"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// outputValidationAlgorithmsUsedKey is the metadata key for indexing the algorithms
// that were used, by the middleware's validation.
type outputValidationAlgorithmsUsedKey struct{}
// GetOutputValidationAlgorithmsUsed returns the checksum algorithms used
// stored in the middleware Metadata. Returns false if no algorithms were
// stored in the Metadata.
func GetOutputValidationAlgorithmsUsed(m middleware.Metadata) ([]string, bool) {
	used, ok := m.Get(outputValidationAlgorithmsUsedKey{}).([]string)
	if !ok {
		return nil, false
	}
	return used, true
}
// SetOutputValidationAlgorithmsUsed stores the checksum algorithms used in the
// middleware Metadata, replacing any previously stored slice.
func SetOutputValidationAlgorithmsUsed(m *middleware.Metadata, algorithms []string) {
	m.Set(outputValidationAlgorithmsUsedKey{}, algorithms)
}
// validateOutputPayloadChecksum middleware computes payload checksum of the
// received response and validates with checksum returned by the service.
type validateOutputPayloadChecksum struct {
	// Algorithms represents a priority-ordered list of valid checksum
	// algorithm that should be validated when present in HTTP response
	// headers.
	Algorithms []Algorithm

	// IgnoreMultipartValidation indicates multipart checksums ending with "-#"
	// will be ignored.
	IgnoreMultipartValidation bool

	// When set the middleware will log when output does not have checksum or
	// algorithm to validate.
	LogValidationSkipped bool

	// When set the middleware will log when the output contains a multipart
	// checksum that was skipped and not validated.
	LogMultipartValidationSkipped bool
}
// ID provides the middleware's unique identifier within the stack.
func (m *validateOutputPayloadChecksum) ID() string {
	const id = "AWSChecksum:ValidateOutputPayloadChecksum"
	return id
}
// HandleDeserialize is a Deserialize middleware that wraps the HTTP response
// body with an io.ReadCloser that validates its checksum as the body is read.
//
// Validation is skipped (optionally logged) when the response carries no
// supported checksum header, or when the checksum is a multipart checksum
// ("-#" suffix) and IgnoreMultipartValidation is set.
func (m *validateOutputPayloadChecksum) HandleDeserialize(
	ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	// If there is no validation mode specified nothing is supported.
	if mode := getContextOutputValidationMode(ctx); mode != "ENABLED" {
		return out, metadata, err
	}

	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{
			Err: fmt.Errorf("unknown transport type %T", out.RawResponse),
		}
	}

	// Select the first algorithm, in priority order, for which the response
	// provides a checksum header. The break is required: Algorithms is a
	// priority-ordered list, and without it the last matching header would
	// win instead of the first.
	var expectedChecksum string
	var algorithmToUse Algorithm
	for _, algorithm := range m.Algorithms {
		value := response.Header.Get(AlgorithmHTTPHeader(algorithm))
		if len(value) == 0 {
			continue
		}

		expectedChecksum = value
		algorithmToUse = algorithm
		break
	}

	// TODO this must validate the validation mode is set to enabled.

	logger := middleware.GetLogger(ctx)

	// Skip validation if no checksum algorithm or checksum is available.
	if len(expectedChecksum) == 0 || len(algorithmToUse) == 0 {
		if m.LogValidationSkipped {
			// TODO this probably should have more information about the
			// operation output that won't be validated.
			logger.Logf(logging.Warn,
				"Response has no supported checksum. Not validating response payload.")
		}
		return out, metadata, nil
	}

	// Ignore multipart validation
	if m.IgnoreMultipartValidation && strings.Contains(expectedChecksum, "-") {
		if m.LogMultipartValidationSkipped {
			// TODO this probably should have more information about the
			// operation output that won't be validated.
			logger.Logf(logging.Warn, "Skipped validation of multipart checksum.")
		}
		return out, metadata, nil
	}

	// Validation happens lazily as the caller reads the body.
	body, err := newValidateChecksumReader(response.Body, algorithmToUse, expectedChecksum)
	if err != nil {
		return out, metadata, fmt.Errorf("failed to create checksum validation reader, %w", err)
	}
	response.Body = body

	// Update the metadata to include the set of the checksum algorithms that
	// will be validated.
	SetOutputValidationAlgorithmsUsed(&metadata, []string{
		string(algorithmToUse),
	})

	return out, metadata, nil
}

View File

@ -1,3 +1,55 @@
# v1.9.6 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.5 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.4 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.3 (2022-03-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.2 (2022-03-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.1 (2022-03-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.0 (2022-03-08)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.8.0 (2022-02-24)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.7.0 (2022-01-14)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.6.0 (2022-01-07)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.5.2 (2021-12-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.5.1 (2021-11-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.5.0 (2021-11-06)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version

View File

@ -3,4 +3,4 @@
package presignedurl
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.5.0"
const goModuleVersion = "1.9.6"

View File

@ -1,3 +1,55 @@
# v1.13.6 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.5 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.4 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.3 (2022-03-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.2 (2022-03-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.1 (2022-03-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.0 (2022-03-08)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.0 (2022-02-24)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.0 (2022-01-14)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.10.0 (2022-01-07)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.2 (2021-12-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.1 (2021-11-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.0 (2021-11-06)
* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.

View File

@ -3,4 +3,4 @@
package s3shared
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.9.0"
const goModuleVersion = "1.13.6"

View File

@ -1,3 +1,94 @@
# v1.26.11 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.10 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.9 (2022-05-06)
* No change notes available for this release.
# v1.26.8 (2022-05-03)
* **Documentation**: Documentation only update for doc bug fixes for the S3 API docs.
# v1.26.7 (2022-04-27)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.6 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.5 (2022-04-12)
* **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively.
# v1.26.4 (2022-04-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.3 (2022-03-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.2 (2022-03-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.1 (2022-03-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.0 (2022-03-08)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.25.0 (2022-02-24)
* **Feature**: API client updated
* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options.
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Bug Fix**: Fixes the AWS Sigv4 signer to trim header value's whitespace when computing the canonical headers block of the string to sign.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.24.1 (2022-01-28)
* **Bug Fix**: Updates SDK API client deserialization to pre-allocate byte slice and string response payloads, [#1565](https://github.com/aws/aws-sdk-go-v2/pull/1565). Thanks to [Tyson Mote](https://github.com/tysonmote) for submitting this PR.
# v1.24.0 (2022-01-14)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.23.0 (2022-01-07)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Documentation**: API client updated
* **Dependency Update**: Updated to the latest SDK module versions
# v1.22.0 (2021-12-21)
* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens.
* **Feature**: Updated to latest service endpoints
# v1.21.0 (2021-12-02)
* **Feature**: API client updated
* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514))
* **Dependency Update**: Updated to the latest SDK module versions
# v1.20.0 (2021-11-30)
* **Feature**: API client updated
# v1.19.1 (2021-11-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.19.0 (2021-11-12)
* **Feature**: Waiters now have a `WaitForOutput` method, which can be used to retrieve the output of the successful wait operation. Thank you to [Andrew Haines](https://github.com/haines) for contributing this feature.

View File

@ -6,22 +6,25 @@ import (
"context"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/defaults"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/retry"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
"github.com/aws/aws-sdk-go-v2/internal/v4a"
acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding"
internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url"
"github.com/aws/aws-sdk-go-v2/service/internal/s3shared"
s3sharedconfig "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/internal/v4a"
smithy "github.com/aws/smithy-go"
smithydocument "github.com/aws/smithy-go/document"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
"net"
"net/http"
"time"
)
@ -43,6 +46,8 @@ func New(options Options, optFns ...func(*Options)) *Client {
resolveDefaultLogger(&options)
setResolvedDefaultsMode(&options)
resolveRetryer(&options)
resolveHTTPClient(&options)
@ -78,6 +83,10 @@ type Options struct {
// The credentials object to use when signing requests.
Credentials aws.CredentialsProvider
// The configuration DefaultsMode that the SDK should use when constructing the
// clients initial default settings.
DefaultsMode aws.DefaultsMode
// Allows you to disable S3 Multi-Region access points feature.
DisableMultiRegionAccessPoints bool
@ -96,10 +105,36 @@ type Options struct {
// The region to send requests to. (Required)
Region string
// RetryMaxAttempts specifies the maximum number attempts an API client will call
// an operation that fails with a retryable error. A value of 0 is ignored, and
// will not be used to configure the API client created default retryer, or modify
// per operation call's retry max attempts. When creating a new API Clients this
// member will only be used if the Retryer Options member is nil. This value will
// be ignored if Retryer is not nil. If specified in an operation call's functional
// options with a value that is different than the constructed client's Options,
// the Client's Retryer will be wrapped to use the operation's specific
// RetryMaxAttempts value.
RetryMaxAttempts int
// RetryMode specifies the retry mode the API client will be created with, if
// Retryer option is not also specified. When creating a new API Clients this
// member will only be used if the Retryer Options member is nil. This value will
// be ignored if Retryer is not nil. Currently does not support per operation call
// overrides, may in the future.
RetryMode aws.RetryMode
// Retryer guides how HTTP requests should be retried in case of recoverable
// failures. When nil the API client will use a default retryer.
// failures. When nil the API client will use a default retryer. The kind of
// default retry created by the API client can be changed with the RetryMode
// option.
Retryer aws.Retryer
// The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
// to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You
// should not populate this structure programmatically, or rely on the values here
// within your applications.
RuntimeEnvironment aws.RuntimeEnvironment
// Allows you to enable arn region support for the service.
UseARNRegion bool
@ -126,6 +161,12 @@ type Options struct {
// Signature Version 4a (SigV4a) Signer
httpSignerV4a httpSignerV4a
// The initial DefaultsMode used when the client options were constructed. If the
// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
// value was at that point in time. Currently does not support per operation call
// overrides, may in the future.
resolvedDefaultsMode aws.DefaultsMode
// The HTTP client to invoke API calls with. Defaults to client's default HTTP
// implementation if nil.
HTTPClient HTTPClient
@ -156,6 +197,7 @@ func (o Options) Copy() Options {
to := o
to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
copy(to.APIOptions, o.APIOptions)
return to
}
func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) {
@ -168,6 +210,8 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf
setSafeEventStreamClientLogMode(&options, opID)
finalizeRetryMaxAttemptOptions(&options, *c)
finalizeClientEndpointResolverOptions(&options)
resolveCredentialProvider(&options)
@ -209,10 +253,27 @@ func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
return middleware.AddSetLoggerMiddleware(stack, o.Logger)
}
func setResolvedDefaultsMode(o *Options) {
if len(o.resolvedDefaultsMode) > 0 {
return
}
var mode aws.DefaultsMode
mode.SetFromString(string(o.DefaultsMode))
if mode == aws.DefaultsModeAuto {
mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment)
}
o.resolvedDefaultsMode = mode
}
// NewFromConfig returns a new client from the provided config.
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
opts := Options{
Region: cfg.Region,
DefaultsMode: cfg.DefaultsMode,
RuntimeEnvironment: cfg.RuntimeEnvironment,
HTTPClient: cfg.HTTPClient,
Credentials: cfg.Credentials,
APIOptions: cfg.APIOptions,
@ -220,6 +281,8 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
ClientLogMode: cfg.ClientLogMode,
}
resolveAWSRetryerProvider(cfg, &opts)
resolveAWSRetryMaxAttempts(cfg, &opts)
resolveAWSRetryMode(cfg, &opts)
resolveAWSEndpointResolver(cfg, &opts)
resolveUseARNRegion(cfg, &opts)
resolveUseDualStackEndpoint(cfg, &opts)
@ -228,17 +291,71 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
}
func resolveHTTPClient(o *Options) {
var buildable *awshttp.BuildableClient
if o.HTTPClient != nil {
var ok bool
buildable, ok = o.HTTPClient.(*awshttp.BuildableClient)
if !ok {
return
}
o.HTTPClient = awshttp.NewBuildableClient()
} else {
buildable = awshttp.NewBuildableClient()
}
modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
if err == nil {
buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) {
if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok {
dialer.Timeout = dialerTimeout
}
})
buildable = buildable.WithTransportOptions(func(transport *http.Transport) {
if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok {
transport.TLSHandshakeTimeout = tlsHandshakeTimeout
}
})
}
o.HTTPClient = buildable
}
func resolveRetryer(o *Options) {
if o.Retryer != nil {
return
}
o.Retryer = retry.NewStandard()
if len(o.RetryMode) == 0 {
modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
if err == nil {
o.RetryMode = modeConfig.RetryMode
}
}
if len(o.RetryMode) == 0 {
o.RetryMode = aws.RetryModeStandard
}
var standardOptions []func(*retry.StandardOptions)
if v := o.RetryMaxAttempts; v != 0 {
standardOptions = append(standardOptions, func(so *retry.StandardOptions) {
so.MaxAttempts = v
})
}
switch o.RetryMode {
case aws.RetryModeAdaptive:
var adaptiveOptions []func(*retry.AdaptiveModeOptions)
if len(standardOptions) != 0 {
adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) {
ao.StandardOptions = append(ao.StandardOptions, standardOptions...)
})
}
o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...)
default:
o.Retryer = retry.NewStandard(standardOptions...)
}
}
func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
@ -248,8 +365,29 @@ func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
o.Retryer = cfg.Retryer()
}
func resolveAWSRetryMode(cfg aws.Config, o *Options) {
if len(cfg.RetryMode) == 0 {
return
}
o.RetryMode = cfg.RetryMode
}
func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
if cfg.RetryMaxAttempts == 0 {
return
}
o.RetryMaxAttempts = cfg.RetryMaxAttempts
}
func finalizeRetryMaxAttemptOptions(o *Options, client Client) {
if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
return
}
o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
}
func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
if cfg.EndpointResolver == nil {
if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil {
return
}
o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver())
@ -344,6 +482,7 @@ func resolveCredentialProvider(o *Options) {
if o.Credentials == nil {
return
}
if _, ok := o.Credentials.(v4a.CredentialsProvider); ok {
return
}
@ -351,11 +490,11 @@ func resolveCredentialProvider(o *Options) {
switch o.Credentials.(type) {
case aws.AnonymousCredentials, *aws.AnonymousCredentials:
return
}
o.Credentials = &v4a.SymmetricCredentialAdaptor{SymmetricProvider: o.Credentials}
}
func swapWithCustomHTTPSignerMiddleware(stack *middleware.Stack, o Options) error {
mw := s3cust.NewSignHTTPRequestMiddleware(s3cust.SignHTTPRequestMiddlewareOptions{
CredentialsProvider: o.Credentials,
@ -363,11 +502,14 @@ func swapWithCustomHTTPSignerMiddleware(stack *middleware.Stack, o Options) erro
V4aSigner: o.httpSignerV4a,
LogSigning: o.ClientLogMode.IsSigning(),
})
return s3cust.RegisterSigningMiddleware(stack, mw)
}
type httpSignerV4a interface {
SignHTTP(ctx context.Context, credentials v4a.Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*v4a.SignerOptions)) error
SignHTTP(ctx context.Context, credentials v4a.Credentials, r *http.Request, payloadHash,
service string, regionSet []string, signingTime time.Time,
optFns ...func(*v4a.SignerOptions)) error
}
func resolveHTTPSignerV4a(o *Options) {
@ -389,6 +531,53 @@ func addMetadataRetrieverMiddleware(stack *middleware.Stack) error {
return s3shared.AddMetadataRetrieverMiddleware(stack)
}
// ComputedInputChecksumsMetadata provides information about the algorithms used to
// compute the checksum(s) of the input payload.
type ComputedInputChecksumsMetadata struct {
// ComputedChecksums is a map of algorithm name to checksum value of the computed
// input payload's checksums.
ComputedChecksums map[string]string
}
// GetComputedInputChecksumsMetadata retrieves from the result metadata the map of
// algorithms and input payload checksums values.
func GetComputedInputChecksumsMetadata(m middleware.Metadata) (ComputedInputChecksumsMetadata, bool) {
values, ok := internalChecksum.GetComputedInputChecksums(m)
if !ok {
return ComputedInputChecksumsMetadata{}, false
}
return ComputedInputChecksumsMetadata{
ComputedChecksums: values,
}, true
}
// ChecksumValidationMetadata contains metadata such as the checksum algorithm used
// for data integrity validation.
type ChecksumValidationMetadata struct {
// AlgorithmsUsed is the set of the checksum algorithms used to validate the
// response payload. The response payload must be completely read in order for the
// checksum validation to be performed. An error is returned by the operation
// output's response io.ReadCloser if the computed checksums are invalid.
AlgorithmsUsed []string
}
// GetChecksumValidationMetadata returns the set of algorithms that will be used to
// validate the response payload with. The response payload must be completely read
// in order for the checksum validation to be performed. An error is returned by
// the operation output's response io.ReadCloser if the computed checksums are
// invalid. Returns false if no checksum algorithm used metadata was found.
func GetChecksumValidationMetadata(m middleware.Metadata) (ChecksumValidationMetadata, bool) {
values, ok := internalChecksum.GetOutputValidationAlgorithmsUsed(m)
if !ok {
return ChecksumValidationMetadata{}, false
}
return ChecksumValidationMetadata{
AlgorithmsUsed: append(make([]string, 0, len(values)), values...),
}, true
}
// nopGetBucketAccessor is no-op accessor for operation that don't support bucket
// member as input
func nopGetBucketAccessor(input interface{}) (*string, bool) {

View File

@ -74,9 +74,9 @@ type AbortMultipartUploadInput struct {
// you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
// hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
// this action using S3 on Outposts through the Amazon Web Services SDKs, you
// this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts bucket ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see Using S3 on Outposts
// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
// Amazon S3 User Guide.
//
@ -94,13 +94,14 @@ type AbortMultipartUploadInput struct {
UploadId *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from requester pays buckets, see Downloading Objects
// in Requestor Pays Buckets
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer

View File

@ -31,10 +31,13 @@ import (
// determine whether the request succeeded. Note that if CompleteMultipartUpload
// fails, applications should be prepared to retry the failed requests. For more
// information, see Amazon S3 Error Best Practices
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). For
// more information about multipart uploads, see Uploading Objects Using Multipart
// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
// For information about permissions required to use the multipart upload API, see
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). You
// cannot use Content-Type: application/x-www-form-urlencoded with Complete
// Multipart Upload requests. Also, if you do not provide a Content-Type header,
// CompleteMultipartUpload returns a 200 OK response. For more information about
// multipart uploads, see Uploading Objects Using Multipart Upload
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). For
// information about permissions required to use the multipart upload API, see
// Multipart Upload and Permissions
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
// CompleteMultipartUpload has the following special errors:
@ -124,9 +127,9 @@ type CompleteMultipartUploadInput struct {
// you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
// hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
// this action using S3 on Outposts through the Amazon Web Services SDKs, you
// this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts bucket ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see Using S3 on Outposts
// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
// Amazon S3 User Guide.
//
@ -143,8 +146,41 @@ type CompleteMultipartUploadInput struct {
// This member is required.
UploadId *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
// base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
// Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumCRC32 *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
// base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
// Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
// base64-encoded, 160-bit SHA-1 digest of the object. For more information, see
// Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
// base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
// Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumSHA256 *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
// The container for the multipart upload request information.
@ -152,12 +188,33 @@ type CompleteMultipartUploadInput struct {
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from requester pays buckets, see Downloading Objects
// in Requestor Pays Buckets
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
// The server-side encryption (SSE) algorithm used to encrypt the object. This
// parameter is needed only when the object was created using a checksum algorithm.
// For more information, see Protecting data using SSE-C keys
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
// in the Amazon S3 User Guide.
SSECustomerAlgorithm *string
// The server-side encryption (SSE) customer managed key. This parameter is needed
// only when the object was created using a checksum algorithm. For more
// information, see Protecting data using SSE-C keys
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
// in the Amazon S3 User Guide.
SSECustomerKey *string
// The MD5 server-side encryption (SSE) customer managed key. This parameter is
// needed only when the object was created using a checksum algorithm. For more
// information, see Protecting data using SSE-C keys
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
// in the Amazon S3 User Guide.
SSECustomerKeyMD5 *string
noSmithyDocumentSerde
}
@ -176,9 +233,9 @@ type CompleteMultipartUploadOutput struct {
// you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
// hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
// this action using S3 on Outposts through the Amazon Web Services SDKs, you
// this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts bucket ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see Using S3 on Outposts
// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
// Amazon S3 User Guide.
Bucket *string
@ -187,16 +244,51 @@ type CompleteMultipartUploadOutput struct {
// encryption with Amazon Web Services KMS (SSE-KMS).
BucketKeyEnabled bool
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
// present if it was uploaded with the object. With multipart uploads, this may not
// be a checksum value of the object. For more information about how checksums are
// calculated with multipart uploads, see Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32 *string
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
// present if it was uploaded with the object. With multipart uploads, this may not
// be a checksum value of the object. For more information about how checksums are
// calculated with multipart uploads, see Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
// present if it was uploaded with the object. With multipart uploads, this may not
// be a checksum value of the object. For more information about how checksums are
// calculated with multipart uploads, see Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
// present if it was uploaded with the object. With multipart uploads, this may not
// be a checksum value of the object. For more information about how checksums are
// calculated with multipart uploads, see Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA256 *string
// Entity tag that identifies the newly created object's data. Objects with
// different object data will have different entity tags. The entity tag is an
// opaque string. The entity tag may or may not be an MD5 digest of the object
// data. If the entity tag is not an MD5 digest of the object data, it will contain
// one or more nonhexadecimal characters and/or will consist of less than 32 or
// more than 32 hexadecimal digits.
// more than 32 hexadecimal digits. For more information about how the entity tag
// is calculated, see Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ETag *string
// If the object expiration is configured, this will contain the expiration date
// (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
// (expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded.
Expiration *string
// The object key of the newly created object.

Some files were not shown because too many files have changed in this diff Show More