Mirror of https://github.com/crazy-max/diun.git (synced 2025-12-24 06:28:13 +01:00)

vendor: update to github.com/hashicorp/nomad/api v1.10.4

go.mod (3 changed lines)

@@ -21,7 +21,7 @@ require (
 	github.com/eclipse/paho.mqtt.golang v1.5.0
 	github.com/go-gomail/gomail v0.0.0-20160411212932-81ebce5c23df
 	github.com/go-playground/validator/v10 v10.27.0
-	github.com/hashicorp/nomad/api v0.0.0-20231213195942-64e3dca9274b // v1.7.2
+	github.com/hashicorp/nomad/api v0.0.0-20250812204832-62b195aaa535 // v1.10.4
 	github.com/jedib0t/go-pretty/v6 v6.6.8
 	github.com/matcornic/hermes/v2 v2.1.0
 	github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16
@@ -135,7 +135,6 @@ require (
 	go.opentelemetry.io/otel/metric v1.36.0 // indirect
 	go.opentelemetry.io/otel/trace v1.36.0 // indirect
 	golang.org/x/crypto v0.40.0 // indirect
-	golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
 	golang.org/x/net v0.41.0 // indirect
 	golang.org/x/oauth2 v0.30.0 // indirect
 	golang.org/x/sync v0.16.0 // indirect

go.sum (12 changed lines)

@@ -180,8 +180,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
 github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/nomad/api v0.0.0-20231213195942-64e3dca9274b h1:R1UDhkwGltpSPY9bCBBxIMQd+NY9BkN0vFHnJo/8o8w=
-github.com/hashicorp/nomad/api v0.0.0-20231213195942-64e3dca9274b/go.mod h1:ijDwa6o1uG1jFSq6kERiX2PamKGpZzTmo0XOFNeFZgw=
+github.com/hashicorp/nomad/api v0.0.0-20250812204832-62b195aaa535 h1:DZhOjjH4Gf6TOQYCDYwWzmnY9/jKeUWnaKEdAybhnEM=
+github.com/hashicorp/nomad/api v0.0.0-20250812204832-62b195aaa535/go.mod h1:y4olHzVXiQolzyk6QD/gqJxQTnnchlTf/QtczFFKwOI=
 github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
 github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
 github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
@@ -240,8 +240,6 @@ github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwX
 github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
-github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/moby/buildkit v0.23.2 h1:gt/dkfcpgTXKx+B9I310kV767hhVqTvEyxGgI3mqsGQ=
@@ -317,8 +315,8 @@ github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/shoenig/test v1.7.0 h1:eWcHtTXa6QLnBvm0jgEabMRN/uJ4DMV3M8xUGgRkZmk=
-github.com/shoenig/test v1.7.0/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI=
+github.com/shoenig/test v1.12.1 h1:mLHfnMv7gmhhP44WrvT+nKSxKkPDiNkIuHGdIGI9RLU=
+github.com/shoenig/test v1.12.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
@@ -386,8 +384,6 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
 golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
-golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
-golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=

vendor/github.com/hashicorp/nomad/api/.copywrite.hcl (generated, vendored; 8 changed lines)

@@ -2,7 +2,11 @@ schema_version = 1
 
 project {
   license = "MPL-2.0"
-  copyright_year = 2023
+  copyright_year = 2024
 
-  header_ignore = []
+  header_ignore = [
+    // Enterprise files do not fall under the open source licensing. CE-ENT
+    // merge conflicts might happen here, please be sure to put new CE
+    // exceptions above this comment.
+  ]
 }

vendor/github.com/hashicorp/nomad/api/acl.go (generated, vendored; 133 changed lines)

@@ -67,6 +67,16 @@ func (a *ACLPolicies) Info(policyName string, q *QueryOptions) (*ACLPolicy, *Que
 	return &resp, wm, nil
 }
 
+// Self is used to query policies attached to a workload identity
+func (a *ACLPolicies) Self(q *QueryOptions) ([]*ACLPolicyListStub, *QueryMeta, error) {
+	var resp []*ACLPolicyListStub
+	wm, err := a.client.query("/v1/acl/policy/self", &resp, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return resp, wm, nil
+}
+
 // ACLTokens is used to query the ACL token endpoints.
 type ACLTokens struct {
 	client *Client
@@ -509,6 +519,7 @@ func (a *ACLAuth) Login(req *ACLLoginRequest, q *WriteOptions) (*ACLToken, *Writ
 type ACLPolicyListStub struct {
 	Name        string
 	Description string
+	JobACL      *JobACL
 	CreateIndex uint64
 	ModifyIndex uint64
 }
@@ -826,6 +837,13 @@ type ACLAuthMethodConfig struct {
 	OIDCClientID string
 	// The OAuth Client Secret configured with the OIDC provider
 	OIDCClientSecret string
+	// Optionally send a signed JWT ("private key jwt") as a client assertion
+	// to the OIDC provider
+	OIDCClientAssertion *OIDCClientAssertion
+	// Enable S256 PKCE challenge verification.
+	OIDCEnablePKCE bool
+	// Disable claims from the OIDC UserInfo endpoint
+	OIDCDisableUserInfo bool
 	// List of OIDC scopes
 	OIDCScopes []string
 	// List of auth claims that are valid for login
@@ -855,6 +873,9 @@ type ACLAuthMethodConfig struct {
 	// (value).
 	ClaimMappings     map[string]string
 	ListClaimMappings map[string]string
+	// Enables logging of claims and binding-rule evaluations when
+	// debug level logging is enabled.
+	VerboseLogging bool
 }
 
 // MarshalJSON implements the json.Marshaler interface and allows
@@ -945,6 +966,118 @@ func (c *ACLAuthMethodConfig) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
+// OIDCClientAssertionKeySource specifies what key material should be used
+// to sign an OIDCClientAssertion.
+type OIDCClientAssertionKeySource string
+
+const (
+	// OIDCKeySourceNomad signs the OIDCClientAssertion JWT with Nomad's
+	// internal private key. Its public key is exposed at /.well-known/jwks.json
+	OIDCKeySourceNomad OIDCClientAssertionKeySource = "nomad"
+	// OIDCKeySourcePrivateKey signs the OIDCClientAssertion JWT with
+	// key material defined in OIDCClientAssertion.PrivateKey
+	OIDCKeySourcePrivateKey OIDCClientAssertionKeySource = "private_key"
+	// OIDCKeySourceClientSecret signs the OIDCClientAssertion JWT with
+	// ACLAuthMethod.ClientSecret
+	OIDCKeySourceClientSecret OIDCClientAssertionKeySource = "client_secret"
+)
+
+// OIDCClientAssertion (a.k.a private_key_jwt) is used to send
+// a client_assertion along with an OIDC token request.
+// Reference: https://oauth.net/private-key-jwt/
+// See also: structs.OIDCClientAssertion
+type OIDCClientAssertion struct {
+	// Audience is/are who will be processing the assertion.
+	// Defaults to the parent `ACLAuthMethodConfig`'s `OIDCDiscoveryURL`
+	Audience []string
+
+	// KeySource is where to get the private key to sign the JWT.
+	// It is the one field that *must* be set to enable client assertions.
+	// Available sources:
+	// - "nomad": Use current active key in Nomad's keyring
+	// - "private_key": Use key material in the `PrivateKey` field
+	// - "client_secret": Use the `OIDCClientSecret` inherited from the parent
+	//   `ACLAuthMethodConfig` as an HMAC key
+	KeySource OIDCClientAssertionKeySource
+
+	// KeyAlgorithm is the key's algorithm.
+	// Its default values are based on the `KeySource`:
+	// - "nomad": "RS256" (from Nomad's keyring, must not be changed)
+	// - "private_key": "RS256" (must be RS256, RS384, or RS512)
+	// - "client_secret": "HS256" (must be HS256, HS384, or HS512)
+	KeyAlgorithm string
+
+	// PrivateKey contains external key material provided by users.
+	// `KeySource` must be "private_key" to enable this.
+	PrivateKey *OIDCClientAssertionKey
+
+	// ExtraHeaders are added to the JWT headers, alongside "kid" and "type"
+	// Setting the "kid" header here is not allowed; use `PrivateKey.KeyID`.
+	ExtraHeaders map[string]string
+}
+
+// OIDCClientAssertionKeyIDHeader is the header that the OIDC provider will use
+// to look up the certificate or public key that it needs to verify the
+// private key JWT signature.
+type OIDCClientAssertionKeyIDHeader string
+
+const (
+	OIDCClientAssertionHeaderKid     OIDCClientAssertionKeyIDHeader = "kid"
+	OIDCClientAssertionHeaderX5t     OIDCClientAssertionKeyIDHeader = "x5t"
+	OIDCClientAssertionHeaderX5tS256 OIDCClientAssertionKeyIDHeader = "x5t#S256"
+)
+
+// OIDCClientAssertionKey contains key material provided by users for Nomad
+// to use to sign the private key JWT.
+//
+// PemKey or PemKeyFile must contain an RSA private key in PEM format.
+//
+// PemCert, PemCertFile may contain an x509 certificate created with
+// the Key, used to derive the KeyID. Alternatively, KeyID may be set manually.
+//
+// PemKeyFile and PemCertFile, if set, must be an absolute path to a file
+// present on disk on any Nomad servers that may become cluster leaders.
+type OIDCClientAssertionKey struct {
+	// PemKey is an RSA private key, in pem format. It is used to sign the JWT.
+	// Mutually exclusive with `PemKeyFile`.
+	PemKey string
+	// PemKeyFile is an absolute path to a private key on Nomad servers' disk,
+	// in pem format. It is used to sign the JWT.
+	// Mutually exclusive with `PemKey`.
+	PemKeyFile string
+
+	// KeyIDHeader is which header the provider will use to find the
+	// public key to verify the signed JWT. Its default values vary
+	// based on which of the other required fields is set:
+	// - KeyID: "kid"
+	// - PemCert: "x5t#S256"
+	// - PemCertFile: "x5t#S256"
+	//
+	// Refer to the JWS RFC for information on these headers:
+	// - "kid": https://datatracker.ietf.org/doc/html/rfc7515#section-4.1.4
+	// - "x5t": https://datatracker.ietf.org/doc/html/rfc7515#section-4.1.7
+	// - "x5t#S256": https://datatracker.ietf.org/doc/html/rfc7515#section-4.1.8
+	//
+	// If you need to set some other header not supported here,
+	// you may use OIDCClientAssertion.ExtraHeaders.
+	KeyIDHeader OIDCClientAssertionKeyIDHeader
+	// KeyID may be set manually and becomes the "kid" header.
+	// Mutually exclusive with `PemCert` and `PemCertFile`.
+	// Allowed `KeyIDHeader` values: "kid" (the default)
+	KeyID string
+	// PemCert is an x509 certificate, signed by the private key or a CA,
+	// in pem format. It is used to derive an x5t#S256 (or x5t) header.
+	// Mutually exclusive with `PemCertFile` and `KeyID`.
+	// Allowed `KeyIDHeader` values: "x5t", "x5t#S256" (default "x5t#S256")
+	PemCert string
+	// PemCertFile is an absolute path to an x509 certificate on Nomad servers'
+	// disk, signed by the private key or a CA, in pem format.
+	// It is used to derive an x5t#S256 (or x5t) header.
+	// Mutually exclusive with `PemCert` and `KeyID`.
+	// Allowed `KeyIDHeader` values: "x5t", "x5t#S256" (default "x5t#S256")
+	PemCertFile string
+}
+
 // ACLAuthMethodListStub is the stub object returned when performing a listing
 // of ACL auth-methods. It is intentionally minimal due to the unauthenticated
 // nature of the list endpoint.
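
The acl.go changes above are mostly new API surface for OIDC "private key JWT" client assertions. As a rough orientation only, a caller of this vendored package could populate the new fields along the lines of the sketch below; the package alias, client ID, key path and key ID are placeholders and are not taken from this commit:

package main

import (
	"fmt"

	api "github.com/hashicorp/nomad/api"
)

func main() {
	// Sketch only: exercise the new client-assertion fields on an OIDC
	// auth-method configuration. All values are placeholders.
	cfg := &api.ACLAuthMethodConfig{
		OIDCClientID:   "nomad",            // hypothetical client ID
		OIDCEnablePKCE: true,               // new field: S256 PKCE verification
		OIDCScopes:     []string{"openid"},
		OIDCClientAssertion: &api.OIDCClientAssertion{
			KeySource:    api.OIDCKeySourcePrivateKey,
			KeyAlgorithm: "RS256",
			PrivateKey: &api.OIDCClientAssertionKey{
				PemKeyFile:  "/etc/nomad.d/oidc-key.pem", // hypothetical path
				KeyIDHeader: api.OIDCClientAssertionHeaderKid,
				KeyID:       "example-key-id", // hypothetical key ID
			},
		},
	}
	fmt.Printf("%+v\n", cfg)
}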

vendor/github.com/hashicorp/nomad/api/agent.go (generated, vendored; 18 changed lines)

@@ -156,7 +156,7 @@ func (a *Agent) Members() (*ServerMembers, error) {
 	return resp, nil
 }
 
-// Members is used to query all of the known server members
+// MembersOpts is used to query all of the known server members
 // with the ability to set QueryOptions
 func (a *Agent) MembersOpts(opts *QueryOptions) (*ServerMembers, error) {
 	var resp *ServerMembers
@@ -302,15 +302,27 @@ func (a *Agent) Host(serverID, nodeID string, q *QueryOptions) (*HostDataRespons
 // Monitor returns a channel which will receive streaming logs from the agent
 // Providing a non-nil stopCh can be used to close the connection and stop log streaming
 func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) {
+	frames, errCh := a.monitorHelper(stopCh, q, "/v1/agent/monitor")
+	return frames, errCh
+}
+
+// MonitorExport returns a channel which will receive streaming logs from the agent
+// Providing a non-nil stopCh can be used to close the connection and stop log streaming
+func (a *Agent) MonitorExport(stopCh <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) {
+	frames, errCh := a.monitorHelper(stopCh, q, "/v1/agent/monitor/export")
+	return frames, errCh
+}
+
+func (a *Agent) monitorHelper(stopCh <-chan struct{}, q *QueryOptions, path string) (chan *StreamFrame, chan error) {
 	errCh := make(chan error, 1)
-	r, err := a.client.newRequest("GET", "/v1/agent/monitor")
+	r, err := a.client.newRequest("GET", path)
 	if err != nil {
 		errCh <- err
 		return nil, errCh
 	}
 
 	r.setQueryOptions(q)
-	_, resp, err := requireOK(a.client.doRequest(r))
+	_, resp, err := requireOK(a.client.doRequest(r)) //nolint:bodyclose
 	if err != nil {
 		errCh <- err
 		return nil, errCh
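
agent.go now routes both Monitor and the new MonitorExport through a shared monitorHelper that takes the endpoint path. A minimal consumer sketch, assuming the package is imported as api and the agent address comes from the usual environment defaults:

package main

import (
	"fmt"
	"os"

	api "github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// Closing stopCh ends the streaming connection.
	stopCh := make(chan struct{})
	defer close(stopCh)

	// MonitorExport streams from /v1/agent/monitor/export; Monitor has the
	// same signature and streams from /v1/agent/monitor.
	frames, errCh := client.Agent().MonitorExport(stopCh, nil)
	for {
		select {
		case frame, ok := <-frames:
			if !ok {
				return
			}
			os.Stdout.Write(frame.Data) // StreamFrame carries raw log bytes in Data
		case err := <-errCh:
			fmt.Fprintln(os.Stderr, err)
			return
		}
	}
}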

vendor/github.com/hashicorp/nomad/api/allocations.go (generated, vendored; 41 changed lines)

@@ -235,6 +235,27 @@ func (a *Allocations) Signal(alloc *Allocation, q *QueryOptions, task, signal st
 	return err
 }
 
+// SetPauseState sets the schedule behavior of one task in the allocation.
+func (a *Allocations) SetPauseState(alloc *Allocation, q *QueryOptions, task, state string) error {
+	req := AllocPauseRequest{
+		ScheduleState: state,
+		Task:          task,
+	}
+	var resp GenericResponse
+	_, err := a.client.putQuery("/v1/client/allocation/"+alloc.ID+"/pause", &req, &resp, q)
+	return err
+}
+
+// GetPauseState gets the schedule behavior of one task in the allocation.
+//
+// The ?task=<task> query parameter must be set.
+func (a *Allocations) GetPauseState(alloc *Allocation, q *QueryOptions, task string) (string, *QueryMeta, error) {
+	var resp AllocGetPauseResponse
+	qm, err := a.client.query("/v1/client/allocation/"+alloc.ID+"/pause?task="+task, &resp, q)
+	state := resp.ScheduleState
+	return state, qm, err
+}
+
 // Services is used to return a list of service registrations associated to the
 // specified allocID.
 func (a *Allocations) Services(allocID string, q *QueryOptions) ([]*ServiceRegistration, *QueryMeta, error) {
@@ -286,6 +307,7 @@ type AllocationMetric struct {
 	NodesEvaluated     int
 	NodesFiltered      int
 	NodesInPool        int
+	NodePool           string
 	NodesAvailable     map[string]int
 	ClassFiltered      map[string]int
 	ConstraintFiltered map[string]int
@@ -414,6 +436,7 @@ type AllocDeploymentStatus struct {
 type AllocNetworkStatus struct {
 	InterfaceName string
 	Address       string
+	AddressIPv6   string
 	DNS           *DNSConfig
 }
 
@@ -517,6 +540,21 @@ type AllocSignalRequest struct {
 	Signal string
 }
 
+type AllocPauseRequest struct {
+	Task string
+
+	// ScheduleState must be one of "pause", "run", "scheduled".
+	ScheduleState string
+}
+
+type AllocGetPauseResponse struct {
+	// ScheduleState will be one of "" (run), "force_run", "scheduled_pause",
+	// "force_pause", or "schedule_resume".
+	//
+	// See nomad/structs/task_sched.go for details.
+	ScheduleState string
+}
+
 // GenericResponse is used to respond to a request where no
 // specific response information is needed.
 type GenericResponse struct {
@@ -525,7 +563,8 @@ type GenericResponse struct {
 
 // RescheduleTracker encapsulates previous reschedule events
 type RescheduleTracker struct {
 	Events []*RescheduleEvent
+	LastReschedule string
 }
 
 // RescheduleEvent is used to keep track of previous attempts at rescheduling an allocation
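
The new pause helpers above wrap PUT and GET requests against /v1/client/allocation/<id>/pause. A small sketch of how they might be called through this package; the allocation ID, task name and state value are placeholders:

package main

import (
	"fmt"

	api "github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// Placeholder allocation; only the ID is used to build the request path.
	alloc := &api.Allocation{ID: "00000000-0000-0000-0000-000000000000"}

	// Pause scheduling of one task in the allocation.
	if err := client.Allocations().SetPauseState(alloc, nil, "redis", "pause"); err != nil {
		panic(err)
	}

	// Read the schedule state back; the helper adds the ?task= parameter.
	state, _, err := client.Allocations().GetPauseState(alloc, nil, "redis")
	if err != nil {
		panic(err)
	}
	fmt.Println("schedule state:", state)
}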

vendor/github.com/hashicorp/nomad/api/allocations_exec.go (generated, vendored; 4 changed lines)

@@ -109,11 +109,11 @@ func (s *execSession) startConnection() (*websocket.Conn, error) {
 	var conn *websocket.Conn
 
 	if nodeClient != nil {
-		conn, _, _ = nodeClient.websocket(reqPath, q)
+		conn, _, _ = nodeClient.websocket(reqPath, q) //nolint:bodyclose // gorilla/websocket Dialer.DialContext() does not require the body to be closed.
 	}
 
 	if conn == nil {
-		conn, _, err = s.client.websocket(reqPath, q)
+		conn, _, err = s.client.websocket(reqPath, q) //nolint:bodyclose // gorilla/websocket Dialer.DialContext() does not require the body to be closed.
 		if err != nil {
 			return nil, err
 		}

vendor/github.com/hashicorp/nomad/api/api.go (generated, vendored; 28 changed lines)

@@ -234,7 +234,6 @@ func (c *Config) ClientConfig(region, address string, tlsEnabled bool) *Config {
 		HttpAuth:  c.HttpAuth,
 		WaitTime:  c.WaitTime,
 		TLSConfig: c.TLSConfig.Copy(),
-		url:       copyURL(c.url),
 	}
 
 	// Update the tls server name for connecting to a client
@@ -435,7 +434,7 @@ func cloneWithTimeout(httpClient *http.Client, t time.Duration) (*http.Client, e
 	return &nc, nil
 }
 
-// ConfigureTLS applies a set of TLS configurations to the the HTTP client.
+// ConfigureTLS applies a set of TLS configurations to the HTTP client.
 func ConfigureTLS(httpClient *http.Client, tlsConfig *TLSConfig) error {
 	if tlsConfig == nil {
 		return nil
@@ -509,9 +508,13 @@ func NewClient(config *Config) (*Client, error) {
 	}
 
 	// we have to test the address that comes from DefaultConfig, because it
-	// could be the value of NOMAD_ADDR which is applied without testing
-	if config.url, err = url.Parse(config.Address); err != nil {
-		return nil, fmt.Errorf("invalid address '%s': %v", config.Address, err)
+	// could be the value of NOMAD_ADDR which is applied without testing. But
+	// only on the first use of this Config, otherwise we'll have mutated the
+	// address
+	if config.url == nil {
+		if config.url, err = url.Parse(config.Address); err != nil {
+			return nil, fmt.Errorf("invalid address '%s': %v", config.Address, err)
+		}
 	}
 
 	httpClient := config.HttpClient
@@ -932,7 +935,7 @@ func (c *Client) rawQuery(endpoint string, q *QueryOptions) (io.ReadCloser, erro
 		return nil, err
 	}
 	r.setQueryOptions(q)
-	_, resp, err := requireOK(c.doRequest(r))
+	_, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility.
 	if err != nil {
 		return nil, err
 	}
@@ -1036,7 +1039,7 @@ func (c *Client) query(endpoint string, out any, q *QueryOptions) (*QueryMeta, e
 		return nil, err
 	}
 	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(c.doRequest(r))
+	rtt, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility.
 	if err != nil {
 		return nil, err
 	}
@@ -1061,7 +1064,7 @@ func (c *Client) putQuery(endpoint string, in, out any, q *QueryOptions) (*Query
 	}
 	r.setQueryOptions(q)
 	r.obj = in
-	rtt, resp, err := requireOK(c.doRequest(r))
+	rtt, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility.
 	if err != nil {
 		return nil, err
 	}
@@ -1092,7 +1095,7 @@ func (c *Client) postQuery(endpoint string, in, out any, q *QueryOptions) (*Quer
 	}
 	r.setQueryOptions(q)
 	r.obj = in
-	rtt, resp, err := requireOK(c.doRequest(r))
+	rtt, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility.
 	if err != nil {
 		return nil, err
 	}
@@ -1125,7 +1128,7 @@ func (c *Client) write(verb, endpoint string, in, out any, q *WriteOptions) (*Wr
 	}
 	r.setWriteOptions(q)
 	r.obj = in
-	rtt, resp, err := requireOK(c.doRequest(r))
+	rtt, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility.
 	if err != nil {
 		return nil, err
 	}
@@ -1151,7 +1154,7 @@ func (c *Client) delete(endpoint string, in, out any, q *WriteOptions) (*WriteMe
 	}
 	r.setWriteOptions(q)
 	r.obj = in
-	rtt, resp, err := requireOK(c.doRequest(r))
+	rtt, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility.
 	if err != nil {
 		return nil, err
 	}
@@ -1184,6 +1187,9 @@ func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
 	if err != nil {
 		return fmt.Errorf("Failed to parse X-Nomad-LastContact: %v", err)
 	}
+	if last > math.MaxInt64 {
+		return fmt.Errorf("Last contact duration is out of range: %d", last)
+	}
 	q.LastContact = time.Duration(last) * time.Millisecond
 	q.NextToken = header.Get("X-Nomad-NextToken")
 

vendor/github.com/hashicorp/nomad/api/consul.go (generated, vendored; 275 changed lines)

@@ -4,9 +4,9 @@
 package api
 
 import (
+	"maps"
+	"slices"
 	"time"
-
-	"golang.org/x/exp/maps"
 )
 
 // Consul represents configuration related to consul.
@@ -16,6 +16,10 @@ type Consul struct {
 
 	// (Enterprise-only) Cluster represents a specific Consul cluster.
 	Cluster string `mapstructure:"cluster" hcl:"cluster,optional"`
+
+	// Partition is the Consul admin partition where the workload should
+	// run. This is available in Nomad CE but only works with Consul ENT
+	Partition string `mapstructure:"partition" hcl:"partition,optional"`
 }
 
 // Canonicalize Consul into a canonical form. The Canonicalize structs containing
@@ -29,6 +33,9 @@ func (c *Consul) Canonicalize() {
 	// we should inherit from higher up (i.e. job<-group). Likewise, if
 	// Namespace is set but empty, that is a choice to use the default consul
 	// namespace.
+
+	// Partition should never be defaulted to "default" because non-ENT Consul
+	// clusters don't have admin partitions
 }
 
 // Copy creates a deep copy of c.
@@ -36,6 +43,7 @@ func (c *Consul) Copy() *Consul {
 	return &Consul{
 		Namespace: c.Namespace,
 		Cluster:   c.Cluster,
+		Partition: c.Partition,
 	}
 }
 
@@ -107,6 +115,7 @@ type SidecarTask struct {
 	LogConfig     *LogConfig     `mapstructure:"logs" hcl:"logs,block"`
 	ShutdownDelay *time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"`
 	KillSignal    string         `mapstructure:"kill_signal" hcl:"kill_signal,optional"`
+	VolumeMounts  []*VolumeMount `hcl:"volume_mount,block"`
 }
 
 func (st *SidecarTask) Canonicalize() {
@@ -145,16 +154,25 @@ func (st *SidecarTask) Canonicalize() {
 	if st.ShutdownDelay == nil {
 		st.ShutdownDelay = pointerOf(time.Duration(0))
 	}
+
+	for _, vm := range st.VolumeMounts {
+		vm.Canonicalize()
+	}
 }
 
 // ConsulProxy represents a Consul Connect sidecar proxy jobspec block.
 type ConsulProxy struct {
 	LocalServiceAddress string              `mapstructure:"local_service_address" hcl:"local_service_address,optional"`
 	LocalServicePort    int                 `mapstructure:"local_service_port" hcl:"local_service_port,optional"`
 	Expose              *ConsulExposeConfig `mapstructure:"expose" hcl:"expose,block"`
 	ExposeConfig        *ConsulExposeConfig // Deprecated: only to maintain backwards compatibility. Use Expose instead.
 	Upstreams           []*ConsulUpstream   `hcl:"upstreams,block"`
-	Config              map[string]interface{} `hcl:"config,block"`
+
+	// TransparentProxy configures the Envoy sidecar to use "transparent
+	// proxying", which creates IP tables rules inside the network namespace to
+	// ensure traffic flows thru the Envoy proxy
+	TransparentProxy *ConsulTransparentProxy `mapstructure:"transparent_proxy" hcl:"transparent_proxy,block"`
+
+	Config map[string]interface{} `hcl:"config,block"`
 }
 
 func (cp *ConsulProxy) Canonicalize() {
@@ -168,6 +186,8 @@ func (cp *ConsulProxy) Canonicalize() {
 		cp.Upstreams = nil
 	}
 
+	cp.TransparentProxy.Canonicalize()
+
 	for _, upstream := range cp.Upstreams {
 		upstream.Canonicalize()
 	}
@@ -199,7 +219,6 @@ type ConsulMeshGateway struct {
 func (c *ConsulMeshGateway) Canonicalize() {
 	// Mode may be empty string, indicating behavior will defer to Consul
 	// service-defaults config entry.
-	return
 }
 
 func (c *ConsulMeshGateway) Copy() *ConsulMeshGateway {
@@ -217,6 +236,7 @@ type ConsulUpstream struct {
 	DestinationName      string `mapstructure:"destination_name" hcl:"destination_name,optional"`
 	DestinationNamespace string `mapstructure:"destination_namespace" hcl:"destination_namespace,optional"`
 	DestinationPeer      string `mapstructure:"destination_peer" hcl:"destination_peer,optional"`
+	DestinationPartition string `mapstructure:"destination_partition" hcl:"destination_partition,optional"`
 	DestinationType      string `mapstructure:"destination_type" hcl:"destination_type,optional"`
 	LocalBindPort        int    `mapstructure:"local_bind_port" hcl:"local_bind_port,optional"`
 	Datacenter           string `mapstructure:"datacenter" hcl:"datacenter,optional"`
@@ -231,19 +251,11 @@ func (cu *ConsulUpstream) Copy() *ConsulUpstream {
 	if cu == nil {
 		return nil
 	}
-	return &ConsulUpstream{
-		DestinationName:      cu.DestinationName,
-		DestinationNamespace: cu.DestinationNamespace,
-		DestinationPeer:      cu.DestinationPeer,
-		DestinationType:      cu.DestinationType,
-		LocalBindPort:        cu.LocalBindPort,
-		Datacenter:           cu.Datacenter,
-		LocalBindAddress:     cu.LocalBindAddress,
-		LocalBindSocketPath:  cu.LocalBindSocketPath,
-		LocalBindSocketMode:  cu.LocalBindSocketMode,
-		MeshGateway:          cu.MeshGateway.Copy(),
-		Config:               maps.Clone(cu.Config),
-	}
+	up := new(ConsulUpstream)
+	*up = *cu
+	up.MeshGateway = cu.MeshGateway.Copy()
+	up.Config = maps.Clone(cu.Config)
+	return up
 }
 
 func (cu *ConsulUpstream) Canonicalize() {
@@ -256,6 +268,61 @@ func (cu *ConsulUpstream) Canonicalize() {
 	}
 }
 
+// ConsulTransparentProxy is used to configure the Envoy sidecar for
+// "transparent proxying", which creates IP tables rules inside the network
+// namespace to ensure traffic flows thru the Envoy proxy
+type ConsulTransparentProxy struct {
+	// UID of the Envoy proxy. Defaults to the default Envoy proxy container
+	// image user.
+	UID string `mapstructure:"uid" hcl:"uid,optional"`
+
+	// OutboundPort is the Envoy proxy's outbound listener port. Inbound TCP
+	// traffic hitting the PROXY_IN_REDIRECT chain will be redirected here.
+	// Defaults to 15001.
+	OutboundPort uint16 `mapstructure:"outbound_port" hcl:"outbound_port,optional"`
+
+	// ExcludeInboundPorts is an additional set of ports will be excluded from
+	// redirection to the Envoy proxy. Can be Port.Label or Port.Value. This set
+	// will be added to the ports automatically excluded for the Expose.Port and
+	// Check.Expose fields.
+	ExcludeInboundPorts []string `mapstructure:"exclude_inbound_ports" hcl:"exclude_inbound_ports,optional"`
+
+	// ExcludeOutboundPorts is a set of outbound ports that will not be
+	// redirected to the Envoy proxy, specified as port numbers.
+	ExcludeOutboundPorts []uint16 `mapstructure:"exclude_outbound_ports" hcl:"exclude_outbound_ports,optional"`
+
+	// ExcludeOutboundCIDRs is a set of outbound CIDR blocks that will not be
+	// redirected to the Envoy proxy.
+	ExcludeOutboundCIDRs []string `mapstructure:"exclude_outbound_cidrs" hcl:"exclude_outbound_cidrs,optional"`
+
+	// ExcludeUIDs is a set of user IDs whose network traffic will not be
+	// redirected through the Envoy proxy.
+	ExcludeUIDs []string `mapstructure:"exclude_uids" hcl:"exclude_uids,optional"`
+
+	// NoDNS disables redirection of DNS traffic to Consul DNS. By default NoDNS
+	// is false and transparent proxy will direct DNS traffic to Consul DNS if
+	// available on the client.
+	NoDNS bool `mapstructure:"no_dns" hcl:"no_dns,optional"`
+}
+
+func (tp *ConsulTransparentProxy) Canonicalize() {
+	if tp == nil {
+		return
+	}
+	if len(tp.ExcludeInboundPorts) == 0 {
+		tp.ExcludeInboundPorts = nil
+	}
+	if len(tp.ExcludeOutboundCIDRs) == 0 {
+		tp.ExcludeOutboundCIDRs = nil
+	}
+	if len(tp.ExcludeOutboundPorts) == 0 {
+		tp.ExcludeOutboundPorts = nil
+	}
+	if len(tp.ExcludeUIDs) == 0 {
+		tp.ExcludeUIDs = nil
+	}
+}
+
 type ConsulExposeConfig struct {
 	Paths []*ConsulExposePath `mapstructure:"path" hcl:"path,block"`
 	Path  []*ConsulExposePath // Deprecated: only to maintain backwards compatibility. Use Paths instead.
@@ -393,12 +460,52 @@ func (p *ConsulGatewayProxy) Copy() *ConsulGatewayProxy {
 	}
 }
 
-// ConsulGatewayTLSConfig is used to configure TLS for a gateway.
+// ConsulGatewayTLSSDSConfig is used to configure the gateway's TLS listener to
+// load certificates from an external Secret Discovery Service (SDS)
+type ConsulGatewayTLSSDSConfig struct {
+	// ClusterName specifies the name of the SDS cluster where Consul should
+	// retrieve certificates.
+	ClusterName string `hcl:"cluster_name,optional" mapstructure:"cluster_name"`
+
+	// CertResource specifies an SDS resource name
+	CertResource string `hcl:"cert_resource,optional" mapstructure:"cert_resource"`
+}
+
+func (c *ConsulGatewayTLSSDSConfig) Copy() *ConsulGatewayTLSSDSConfig {
+	if c == nil {
+		return nil
+	}
+
+	return &ConsulGatewayTLSSDSConfig{
+		ClusterName:  c.ClusterName,
+		CertResource: c.CertResource,
+	}
+}
+
+// ConsulGatewayTLSConfig is used to configure TLS for a gateway. Both
+// ConsulIngressConfigEntry and ConsulIngressService use this struct. For more
+// details, consult the Consul documentation:
+// https://developer.hashicorp.com/consul/docs/connect/config-entries/ingress-gateway#listeners-services-tls
 type ConsulGatewayTLSConfig struct {
-	Enabled       bool     `hcl:"enabled,optional"`
-	TLSMinVersion string   `hcl:"tls_min_version,optional" mapstructure:"tls_min_version"`
-	TLSMaxVersion string   `hcl:"tls_max_version,optional" mapstructure:"tls_max_version"`
-	CipherSuites  []string `hcl:"cipher_suites,optional" mapstructure:"cipher_suites"`
+	// Enabled indicates whether TLS is enabled for the configuration entry
+	Enabled bool `hcl:"enabled,optional"`
+
+	// TLSMinVersion specifies the minimum TLS version supported for gateway
+	// listeners.
+	TLSMinVersion string `hcl:"tls_min_version,optional" mapstructure:"tls_min_version"`
+
+	// TLSMaxVersion specifies the maxmimum TLS version supported for gateway
+	// listeners.
+	TLSMaxVersion string `hcl:"tls_max_version,optional" mapstructure:"tls_max_version"`
+
+	// CipherSuites specifies a list of cipher suites that gateway listeners
+	// support when negotiating connections using TLS 1.2 or older.
+	CipherSuites []string `hcl:"cipher_suites,optional" mapstructure:"cipher_suites"`
+
+	// SDS specifies parameters that configure the listener to load TLS
+	// certificates from an external Secrets Discovery Service (SDS).
+	SDS *ConsulGatewayTLSSDSConfig `hcl:"sds,block" mapstructure:"sds"`
 }
 
 func (tc *ConsulGatewayTLSConfig) Canonicalize() {
@@ -413,6 +520,7 @@ func (tc *ConsulGatewayTLSConfig) Copy() *ConsulGatewayTLSConfig {
 		Enabled:       tc.Enabled,
 		TLSMinVersion: tc.TLSMinVersion,
 		TLSMaxVersion: tc.TLSMaxVersion,
+		SDS:           tc.SDS.Copy(),
 	}
 	if len(tc.CipherSuites) != 0 {
 		cipherSuites := make([]string, len(tc.CipherSuites))
@@ -423,13 +531,90 @@ func (tc *ConsulGatewayTLSConfig) Copy() *ConsulGatewayTLSConfig {
 	return result
 }
 
-// ConsulIngressService is used to configure a service fronted by the ingress gateway.
+// ConsulHTTPHeaderModifiers is a set of rules for HTTP header modification that
+// should be performed by proxies as the request passes through them. It can
+// operate on either request or response headers depending on the context in
+// which it is used.
+type ConsulHTTPHeaderModifiers struct {
+	// Add is a set of name -> value pairs that should be appended to the
+	// request or response (i.e. allowing duplicates if the same header already
+	// exists).
+	Add map[string]string `hcl:"add,block" mapstructure:"add"`
+
+	// Set is a set of name -> value pairs that should be added to the request
+	// or response, overwriting any existing header values of the same name.
+	Set map[string]string `hcl:"set,block" mapstructure:"set"`
+
+	// Remove is the set of header names that should be stripped from the
+	// request or response.
+	Remove []string `hcl:"remove,optional" mapstructure:"remove"`
+}
+
+func (h *ConsulHTTPHeaderModifiers) Copy() *ConsulHTTPHeaderModifiers {
+	if h == nil {
+		return nil
+	}
+
+	return &ConsulHTTPHeaderModifiers{
+		Add:    maps.Clone(h.Add),
+		Set:    maps.Clone(h.Set),
+		Remove: slices.Clone(h.Remove),
+	}
+}
+
+func (h *ConsulHTTPHeaderModifiers) Canonicalize() {
+	if h == nil {
+		return
+	}
+
+	if len(h.Add) == 0 {
+		h.Add = nil
+	}
+	if len(h.Set) == 0 {
+		h.Set = nil
+	}
+	if len(h.Remove) == 0 {
+		h.Remove = nil
+	}
+}
+
+// ConsulIngressService is used to configure a service fronted by the ingress
+// gateway. For more details, consult the Consul documentation:
+// https://developer.hashicorp.com/consul/docs/connect/config-entries/ingress-gateway
 type ConsulIngressService struct {
 	// Namespace is not yet supported.
 	// Namespace string
 
+	// Name of the service exposed through this listener.
 	Name string `hcl:"name,optional"`
+
+	// Hosts specifies one or more hosts that the listening services can receive
+	// requests on.
 	Hosts []string `hcl:"hosts,optional"`
+
+	// TLS specifies a TLS configuration override for a specific service. If
+	// unset this will fallback to the ConsulIngressConfigEntry's own TLS field.
+	TLS *ConsulGatewayTLSConfig `hcl:"tls,block" mapstructure:"tls"`
+
+	// RequestHeaders specifies a set of HTTP-specific header modification rules
+	// applied to requests routed through the gateway
+	RequestHeaders *ConsulHTTPHeaderModifiers `hcl:"request_headers,block" mapstructure:"request_headers"`
+
+	// ResponseHeader specifies a set of HTTP-specific header modification rules
+	// applied to responses routed through the gateway
+	ResponseHeaders *ConsulHTTPHeaderModifiers `hcl:"response_headers,block" mapstructure:"response_headers"`
+
+	// MaxConnections specifies the maximum number of HTTP/1.1 connections a
+	// service instance is allowed to establish against the upstream
+	MaxConnections *uint32 `hcl:"max_connections,optional" mapstructure:"max_connections"`
+
+	// MaxPendingRequests specifies the maximum number of requests that are
+	// allowed to queue while waiting to establish a connection
+	MaxPendingRequests *uint32 `hcl:"max_pending_requests,optional" mapstructure:"max_pending_requests"`
+
+	// MaxConcurrentRequests specifies the maximum number of concurrent HTTP/2
+	// traffic requests that are allowed at a single point in time
+	MaxConcurrentRequests *uint32 `hcl:"max_concurrent_requests,optional" mapstructure:"max_concurrent_requests"`
 }
 
 func (s *ConsulIngressService) Canonicalize() {
@@ -440,6 +625,9 @@ func (s *ConsulIngressService) Canonicalize() {
 	if len(s.Hosts) == 0 {
 		s.Hosts = nil
 	}
+
+	s.RequestHeaders.Canonicalize()
+	s.ResponseHeaders.Canonicalize()
 }
 
 func (s *ConsulIngressService) Copy() *ConsulIngressService {
@@ -447,16 +635,19 @@ func (s *ConsulIngressService) Copy() *ConsulIngressService {
 		return nil
 	}
 
-	var hosts []string = nil
-	if n := len(s.Hosts); n > 0 {
-		hosts = make([]string, n)
-		copy(hosts, s.Hosts)
-	}
-
-	return &ConsulIngressService{
-		Name:  s.Name,
-		Hosts: hosts,
-	}
+	ns := new(ConsulIngressService)
+	*ns = *s
+
+	ns.Hosts = slices.Clone(s.Hosts)
+	ns.RequestHeaders = s.RequestHeaders.Copy()
+	ns.ResponseHeaders = s.ResponseHeaders.Copy()
+	ns.TLS = s.TLS.Copy()
+
+	ns.MaxConnections = pointerCopy(s.MaxConnections)
+	ns.MaxPendingRequests = pointerCopy(s.MaxPendingRequests)
+	ns.MaxConcurrentRequests = pointerCopy(s.MaxConcurrentRequests)
+
+	return ns
}
 
 const (
@@ -514,7 +705,11 @@ type ConsulIngressConfigEntry struct {
 	// Namespace is not yet supported.
 	// Namespace string
 
-	TLS *ConsulGatewayTLSConfig `hcl:"tls,block"`
+	// TLS specifies a TLS configuration for the gateway.
+	TLS *ConsulGatewayTLSConfig `hcl:"tls,block"`
+
+	// Listeners specifies a list of listeners in the mesh for the
+	// gateway. Listeners are uniquely identified by their port number.
 	Listeners []*ConsulIngressListener `hcl:"listener,block"`
 }
 
@@ -630,9 +825,7 @@ type ConsulMeshConfigEntry struct {
 	// nothing in here
 }
 
-func (e *ConsulMeshConfigEntry) Canonicalize() {
-	return
-}
+func (e *ConsulMeshConfigEntry) Canonicalize() {}
 
 func (e *ConsulMeshConfigEntry) Copy() *ConsulMeshConfigEntry {
 	if e == nil {
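
Most of the consul.go additions are new Connect gateway and transparent proxy structures. A sketch of the new transparent_proxy block expressed through these structs; the UID, port label and port numbers are placeholders, while 15001 is the default documented on OutboundPort:

package main

import (
	"fmt"

	api "github.com/hashicorp/nomad/api"
)

func main() {
	// Sketch only: build a ConsulTransparentProxy block and attach it to a
	// Connect sidecar proxy definition.
	tproxy := &api.ConsulTransparentProxy{
		UID:                  "101",               // placeholder Envoy proxy UID
		OutboundPort:         15001,               // documented default outbound listener port
		ExcludeInboundPorts:  []string{"metrics"}, // port label or value, placeholder
		ExcludeOutboundPorts: []uint16{8500},      // placeholder
		NoDNS:                false,
	}
	// Canonicalize is nil-safe and trims empty slices to nil.
	tproxy.Canonicalize()

	proxy := &api.ConsulProxy{TransparentProxy: tproxy}
	fmt.Printf("%+v\n", proxy.TransparentProxy)
}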

vendor/github.com/hashicorp/nomad/api/contexts/contexts.go (generated, vendored; 1 changed line)

@@ -23,6 +23,7 @@ const (
 	Plugins     Context = "plugins"
 	Variables   Context = "vars"
 	Volumes     Context = "volumes"
+	HostVolumes Context = "host_volumes"
 
 	// These Context types are used to associate a search result from a lower
 	// level Nomad object with one of the higher level Context types above.
89
vendor/github.com/hashicorp/nomad/api/csi.go
generated
vendored
89
vendor/github.com/hashicorp/nomad/api/csi.go
generated
vendored
@@ -76,13 +76,27 @@ func (v *CSIVolumes) Info(id string, q *QueryOptions) (*CSIVolume, *QueryMeta, e
|
|||||||
// Register registers a single CSIVolume with Nomad. The volume must already
|
// Register registers a single CSIVolume with Nomad. The volume must already
|
||||||
// exist in the external storage provider.
|
// exist in the external storage provider.
|
||||||
func (v *CSIVolumes) Register(vol *CSIVolume, w *WriteOptions) (*WriteMeta, error) {
|
func (v *CSIVolumes) Register(vol *CSIVolume, w *WriteOptions) (*WriteMeta, error) {
|
||||||
req := CSIVolumeRegisterRequest{
|
req := &CSIVolumeRegisterRequest{
|
||||||
Volumes: []*CSIVolume{vol},
|
Volumes: []*CSIVolume{vol},
|
||||||
}
|
}
|
||||||
meta, err := v.client.put("/v1/volume/csi/"+vol.ID, req, nil, w)
|
_, meta, err := v.RegisterOpts(req, w)
|
||||||
return meta, err
|
return meta, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// RegisterOpts registers a single CSIVolume with Nomad. The volume must already
|
||||||
|
// exist in the external storage provider. It expects a single volume in the
|
||||||
|
// request.
|
||||||
|
func (v *CSIVolumes) RegisterOpts(req *CSIVolumeRegisterRequest, w *WriteOptions) (*CSIVolumeRegisterResponse, *WriteMeta, error) {
|
||||||
|
if w == nil {
|
||||||
|
w = &WriteOptions{}
|
||||||
|
}
|
||||||
|
vol := req.Volumes[0]
|
||||||
|
resp := &CSIVolumeRegisterResponse{}
|
||||||
|
meta, err := v.client.put("/v1/volume/csi/"+vol.ID, req, resp, w)
|
||||||
|
|
||||||
|
return resp, meta, err
|
||||||
|
}
|
||||||
|
|
||||||
// Deregister deregisters a single CSIVolume from Nomad. The volume will not be deleted from the external storage provider.
|
// Deregister deregisters a single CSIVolume from Nomad. The volume will not be deleted from the external storage provider.
|
||||||
func (v *CSIVolumes) Deregister(id string, force bool, w *WriteOptions) error {
|
func (v *CSIVolumes) Deregister(id string, force bool, w *WriteOptions) error {
|
||||||
_, err := v.client.delete(fmt.Sprintf("/v1/volume/csi/%v?force=%t", url.PathEscape(id), force), nil, nil, w)
|
_, err := v.client.delete(fmt.Sprintf("/v1/volume/csi/%v?force=%t", url.PathEscape(id), force), nil, nil, w)
|
||||||
@@ -97,15 +111,28 @@ func (v *CSIVolumes) Create(vol *CSIVolume, w *WriteOptions) ([]*CSIVolume, *Wri
|
|||||||
Volumes: []*CSIVolume{vol},
|
Volumes: []*CSIVolume{vol},
|
||||||
}
|
}
|
||||||
|
|
||||||
resp := &CSIVolumeCreateResponse{}
|
resp, meta, err := v.CreateOpts(&req, w)
|
||||||
meta, err := v.client.put(fmt.Sprintf("/v1/volume/csi/%v/create", vol.ID), req, resp, w)
|
|
||||||
return resp.Volumes, meta, err
|
return resp.Volumes, meta, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// DEPRECATED: will be removed in Nomad 1.4.0
|
// CreateOpts creates a single CSIVolume in an external storage provider and
|
||||||
|
// registers it with Nomad. You do not need to call Register if this call is
|
||||||
|
// successful. It expects a single volume in the request.
|
||||||
|
func (v *CSIVolumes) CreateOpts(req *CSIVolumeCreateRequest, w *WriteOptions) (*CSIVolumeCreateResponse, *WriteMeta, error) {
|
||||||
|
if w == nil {
|
||||||
|
w = &WriteOptions{}
|
||||||
|
}
|
||||||
|
vol := req.Volumes[0]
|
||||||
|
resp := &CSIVolumeCreateResponse{}
|
||||||
|
meta, err := v.client.put(fmt.Sprintf("/v1/volume/csi/%v/create", vol.ID), req, resp, w)
|
||||||
|
return resp, meta, err
|
||||||
|
}
|
||||||
|
|
||||||
// Delete deletes a CSI volume from an external storage provider. The ID
|
// Delete deletes a CSI volume from an external storage provider. The ID
|
||||||
// passed as an argument here is for the storage provider's ID, so a volume
|
// passed as an argument here is for the storage provider's ID, so a volume
|
||||||
// that's already been deregistered can be deleted.
|
// that's already been deregistered can be deleted.
|
||||||
|
//
|
||||||
|
// Deprecated: will be removed in Nomad 1.4.0
|
||||||
func (v *CSIVolumes) Delete(externalVolID string, w *WriteOptions) error {
|
func (v *CSIVolumes) Delete(externalVolID string, w *WriteOptions) error {
|
||||||
_, err := v.client.delete(fmt.Sprintf("/v1/volume/csi/%v/delete", url.PathEscape(externalVolID)), nil, nil, w)
|
_, err := v.client.delete(fmt.Sprintf("/v1/volume/csi/%v/delete", url.PathEscape(externalVolID)), nil, nil, w)
|
||||||
return err
|
return err
|
||||||
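The new RegisterOpts and CreateOpts variants expose the full request and response types, including the PolicyOverride flag and Warnings field added further down in this file. A hedged sketch, assuming client is a configured *api.Client with fmt imported; the volume, external, and plugin IDs are placeholders:

func registerVolume(client *api.Client) error {
	vol := &api.CSIVolume{
		ID:         "mysql-data",
		Name:       "mysql-data",
		ExternalID: "vol-0abc123",
		PluginID:   "aws-ebs0",
	}
	resp, _, err := client.CSIVolumes().RegisterOpts(&api.CSIVolumeRegisterRequest{
		Volumes:        []*api.CSIVolume{vol},
		PolicyOverride: true, // new field: bypass soft-mandatory Sentinel policies
	}, nil)
	if err != nil {
		return err
	}
	if resp.Warnings != "" {
		fmt.Println("register warnings:", resp.Warnings)
	}
	return nil
}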
@@ -184,8 +211,9 @@ func (v *CSIVolumes) ListSnapshotsOpts(req *CSISnapshotListRequest) (*CSISnapsho
|
|||||||
return resp, qm, nil
|
return resp, qm, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// DEPRECATED: will be removed in Nomad 1.4.0
|
|
||||||
// ListSnapshots lists external storage volume snapshots.
|
// ListSnapshots lists external storage volume snapshots.
|
||||||
|
//
|
||||||
|
// Deprecated: will be removed in Nomad 1.4.0
|
||||||
func (v *CSIVolumes) ListSnapshots(pluginID string, secrets string, q *QueryOptions) (*CSISnapshotListResponse, *QueryMeta, error) {
|
func (v *CSIVolumes) ListSnapshots(pluginID string, secrets string, q *QueryOptions) (*CSISnapshotListResponse, *QueryMeta, error) {
|
||||||
var resp *CSISnapshotListResponse
|
var resp *CSISnapshotListResponse
|
||||||
|
|
||||||
@@ -269,26 +297,26 @@ func (o *CSIMountOptions) Merge(p *CSIMountOptions) {
|
|||||||
// API or in Nomad's logs.
|
// API or in Nomad's logs.
|
||||||
type CSISecrets map[string]string
|
type CSISecrets map[string]string
|
||||||
|
|
||||||
func (q *QueryOptions) SetHeadersFromCSISecrets(secrets CSISecrets) {
|
func (o *QueryOptions) SetHeadersFromCSISecrets(secrets CSISecrets) {
|
||||||
pairs := []string{}
|
pairs := []string{}
|
||||||
for k, v := range secrets {
|
for k, v := range secrets {
|
||||||
pairs = append(pairs, fmt.Sprintf("%v=%v", k, v))
|
pairs = append(pairs, fmt.Sprintf("%v=%v", k, v))
|
||||||
}
|
}
|
||||||
if q.Headers == nil {
|
if o.Headers == nil {
|
||||||
q.Headers = map[string]string{}
|
o.Headers = map[string]string{}
|
||||||
}
|
}
|
||||||
q.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",")
|
o.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *WriteOptions) SetHeadersFromCSISecrets(secrets CSISecrets) {
|
func (o *WriteOptions) SetHeadersFromCSISecrets(secrets CSISecrets) {
|
||||||
pairs := []string{}
|
pairs := []string{}
|
||||||
for k, v := range secrets {
|
for k, v := range secrets {
|
||||||
pairs = append(pairs, fmt.Sprintf("%v=%v", k, v))
|
pairs = append(pairs, fmt.Sprintf("%v=%v", k, v))
|
||||||
}
|
}
|
||||||
if w.Headers == nil {
|
if o.Headers == nil {
|
||||||
w.Headers = map[string]string{}
|
o.Headers = map[string]string{}
|
||||||
}
|
}
|
||||||
w.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",")
|
o.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",")
|
||||||
}
|
}
|
||||||
|
|
||||||
// CSIVolume is used for serialization, see also nomad/structs/csi.go
|
// CSIVolume is used for serialization, see also nomad/structs/csi.go
|
||||||
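The renamed receivers do not change the calling convention: CSI secrets are still attached per request and travel only in the X-Nomad-CSI-Secrets header. For example:

// Attach per-request CSI secrets to query and write options.
q := &api.QueryOptions{}
q.SetHeadersFromCSISecrets(api.CSISecrets{"password": "s3cr3t"})

w := &api.WriteOptions{}
w.SetHeadersFromCSISecrets(api.CSISecrets{"password": "s3cr3t"})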
@@ -351,6 +379,11 @@ type CSIVolume struct {
|
|||||||
CreateIndex uint64
|
CreateIndex uint64
|
||||||
ModifyIndex uint64
|
ModifyIndex uint64
|
||||||
|
|
||||||
|
// CreateTime stored as UnixNano
|
||||||
|
CreateTime int64
|
||||||
|
// ModifyTime stored as UnixNano
|
||||||
|
ModifyTime int64
|
||||||
|
|
||||||
// ExtraKeysHCL is used by the hcl parser to report unexpected keys
|
// ExtraKeysHCL is used by the hcl parser to report unexpected keys
|
||||||
ExtraKeysHCL []string `hcl1:",unusedKeys" json:"-"`
|
ExtraKeysHCL []string `hcl1:",unusedKeys" json:"-"`
|
||||||
}
|
}
|
||||||
@@ -401,6 +434,11 @@ type CSIVolumeListStub struct {
|
|||||||
|
|
||||||
CreateIndex uint64
|
CreateIndex uint64
|
||||||
ModifyIndex uint64
|
ModifyIndex uint64
|
||||||
|
|
||||||
|
// CreateTime stored as UnixNano
|
||||||
|
CreateTime int64
|
||||||
|
// ModifyTime stored as UnixNano
|
||||||
|
ModifyTime int64
|
||||||
}
|
}
|
||||||
|
|
||||||
type CSIVolumeListExternalResponse struct {
|
type CSIVolumeListExternalResponse struct {
|
||||||
@@ -440,19 +478,33 @@ func (v CSIVolumeExternalStubSort) Swap(i, j int) {
|
|||||||
|
|
||||||
type CSIVolumeCreateRequest struct {
|
type CSIVolumeCreateRequest struct {
|
||||||
Volumes []*CSIVolume
|
Volumes []*CSIVolume
|
||||||
|
|
||||||
|
// PolicyOverride overrides Sentinel soft-mandatory policy enforcement
|
||||||
|
PolicyOverride bool
|
||||||
|
|
||||||
WriteRequest
|
WriteRequest
|
||||||
}
|
}
|
||||||
|
|
||||||
type CSIVolumeCreateResponse struct {
|
type CSIVolumeCreateResponse struct {
|
||||||
Volumes []*CSIVolume
|
Volumes []*CSIVolume
|
||||||
|
Warnings string
|
||||||
QueryMeta
|
QueryMeta
|
||||||
}
|
}
|
||||||
|
|
||||||
type CSIVolumeRegisterRequest struct {
|
type CSIVolumeRegisterRequest struct {
|
||||||
Volumes []*CSIVolume
|
Volumes []*CSIVolume
|
||||||
|
|
||||||
|
// PolicyOverride overrides Sentinel soft-mandatory policy enforcement
|
||||||
|
PolicyOverride bool
|
||||||
|
|
||||||
WriteRequest
|
WriteRequest
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type CSIVolumeRegisterResponse struct {
|
||||||
|
Volumes []*CSIVolume
|
||||||
|
Warnings string
|
||||||
|
}
|
||||||
|
|
||||||
type CSIVolumeDeregisterRequest struct {
|
type CSIVolumeDeregisterRequest struct {
|
||||||
VolumeIDs []string
|
VolumeIDs []string
|
||||||
WriteRequest
|
WriteRequest
|
||||||
@@ -507,7 +559,7 @@ type CSISnapshotCreateResponse struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// CSISnapshotListRequest is a request to a controller plugin to list all the
|
// CSISnapshotListRequest is a request to a controller plugin to list all the
|
||||||
// snapshot known to the the storage provider. This request is paginated by
|
// snapshot known to the storage provider. This request is paginated by
|
||||||
// the plugin and accepts the QueryOptions.PerPage and QueryOptions.NextToken
|
// the plugin and accepts the QueryOptions.PerPage and QueryOptions.NextToken
|
||||||
// fields
|
// fields
|
||||||
type CSISnapshotListRequest struct {
|
type CSISnapshotListRequest struct {
|
||||||
@@ -543,6 +595,11 @@ type CSIPlugin struct {
|
|||||||
NodesExpected int
|
NodesExpected int
|
||||||
CreateIndex uint64
|
CreateIndex uint64
|
||||||
ModifyIndex uint64
|
ModifyIndex uint64
|
||||||
|
|
||||||
|
// CreateTime stored as UnixNano
|
||||||
|
CreateTime int64
|
||||||
|
// ModifyTime stored as UnixNano
|
||||||
|
ModifyTime int64
|
||||||
}
|
}
|
||||||
|
|
||||||
type CSIPluginListStub struct {
|
type CSIPluginListStub struct {
|
||||||
|
|||||||
7  vendor/github.com/hashicorp/nomad/api/deployments.go  generated  vendored
@@ -193,6 +193,10 @@ type Deployment struct {
 
 	CreateIndex uint64
 	ModifyIndex uint64
+
+	// Creation and modification times, stored as UnixNano
+	CreateTime int64
+	ModifyTime int64
 }
 
 // DeploymentState tracks the state of a deployment for a given task group.
@@ -261,6 +265,9 @@ type DeploymentPromoteRequest struct {
 	// Groups is used to set the promotion status per task group
 	Groups []string
 
+	// PromotedAt is the timestamp stored as Unix nano
+	PromotedAt int64
+
 	WriteRequest
 }
 
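CreateTime and ModifyTime are UnixNano values, so they convert with time.Unix(0, ...). A small sketch, assuming a configured *api.Client, the pre-existing Deployments().Info helper, and imports of fmt and time; the deployment ID is a placeholder:

func printDeploymentTimes(client *api.Client, deployID string) error {
	d, _, err := client.Deployments().Info(deployID, nil)
	if err != nil {
		return err
	}
	fmt.Println("created: ", time.Unix(0, d.CreateTime))
	fmt.Println("modified:", time.Unix(0, d.ModifyTime))
	return nil
}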
2  vendor/github.com/hashicorp/nomad/api/event_stream.go  generated  vendored
@@ -186,7 +186,7 @@ func (e *EventStream) Stream(ctx context.Context, topics map[Topic][]string, ind
 		}
 	}
 
-	_, resp, err := requireOK(e.client.doRequest(r))
+	_, resp, err := requireOK(e.client.doRequest(r)) //nolint:bodyclose
 
 	if err != nil {
 		return nil, err
15  vendor/github.com/hashicorp/nomad/api/fs.go  generated  vendored
@@ -389,12 +389,23 @@ func (f *FrameReader) Read(p []byte) (n int, err error) {
 		case <-unblock:
 			return 0, nil
 		case err := <-f.errCh:
-			return 0, err
+			// check for race with f.frames before returning error
+			select {
+			case frame, ok := <-f.frames:
+				if !ok {
+					return 0, io.EOF
+				}
+				f.frame = frame
+
+				// Store the total offset into the file
+				f.byteOffset = int(f.frame.Offset)
+			default:
+				return 0, err
+			}
 		case <-f.cancelCh:
 			return 0, io.EOF
 		}
 	}
 
 	// Copy the data out of the frame and update our offset
 	n = copy(p, f.frame.Data[f.frameOffset:])
 	f.frameOffset += n
70  vendor/github.com/hashicorp/nomad/api/host_volume_claims.go  generated  vendored  Normal file
@@ -0,0 +1,70 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package api
|
||||||
|
|
||||||
|
import "net/url"
|
||||||
|
|
||||||
|
// TaskGroupHostVolumeClaim associates a task group with a host volume ID. It's
|
||||||
|
// used for stateful deployments, i.e., volume requests with "sticky" set to
|
||||||
|
// true.
|
||||||
|
type TaskGroupHostVolumeClaim struct {
|
||||||
|
ID string `mapstructure:"id"`
|
||||||
|
Namespace string `mapstructure:"namespace"`
|
||||||
|
JobID string `mapstructure:"job_id"`
|
||||||
|
TaskGroupName string `mapstructure:"task_group_name"`
|
||||||
|
AllocID string `mapstructure:"alloc_id"`
|
||||||
|
VolumeID string `mapstructure:"volume_id"`
|
||||||
|
VolumeName string `mapstructure:"volume_name"`
|
||||||
|
|
||||||
|
CreateIndex uint64
|
||||||
|
ModifyIndex uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// TaskGroupHostVolumeClaims is used to access the API.
|
||||||
|
type TaskGroupHostVolumeClaims struct {
|
||||||
|
client *Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// TaskGroupHostVolumeClaims returns a new handle on the API.
|
||||||
|
func (c *Client) TaskGroupHostVolumeClaims() *TaskGroupHostVolumeClaims {
|
||||||
|
return &TaskGroupHostVolumeClaims{client: c}
|
||||||
|
}
|
||||||
|
|
||||||
|
type TaskGroupHostVolumeClaimsListRequest struct {
|
||||||
|
JobID string
|
||||||
|
TaskGroup string
|
||||||
|
VolumeName string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tgvc *TaskGroupHostVolumeClaims) List(req *TaskGroupHostVolumeClaimsListRequest, opts *QueryOptions) ([]*TaskGroupHostVolumeClaim, *QueryMeta, error) {
|
||||||
|
|
||||||
|
qv := url.Values{}
|
||||||
|
if req != nil {
|
||||||
|
if req.JobID != "" {
|
||||||
|
qv.Set("job_id", req.JobID)
|
||||||
|
}
|
||||||
|
if req.TaskGroup != "" {
|
||||||
|
qv.Set("task_group", req.TaskGroup)
|
||||||
|
}
|
||||||
|
if req.VolumeName != "" {
|
||||||
|
qv.Set("volume_name", req.VolumeName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var out []*TaskGroupHostVolumeClaim
|
||||||
|
qm, err := tgvc.client.query("/v1/volumes/claims?"+qv.Encode(), &out, opts)
|
||||||
|
if err != nil {
|
||||||
|
return nil, qm, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tgvc *TaskGroupHostVolumeClaims) Delete(claimID string, opts *WriteOptions) (*WriteMeta, error) {
|
||||||
|
path, err := url.JoinPath("/v1/volumes/claim", url.PathEscape(claimID))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
wm, err := tgvc.client.delete(path, nil, nil, opts)
|
||||||
|
return wm, err
|
||||||
|
}
|
||||||
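A hedged sketch of the new claims API, assuming a configured *api.Client with fmt imported; the job and volume names are placeholders. List surfaces the claims that pin a sticky volume request to a volume, and Delete drops a stale claim so the group can be placed against a fresh volume:

func pruneClaims(client *api.Client) error {
	claims, _, err := client.TaskGroupHostVolumeClaims().List(
		&api.TaskGroupHostVolumeClaimsListRequest{JobID: "web", VolumeName: "data"}, nil)
	if err != nil {
		return err
	}
	for _, c := range claims {
		fmt.Printf("claim %s: alloc %s -> volume %s\n", c.ID, c.AllocID, c.VolumeID)
		if _, err := client.TaskGroupHostVolumeClaims().Delete(c.ID, nil); err != nil {
			return err
		}
	}
	return nil
}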
253  vendor/github.com/hashicorp/nomad/api/host_volumes.go  generated  vendored  Normal file
@@ -0,0 +1,253 @@
|
|||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package api
|
||||||
|
|
||||||
|
import "net/url"
|
||||||
|
|
||||||
|
// HostVolume represents a Dynamic Host Volume: a volume associated with a
|
||||||
|
// specific Nomad client agent but created via API.
|
||||||
|
type HostVolume struct {
|
||||||
|
// Namespace is the Nomad namespace for the host volume, which constrains
|
||||||
|
// which jobs can mount it.
|
||||||
|
Namespace string `mapstructure:"namespace" hcl:"namespace"`
|
||||||
|
|
||||||
|
// ID is a UUID-like string generated by the server.
|
||||||
|
ID string `mapstructure:"id" hcl:"id"`
|
||||||
|
|
||||||
|
// Name is the name that group.volume will use to identify the volume
|
||||||
|
// source. Not expected to be unique.
|
||||||
|
Name string `mapstructure:"name" hcl:"name"`
|
||||||
|
|
||||||
|
// PluginID is the name of the host volume plugin on the client that will be
|
||||||
|
// used for creating the volume. If omitted, the client will use its default
|
||||||
|
// built-in plugin.
|
||||||
|
PluginID string `mapstructure:"plugin_id" hcl:"plugin_id"`
|
||||||
|
|
||||||
|
// NodePool is the node pool of the node where the volume is placed. If the
|
||||||
|
// user doesn't provide a node ID, a node will be selected using the
|
||||||
|
// NodePool and Constraints. If the user provides both NodePool and NodeID,
|
||||||
|
// NodePool will be used to validate the request. If omitted, the server
|
||||||
|
// will populate this value in before writing the volume to Raft.
|
||||||
|
NodePool string `mapstructure:"node_pool" hcl:"node_pool"`
|
||||||
|
|
||||||
|
// NodeID is the node where the volume is placed. If the user doesn't
|
||||||
|
// provide a NodeID, one will be selected using the NodePool and
|
||||||
|
// Constraints. If omitted, this field will then be populated by the server
|
||||||
|
// before writing the volume to Raft.
|
||||||
|
NodeID string `mapstructure:"node_id" hcl:"node_id"`
|
||||||
|
|
||||||
|
// Constraints are optional. If the NodeID is not provided, the NodePool and
|
||||||
|
// Constraints are used to select a node. If the NodeID is provided,
|
||||||
|
// Constraints are used to validate that the node meets those constraints at
|
||||||
|
// the time of volume creation.
|
||||||
|
Constraints []*Constraint `json:",omitempty" hcl:"constraint"`
|
||||||
|
|
||||||
|
// Because storage may allow only specific intervals of size, we accept a
|
||||||
|
// min and max and return the actual capacity when the volume is created or
|
||||||
|
// updated on the client
|
||||||
|
RequestedCapacityMinBytes int64 `mapstructure:"capacity_min" hcl:"capacity_min"`
|
||||||
|
RequestedCapacityMaxBytes int64 `mapstructure:"capacity_max" hcl:"capacity_max"`
|
||||||
|
CapacityBytes int64 `mapstructure:"capacity" hcl:"capacity"`
|
||||||
|
|
||||||
|
// RequestedCapabilities defines the options available to group.volume
|
||||||
|
// blocks. The scheduler checks against the listed capability blocks and
|
||||||
|
// selects a node for placement if *any* capability block works.
|
||||||
|
RequestedCapabilities []*HostVolumeCapability `hcl:"capability"`
|
||||||
|
|
||||||
|
// Parameters are an opaque map of parameters for the host volume plugin.
|
||||||
|
Parameters map[string]string `json:",omitempty"`
|
||||||
|
|
||||||
|
// HostPath is the path on disk where the volume's mount point was
|
||||||
|
// created. We record this to make debugging easier.
|
||||||
|
HostPath string `mapstructure:"host_path" hcl:"host_path"`
|
||||||
|
|
||||||
|
// State represents the overall state of the volume. One of pending, ready,
|
||||||
|
// deleted.
|
||||||
|
State HostVolumeState
|
||||||
|
|
||||||
|
CreateIndex uint64
|
||||||
|
CreateTime int64
|
||||||
|
|
||||||
|
ModifyIndex uint64
|
||||||
|
ModifyTime int64
|
||||||
|
|
||||||
|
// Allocations is the list of non-client-terminal allocations with claims on
|
||||||
|
// this host volume. They are denormalized on read and this field will be
|
||||||
|
// never written to Raft
|
||||||
|
Allocations []*AllocationListStub `json:",omitempty" mapstructure:"-" hcl:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostVolume state reports the current status of the host volume
|
||||||
|
type HostVolumeState string
|
||||||
|
|
||||||
|
const (
|
||||||
|
HostVolumeStatePending HostVolumeState = "pending"
|
||||||
|
HostVolumeStateReady HostVolumeState = "ready"
|
||||||
|
HostVolumeStateUnavailable HostVolumeState = "unavailable"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HostVolumeCapability is the requested attachment and access mode for a volume
|
||||||
|
type HostVolumeCapability struct {
|
||||||
|
AttachmentMode HostVolumeAttachmentMode `mapstructure:"attachment_mode" hcl:"attachment_mode"`
|
||||||
|
AccessMode HostVolumeAccessMode `mapstructure:"access_mode" hcl:"access_mode"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostVolumeAttachmentMode chooses the type of storage API that will be used to
|
||||||
|
// interact with the device.
|
||||||
|
type HostVolumeAttachmentMode string
|
||||||
|
|
||||||
|
const (
|
||||||
|
HostVolumeAttachmentModeUnknown HostVolumeAttachmentMode = ""
|
||||||
|
HostVolumeAttachmentModeBlockDevice HostVolumeAttachmentMode = "block-device"
|
||||||
|
HostVolumeAttachmentModeFilesystem HostVolumeAttachmentMode = "file-system"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HostVolumeAccessMode indicates how Nomad should make the volume available to
|
||||||
|
// concurrent allocations.
|
||||||
|
type HostVolumeAccessMode string
|
||||||
|
|
||||||
|
const (
|
||||||
|
HostVolumeAccessModeUnknown HostVolumeAccessMode = ""
|
||||||
|
|
||||||
|
HostVolumeAccessModeSingleNodeReader HostVolumeAccessMode = "single-node-reader-only"
|
||||||
|
HostVolumeAccessModeSingleNodeWriter HostVolumeAccessMode = "single-node-writer"
|
||||||
|
HostVolumeAccessModeSingleNodeSingleWriter HostVolumeAccessMode = "single-node-single-writer"
|
||||||
|
HostVolumeAccessModeSingleNodeMultiWriter HostVolumeAccessMode = "single-node-multi-writer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HostVolumeStub is used for responses for the List Volumes endpoint
|
||||||
|
type HostVolumeStub struct {
|
||||||
|
Namespace string
|
||||||
|
ID string
|
||||||
|
Name string
|
||||||
|
PluginID string
|
||||||
|
NodePool string
|
||||||
|
NodeID string
|
||||||
|
CapacityBytes int64
|
||||||
|
State HostVolumeState
|
||||||
|
|
||||||
|
CreateIndex uint64
|
||||||
|
CreateTime int64
|
||||||
|
|
||||||
|
ModifyIndex uint64
|
||||||
|
ModifyTime int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostVolumes is used to access the host volumes API.
|
||||||
|
type HostVolumes struct {
|
||||||
|
client *Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostVolumes returns a new handle on the host volumes API.
|
||||||
|
func (c *Client) HostVolumes() *HostVolumes {
|
||||||
|
return &HostVolumes{client: c}
|
||||||
|
}
|
||||||
|
|
||||||
|
type HostVolumeCreateRequest struct {
|
||||||
|
Volume *HostVolume
|
||||||
|
|
||||||
|
// PolicyOverride overrides Sentinel soft-mandatory policy enforcement
|
||||||
|
PolicyOverride bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type HostVolumeRegisterRequest struct {
|
||||||
|
Volume *HostVolume
|
||||||
|
|
||||||
|
// PolicyOverride overrides Sentinel soft-mandatory policy enforcement
|
||||||
|
PolicyOverride bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type HostVolumeCreateResponse struct {
|
||||||
|
Volume *HostVolume
|
||||||
|
Warnings string
|
||||||
|
}
|
||||||
|
|
||||||
|
type HostVolumeRegisterResponse struct {
|
||||||
|
Volume *HostVolume
|
||||||
|
Warnings string
|
||||||
|
}
|
||||||
|
|
||||||
|
type HostVolumeListRequest struct {
|
||||||
|
NodeID string
|
||||||
|
NodePool string
|
||||||
|
}
|
||||||
|
|
||||||
|
type HostVolumeDeleteRequest struct {
|
||||||
|
ID string
|
||||||
|
Force bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type HostVolumeDeleteResponse struct{}
|
||||||
|
|
||||||
|
// Create forwards to client agents so a host volume can be created on those
|
||||||
|
// hosts, and registers the volume with Nomad servers.
|
||||||
|
func (hv *HostVolumes) Create(req *HostVolumeCreateRequest, opts *WriteOptions) (*HostVolumeCreateResponse, *WriteMeta, error) {
|
||||||
|
var out *HostVolumeCreateResponse
|
||||||
|
wm, err := hv.client.put("/v1/volume/host/create", req, &out, opts)
|
||||||
|
if err != nil {
|
||||||
|
return nil, wm, err
|
||||||
|
}
|
||||||
|
return out, wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register registers a host volume that was created out-of-band with the Nomad
|
||||||
|
// servers.
|
||||||
|
func (hv *HostVolumes) Register(req *HostVolumeRegisterRequest, opts *WriteOptions) (*HostVolumeRegisterResponse, *WriteMeta, error) {
|
||||||
|
var out *HostVolumeRegisterResponse
|
||||||
|
wm, err := hv.client.put("/v1/volume/host/register", req, &out, opts)
|
||||||
|
if err != nil {
|
||||||
|
return nil, wm, err
|
||||||
|
}
|
||||||
|
return out, wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get queries for a single host volume, by ID
|
||||||
|
func (hv *HostVolumes) Get(id string, opts *QueryOptions) (*HostVolume, *QueryMeta, error) {
|
||||||
|
var out *HostVolume
|
||||||
|
path, err := url.JoinPath("/v1/volume/host/", url.PathEscape(id))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
qm, err := hv.client.query(path, &out, opts)
|
||||||
|
if err != nil {
|
||||||
|
return nil, qm, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List queries for a set of host volumes, by namespace, node, node pool, or
|
||||||
|
// name prefix.
|
||||||
|
func (hv *HostVolumes) List(req *HostVolumeListRequest, opts *QueryOptions) ([]*HostVolumeStub, *QueryMeta, error) {
|
||||||
|
var out []*HostVolumeStub
|
||||||
|
qv := url.Values{}
|
||||||
|
qv.Set("type", "host")
|
||||||
|
if req != nil {
|
||||||
|
if req.NodeID != "" {
|
||||||
|
qv.Set("node_id", req.NodeID)
|
||||||
|
}
|
||||||
|
if req.NodePool != "" {
|
||||||
|
qv.Set("node_pool", req.NodePool)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
qm, err := hv.client.query("/v1/volumes?"+qv.Encode(), &out, opts)
|
||||||
|
if err != nil {
|
||||||
|
return nil, qm, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete deletes a host volume
|
||||||
|
func (hv *HostVolumes) Delete(req *HostVolumeDeleteRequest, opts *WriteOptions) (*HostVolumeDeleteResponse, *WriteMeta, error) {
|
||||||
|
var resp *HostVolumeDeleteResponse
|
||||||
|
path, err := url.JoinPath("/v1/volume/host/", url.PathEscape(req.ID))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if req.Force {
|
||||||
|
path = path + "?force=true"
|
||||||
|
}
|
||||||
|
wm, err := hv.client.delete(path, nil, resp, opts)
|
||||||
|
return resp, wm, err
|
||||||
|
}
|
||||||
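A hedged sketch of creating and listing a dynamic host volume with the new API, assuming a configured *api.Client with fmt imported; the "mkdir" plugin ID, node pool, and capacity bounds are illustrative values, not requirements of this API:

func createHostVolume(client *api.Client) error {
	resp, _, err := client.HostVolumes().Create(&api.HostVolumeCreateRequest{
		Volume: &api.HostVolume{
			Name:                      "data",
			Namespace:                 "default",
			PluginID:                  "mkdir", // assumed example plugin name
			NodePool:                  "default",
			RequestedCapacityMinBytes: 50 << 20,
			RequestedCapacityMaxBytes: 200 << 20,
			RequestedCapabilities: []*api.HostVolumeCapability{{
				AttachmentMode: api.HostVolumeAttachmentModeFilesystem,
				AccessMode:     api.HostVolumeAccessModeSingleNodeWriter,
			}},
		},
	}, nil)
	if err != nil {
		return err
	}
	fmt.Println("volume", resp.Volume.ID, "state", resp.Volume.State)

	// List volumes placed in the same node pool.
	stubs, _, err := client.HostVolumes().List(&api.HostVolumeListRequest{NodePool: "default"}, nil)
	if err != nil {
		return err
	}
	fmt.Println(len(stubs), "host volumes in pool")
	return nil
}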
243  vendor/github.com/hashicorp/nomad/api/jobs.go  generated  vendored
@@ -8,13 +8,13 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"maps"
|
||||||
"net/url"
|
"net/url"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/hashicorp/cronexpr"
|
"github.com/hashicorp/cronexpr"
|
||||||
"golang.org/x/exp/maps"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -74,9 +74,6 @@ type JobsParseRequest struct {
|
|||||||
// JobHCL is an hcl jobspec
|
// JobHCL is an hcl jobspec
|
||||||
JobHCL string
|
JobHCL string
|
||||||
|
|
||||||
// HCLv1 indicates whether the JobHCL should be parsed with the hcl v1 parser
|
|
||||||
HCLv1 bool `json:"hclv1,omitempty"`
|
|
||||||
|
|
||||||
// Variables are HCL2 variables associated with the job. Only works with hcl2.
|
// Variables are HCL2 variables associated with the job. Only works with hcl2.
|
||||||
//
|
//
|
||||||
// Interpreted as if it were the content of a variables file.
|
// Interpreted as if it were the content of a variables file.
|
||||||
@@ -104,7 +101,7 @@ func (j *Jobs) ParseHCL(jobHCL string, canonicalize bool) (*Job, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ParseHCLOpts is used to request the server convert the HCL representation of a
|
// ParseHCLOpts is used to request the server convert the HCL representation of a
|
||||||
// Job to JSON on our behalf. Accepts HCL1 or HCL2 jobs as input.
|
// Job to JSON on our behalf. Only accepts HCL2 jobs as input.
|
||||||
func (j *Jobs) ParseHCLOpts(req *JobsParseRequest) (*Job, error) {
|
func (j *Jobs) ParseHCLOpts(req *JobsParseRequest) (*Job, error) {
|
||||||
var job Job
|
var job Job
|
||||||
_, err := j.client.put("/v1/jobs/parse", req, &job, nil)
|
_, err := j.client.put("/v1/jobs/parse", req, &job, nil)
|
||||||
@@ -179,7 +176,7 @@ func (j *Jobs) List(q *QueryOptions) ([]*JobListStub, *QueryMeta, error) {
|
|||||||
return j.ListOptions(nil, q)
|
return j.ListOptions(nil, q)
|
||||||
}
|
}
|
||||||
|
|
||||||
// List is used to list all of the existing jobs.
|
// ListOptions is used to list all of the existing jobs.
|
||||||
func (j *Jobs) ListOptions(opts *JobListOptions, q *QueryOptions) ([]*JobListStub, *QueryMeta, error) {
|
func (j *Jobs) ListOptions(opts *JobListOptions, q *QueryOptions) ([]*JobListStub, *QueryMeta, error) {
|
||||||
var resp []*JobListStub
|
var resp []*JobListStub
|
||||||
|
|
||||||
@@ -215,8 +212,7 @@ func (j *Jobs) Info(jobID string, q *QueryOptions) (*Job, *QueryMeta, error) {
|
|||||||
return &resp, qm, nil
|
return &resp, qm, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Scale is used to retrieve information about a particular
|
// Scale is used to scale a job.
|
||||||
// job given its unique ID.
|
|
||||||
func (j *Jobs) Scale(jobID, group string, count *int, message string, error bool, meta map[string]interface{},
|
func (j *Jobs) Scale(jobID, group string, count *int, message string, error bool, meta map[string]interface{},
|
||||||
q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) {
|
q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) {
|
||||||
|
|
||||||
@@ -242,6 +238,17 @@ func (j *Jobs) Scale(jobID, group string, count *int, message string, error bool
|
|||||||
return &resp, qm, nil
|
return &resp, qm, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ScaleWithRequest is used to scale a job, giving the caller complete control
|
||||||
|
// over the ScalingRequest
|
||||||
|
func (j *Jobs) ScaleWithRequest(jobID string, req *ScalingRequest, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) {
|
||||||
|
var resp JobRegisterResponse
|
||||||
|
qm, err := j.client.put(fmt.Sprintf("/v1/job/%s/scale", url.PathEscape(jobID)), req, &resp, q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return &resp, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
// ScaleStatus is used to retrieve information about a particular
|
// ScaleStatus is used to retrieve information about a particular
|
||||||
// job given its unique ID.
|
// job given its unique ID.
|
||||||
func (j *Jobs) ScaleStatus(jobID string, q *QueryOptions) (*JobScaleStatusResponse, *QueryMeta, error) {
|
func (j *Jobs) ScaleStatus(jobID string, q *QueryOptions) (*JobScaleStatusResponse, *QueryMeta, error) {
|
||||||
@@ -256,8 +263,50 @@ func (j *Jobs) ScaleStatus(jobID string, q *QueryOptions) (*JobScaleStatusRespon
|
|||||||
// Versions is used to retrieve all versions of a particular job given its
|
// Versions is used to retrieve all versions of a particular job given its
|
||||||
// unique ID.
|
// unique ID.
|
||||||
func (j *Jobs) Versions(jobID string, diffs bool, q *QueryOptions) ([]*Job, []*JobDiff, *QueryMeta, error) {
|
func (j *Jobs) Versions(jobID string, diffs bool, q *QueryOptions) ([]*Job, []*JobDiff, *QueryMeta, error) {
|
||||||
|
opts := &VersionsOptions{
|
||||||
|
Diffs: diffs,
|
||||||
|
}
|
||||||
|
return j.VersionsOpts(jobID, opts, q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// VersionByTag is used to retrieve a job version by its VersionTag name.
|
||||||
|
func (j *Jobs) VersionByTag(jobID, tag string, q *QueryOptions) (*Job, *QueryMeta, error) {
|
||||||
|
versions, _, qm, err := j.Versions(jobID, false, q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find the version with the matching tag
|
||||||
|
for _, version := range versions {
|
||||||
|
if version.VersionTag != nil && version.VersionTag.Name == tag {
|
||||||
|
return version, qm, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, nil, fmt.Errorf("version tag %s not found for job %s", tag, jobID)
|
||||||
|
}
|
||||||
|
|
||||||
|
type VersionsOptions struct {
|
||||||
|
Diffs bool
|
||||||
|
DiffTag string
|
||||||
|
DiffVersion *uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *Jobs) VersionsOpts(jobID string, opts *VersionsOptions, q *QueryOptions) ([]*Job, []*JobDiff, *QueryMeta, error) {
|
||||||
var resp JobVersionsResponse
|
var resp JobVersionsResponse
|
||||||
qm, err := j.client.query(fmt.Sprintf("/v1/job/%s/versions?diffs=%v", url.PathEscape(jobID), diffs), &resp, q)
|
|
||||||
|
qp := url.Values{}
|
||||||
|
if opts != nil {
|
||||||
|
qp.Add("diffs", strconv.FormatBool(opts.Diffs))
|
||||||
|
if opts.DiffTag != "" {
|
||||||
|
qp.Add("diff_tag", opts.DiffTag)
|
||||||
|
}
|
||||||
|
if opts.DiffVersion != nil {
|
||||||
|
qp.Add("diff_version", strconv.FormatUint(*opts.DiffVersion, 10))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
qm, err := j.client.query(fmt.Sprintf("/v1/job/%s/versions?%s", url.PathEscape(jobID), qp.Encode()), &resp, q)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, err
|
return nil, nil, nil, err
|
||||||
}
|
}
|
||||||
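VersionByTag and VersionsOpts can be exercised as below; a sketch assuming a configured *api.Client with fmt imported, where the job ID, tag name, and baseline version are placeholders:

func inspectVersions(client *api.Client) error {
	// Fetch the job version tagged "golden" directly.
	job, _, err := client.Jobs().VersionByTag("example", "golden", nil)
	if err != nil {
		return err
	}
	fmt.Println("tagged version:", *job.Version)

	// Diff all versions against a fixed baseline version instead of the previous one.
	base := uint64(3)
	_, diffs, _, err := client.Jobs().VersionsOpts("example",
		&api.VersionsOptions{Diffs: true, DiffVersion: &base}, nil)
	if err != nil {
		return err
	}
	fmt.Println(len(diffs), "diffs against version 3")
	return nil
}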
@@ -274,6 +323,7 @@ func (j *Jobs) Submission(jobID string, version int, q *QueryOptions) (*JobSubmi
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return &sub, qm, nil
|
return &sub, qm, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -479,16 +529,39 @@ func (j *Jobs) Summary(jobID string, q *QueryOptions) (*JobSummary, *QueryMeta,
|
|||||||
return &resp, qm, nil
|
return &resp, qm, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DispatchOptions is used to pass through job dispatch parameters
|
||||||
|
type DispatchOptions struct {
|
||||||
|
JobID string
|
||||||
|
Meta map[string]string
|
||||||
|
Payload []byte
|
||||||
|
IdPrefixTemplate string
|
||||||
|
Priority int
|
||||||
|
}
|
||||||
|
|
||||||
func (j *Jobs) Dispatch(jobID string, meta map[string]string,
|
func (j *Jobs) Dispatch(jobID string, meta map[string]string,
|
||||||
payload []byte, idPrefixTemplate string, q *WriteOptions) (*JobDispatchResponse, *WriteMeta, error) {
|
payload []byte, idPrefixTemplate string, q *WriteOptions) (*JobDispatchResponse, *WriteMeta, error) {
|
||||||
var resp JobDispatchResponse
|
|
||||||
req := &JobDispatchRequest{
|
return j.DispatchOpts(&DispatchOptions{
|
||||||
JobID: jobID,
|
JobID: jobID,
|
||||||
Meta: meta,
|
Meta: meta,
|
||||||
Payload: payload,
|
Payload: payload,
|
||||||
IdPrefixTemplate: idPrefixTemplate,
|
IdPrefixTemplate: idPrefixTemplate},
|
||||||
|
q,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DispatchOpts is used to dispatch a new job with the passed DispatchOpts. It
|
||||||
|
// returns the ID of the evaluation, along with any errors encountered.
|
||||||
|
func (j *Jobs) DispatchOpts(opts *DispatchOptions, q *WriteOptions) (*JobDispatchResponse, *WriteMeta, error) {
|
||||||
|
var resp JobDispatchResponse
|
||||||
|
req := &JobDispatchRequest{
|
||||||
|
JobID: opts.JobID,
|
||||||
|
Meta: opts.Meta,
|
||||||
|
Payload: opts.Payload,
|
||||||
|
IdPrefixTemplate: opts.IdPrefixTemplate,
|
||||||
|
Priority: opts.Priority,
|
||||||
}
|
}
|
||||||
wm, err := j.client.put("/v1/job/"+url.PathEscape(jobID)+"/dispatch", req, &resp, q)
|
wm, err := j.client.put("/v1/job/"+url.PathEscape(opts.JobID)+"/dispatch", req, &resp, q)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
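DispatchOpts keeps the old Dispatch behaviour but adds the Priority override; a sketch assuming a configured *api.Client with fmt imported and a parameterized job named "batch/processing" (placeholder):

func dispatchWithPriority(client *api.Client) error {
	resp, _, err := client.Jobs().DispatchOpts(&api.DispatchOptions{
		JobID:            "batch/processing",
		Meta:             map[string]string{"tenant": "acme"},
		Payload:          []byte("batch input"),
		IdPrefixTemplate: "acme",
		Priority:         80, // new: overrides the parent job's priority
	}, nil)
	if err != nil {
		return err
	}
	fmt.Println("dispatched:", resp.DispatchedJobID, "eval:", resp.EvalID)
	return nil
}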
@@ -499,15 +572,13 @@ func (j *Jobs) Dispatch(jobID string, meta map[string]string,
|
|||||||
// enforceVersion is set, the job is only reverted if the current version is at
|
// enforceVersion is set, the job is only reverted if the current version is at
|
||||||
// the passed version.
|
// the passed version.
|
||||||
func (j *Jobs) Revert(jobID string, version uint64, enforcePriorVersion *uint64,
|
func (j *Jobs) Revert(jobID string, version uint64, enforcePriorVersion *uint64,
|
||||||
q *WriteOptions, consulToken, vaultToken string) (*JobRegisterResponse, *WriteMeta, error) {
|
q *WriteOptions, _ string, _ string) (*JobRegisterResponse, *WriteMeta, error) {
|
||||||
|
|
||||||
var resp JobRegisterResponse
|
var resp JobRegisterResponse
|
||||||
req := &JobRevertRequest{
|
req := &JobRevertRequest{
|
||||||
JobID: jobID,
|
JobID: jobID,
|
||||||
JobVersion: version,
|
JobVersion: version,
|
||||||
EnforcePriorVersion: enforcePriorVersion,
|
EnforcePriorVersion: enforcePriorVersion,
|
||||||
ConsulToken: consulToken,
|
|
||||||
VaultToken: vaultToken,
|
|
||||||
}
|
}
|
||||||
wm, err := j.client.put("/v1/job/"+url.PathEscape(jobID)+"/revert", req, &resp, q)
|
wm, err := j.client.put("/v1/job/"+url.PathEscape(jobID)+"/revert", req, &resp, q)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -918,10 +989,11 @@ type ParameterizedJobConfig struct {
|
|||||||
// the job submission.
|
// the job submission.
|
||||||
type JobSubmission struct {
|
type JobSubmission struct {
|
||||||
// Source contains the original job definition (may be in the format of
|
// Source contains the original job definition (may be in the format of
|
||||||
// hcl1, hcl2, or json).
|
// hcl1, hcl2, or json). HCL1 jobs can no longer be parsed.
|
||||||
Source string
|
Source string
|
||||||
|
|
||||||
// Format indicates what the Source content was (hcl1, hcl2, or json).
|
// Format indicates what the Source content was (hcl1, hcl2, or json). HCL1
|
||||||
|
// jobs can no longer be parsed.
|
||||||
Format string
|
Format string
|
||||||
|
|
||||||
// VariableFlags contains the CLI "-var" flag arguments as submitted with the
|
// VariableFlags contains the CLI "-var" flag arguments as submitted with the
|
||||||
@@ -933,6 +1005,70 @@ type JobSubmission struct {
|
|||||||
Variables string
|
Variables string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type JobUIConfig struct {
|
||||||
|
Description string `hcl:"description,optional"`
|
||||||
|
Links []*JobUILink `hcl:"link,block"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type JobUILink struct {
|
||||||
|
Label string `hcl:"label,optional"`
|
||||||
|
URL string `hcl:"url,optional"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *JobUIConfig) Canonicalize() {
|
||||||
|
if j == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(j.Links) == 0 {
|
||||||
|
j.Links = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *JobUIConfig) Copy() *JobUIConfig {
|
||||||
|
if j == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
copy := new(JobUIConfig)
|
||||||
|
copy.Description = j.Description
|
||||||
|
|
||||||
|
for _, link := range j.Links {
|
||||||
|
copy.Links = append(copy.Links, link.Copy())
|
||||||
|
}
|
||||||
|
|
||||||
|
return copy
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *JobUILink) Copy() *JobUILink {
|
||||||
|
if j == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return &JobUILink{
|
||||||
|
Label: j.Label,
|
||||||
|
URL: j.URL,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type JobVersionTag struct {
|
||||||
|
Name string
|
||||||
|
Description string
|
||||||
|
TaggedTime int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *JobVersionTag) Copy() *JobVersionTag {
|
||||||
|
if j == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return &JobVersionTag{
|
||||||
|
Name: j.Name,
|
||||||
|
Description: j.Description,
|
||||||
|
TaggedTime: j.TaggedTime,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (js *JobSubmission) Canonicalize() {
|
func (js *JobSubmission) Canonicalize() {
|
||||||
if js == nil {
|
if js == nil {
|
||||||
return
|
return
|
||||||
@@ -941,6 +1077,13 @@ func (js *JobSubmission) Canonicalize() {
|
|||||||
if len(js.VariableFlags) == 0 {
|
if len(js.VariableFlags) == 0 {
|
||||||
js.VariableFlags = nil
|
js.VariableFlags = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// if there are multiline variables, make sure we escape the newline
|
||||||
|
// characters to preserve them. This way, when the job gets stopped and
|
||||||
|
// restarted in the UI, variable values will be parsed correctly.
|
||||||
|
for k, v := range js.VariableFlags {
|
||||||
|
js.VariableFlags[k] = url.QueryEscape(v)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (js *JobSubmission) Copy() *JobSubmission {
|
func (js *JobSubmission) Copy() *JobSubmission {
|
||||||
@@ -980,8 +1123,7 @@ type Job struct {
|
|||||||
Reschedule *ReschedulePolicy `hcl:"reschedule,block"`
|
Reschedule *ReschedulePolicy `hcl:"reschedule,block"`
|
||||||
Migrate *MigrateStrategy `hcl:"migrate,block"`
|
Migrate *MigrateStrategy `hcl:"migrate,block"`
|
||||||
Meta map[string]string `hcl:"meta,block"`
|
Meta map[string]string `hcl:"meta,block"`
|
||||||
ConsulToken *string `mapstructure:"consul_token" hcl:"consul_token,optional"`
|
UI *JobUIConfig `hcl:"ui,block"`
|
||||||
VaultToken *string `mapstructure:"vault_token" hcl:"vault_token,optional"`
|
|
||||||
|
|
||||||
/* Fields set by server, not sourced from job config file */
|
/* Fields set by server, not sourced from job config file */
|
||||||
|
|
||||||
@@ -1001,6 +1143,7 @@ type Job struct {
|
|||||||
CreateIndex *uint64
|
CreateIndex *uint64
|
||||||
ModifyIndex *uint64
|
ModifyIndex *uint64
|
||||||
JobModifyIndex *uint64
|
JobModifyIndex *uint64
|
||||||
|
VersionTag *JobVersionTag
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsPeriodic returns whether a job is periodic.
|
// IsPeriodic returns whether a job is periodic.
|
||||||
@@ -1049,15 +1192,9 @@ func (j *Job) Canonicalize() {
|
|||||||
if j.AllAtOnce == nil {
|
if j.AllAtOnce == nil {
|
||||||
j.AllAtOnce = pointerOf(false)
|
j.AllAtOnce = pointerOf(false)
|
||||||
}
|
}
|
||||||
if j.ConsulToken == nil {
|
|
||||||
j.ConsulToken = pointerOf("")
|
|
||||||
}
|
|
||||||
if j.ConsulNamespace == nil {
|
if j.ConsulNamespace == nil {
|
||||||
j.ConsulNamespace = pointerOf("")
|
j.ConsulNamespace = pointerOf("")
|
||||||
}
|
}
|
||||||
if j.VaultToken == nil {
|
|
||||||
j.VaultToken = pointerOf("")
|
|
||||||
}
|
|
||||||
if j.VaultNamespace == nil {
|
if j.VaultNamespace == nil {
|
||||||
j.VaultNamespace = pointerOf("")
|
j.VaultNamespace = pointerOf("")
|
||||||
}
|
}
|
||||||
@@ -1107,6 +1244,10 @@ func (j *Job) Canonicalize() {
|
|||||||
for _, a := range j.Affinities {
|
for _, a := range j.Affinities {
|
||||||
a.Canonicalize()
|
a.Canonicalize()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if j.UI != nil {
|
||||||
|
j.UI.Canonicalize()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// LookupTaskGroup finds a task group by name
|
// LookupTaskGroup finds a task group by name
|
||||||
@@ -1279,6 +1420,15 @@ func (j *Job) AddSpread(s *Spread) *Job {
|
|||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (j *Job) GetScalingPoliciesPerTaskGroup() map[string]*ScalingPolicy {
|
||||||
|
ret := map[string]*ScalingPolicy{}
|
||||||
|
for _, tg := range j.TaskGroups {
|
||||||
|
ret[*tg.Name] = tg.Scaling
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
type WriteRequest struct {
|
type WriteRequest struct {
|
||||||
// The target region for this write
|
// The target region for this write
|
||||||
Region string
|
Region string
|
||||||
@@ -1325,18 +1475,6 @@ type JobRevertRequest struct {
|
|||||||
// version before reverting.
|
// version before reverting.
|
||||||
EnforcePriorVersion *uint64
|
EnforcePriorVersion *uint64
|
||||||
|
|
||||||
// ConsulToken is the Consul token that proves the submitter of the job revert
|
|
||||||
// has access to the Service Identity policies associated with the job's
|
|
||||||
// Consul Connect enabled services. This field is only used to transfer the
|
|
||||||
// token and is not stored after the Job revert.
|
|
||||||
ConsulToken string `json:",omitempty"`
|
|
||||||
|
|
||||||
// VaultToken is the Vault token that proves the submitter of the job revert
|
|
||||||
// has access to any Vault policies specified in the targeted job version. This
|
|
||||||
// field is only used to authorize the revert and is not stored after the Job
|
|
||||||
// revert.
|
|
||||||
VaultToken string `json:",omitempty"`
|
|
||||||
|
|
||||||
WriteRequest
|
WriteRequest
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1465,6 +1603,7 @@ type JobDispatchRequest struct {
|
|||||||
Payload []byte
|
Payload []byte
|
||||||
Meta map[string]string
|
Meta map[string]string
|
||||||
IdPrefixTemplate string
|
IdPrefixTemplate string
|
||||||
|
Priority int
|
||||||
}
|
}
|
||||||
|
|
||||||
type JobDispatchResponse struct {
|
type JobDispatchResponse struct {
|
||||||
@@ -1544,3 +1683,31 @@ func (j *Jobs) ActionExec(ctx context.Context,
|
|||||||
|
|
||||||
return s.run(ctx)
|
return s.run(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// JobStatusesRequest is used to get statuses for jobs,
|
||||||
|
// their allocations and deployments.
|
||||||
|
type JobStatusesRequest struct {
|
||||||
|
// Jobs may be optionally provided to request a subset of specific jobs.
|
||||||
|
Jobs []NamespacedID
|
||||||
|
// IncludeChildren will include child (batch) jobs in the response.
|
||||||
|
IncludeChildren bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type TagVersionRequest struct {
|
||||||
|
Version uint64
|
||||||
|
Description string
|
||||||
|
WriteRequest
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *Jobs) TagVersion(jobID string, version uint64, name string, description string, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
var tagRequest = &TagVersionRequest{
|
||||||
|
Version: version,
|
||||||
|
Description: description,
|
||||||
|
}
|
||||||
|
|
||||||
|
return j.client.put("/v1/job/"+url.PathEscape(jobID)+"/versions/"+name+"/tag", tagRequest, nil, q)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *Jobs) UntagVersion(jobID string, name string, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
return j.client.delete("/v1/job/"+url.PathEscape(jobID)+"/versions/"+name+"/tag", nil, nil, q)
|
||||||
|
}
|
||||||
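The tag helpers pair naturally with VersionByTag shown earlier; a sketch assuming a configured *api.Client, with the job ID, version number, and tag name as placeholders:

func tagKnownGood(client *api.Client) error {
	// Tag the currently deployed version so it can be looked up or diffed later.
	if _, err := client.Jobs().TagVersion("example", 7, "golden", "known-good build", nil); err != nil {
		return err
	}
	// Remove the tag once it is no longer needed.
	_, err := client.Jobs().UntagVersion("example", "golden", nil)
	return err
}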
|
|||||||
26  vendor/github.com/hashicorp/nomad/api/keyring.go  generated  vendored
@@ -6,6 +6,7 @@ package api
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"strconv"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Keyring is used to access the Variables keyring.
|
// Keyring is used to access the Variables keyring.
|
||||||
@@ -34,16 +35,18 @@ type RootKeyMeta struct {
|
|||||||
CreateIndex uint64
|
CreateIndex uint64
|
||||||
ModifyIndex uint64
|
ModifyIndex uint64
|
||||||
State RootKeyState
|
State RootKeyState
|
||||||
|
PublishTime int64
|
||||||
}
|
}
|
||||||
|
|
||||||
// RootKeyState enum describes the lifecycle of a root key.
|
// RootKeyState enum describes the lifecycle of a root key.
|
||||||
type RootKeyState string
|
type RootKeyState string
|
||||||
|
|
||||||
const (
|
const (
|
||||||
RootKeyStateInactive RootKeyState = "inactive"
|
RootKeyStateInactive RootKeyState = "inactive"
|
||||||
RootKeyStateActive = "active"
|
RootKeyStateActive = "active"
|
||||||
RootKeyStateRekeying = "rekeying"
|
RootKeyStateRekeying = "rekeying"
|
||||||
RootKeyStateDeprecated = "deprecated"
|
RootKeyStateDeprecated = "deprecated"
|
||||||
|
RootKeyStatePrepublished = "prepublished"
|
||||||
)
|
)
|
||||||
|
|
||||||
// List lists all the keyring metadata
|
// List lists all the keyring metadata
|
||||||
@@ -58,14 +61,17 @@ func (k *Keyring) List(q *QueryOptions) ([]*RootKeyMeta, *QueryMeta, error) {
|
|||||||
|
|
||||||
// Delete deletes a specific inactive key from the keyring
|
// Delete deletes a specific inactive key from the keyring
|
||||||
func (k *Keyring) Delete(opts *KeyringDeleteOptions, w *WriteOptions) (*WriteMeta, error) {
|
func (k *Keyring) Delete(opts *KeyringDeleteOptions, w *WriteOptions) (*WriteMeta, error) {
|
||||||
wm, err := k.client.delete(fmt.Sprintf("/v1/operator/keyring/key/%v",
|
wm, err := k.client.delete(fmt.Sprintf("/v1/operator/keyring/key/%v?force=%v",
|
||||||
url.PathEscape(opts.KeyID)), nil, nil, w)
|
url.PathEscape(opts.KeyID), strconv.FormatBool(opts.Force)), nil, nil, w)
|
||||||
return wm, err
|
return wm, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// KeyringDeleteOptions are parameters for the Delete API
|
// KeyringDeleteOptions are parameters for the Delete API
|
||||||
type KeyringDeleteOptions struct {
|
type KeyringDeleteOptions struct {
|
||||||
KeyID string // UUID
|
KeyID string // UUID
|
||||||
|
// Force can be used to force deletion of a root keyring that was used to encrypt
|
||||||
|
// an existing variable or to sign a workload identity
|
||||||
|
Force bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Rotate requests a key rotation
|
// Rotate requests a key rotation
|
||||||
@@ -78,6 +84,9 @@ func (k *Keyring) Rotate(opts *KeyringRotateOptions, w *WriteOptions) (*RootKeyM
|
|||||||
if opts.Full {
|
if opts.Full {
|
||||||
qp.Set("full", "true")
|
qp.Set("full", "true")
|
||||||
}
|
}
|
||||||
|
if opts.PublishTime > 0 {
|
||||||
|
qp.Set("publish_time", fmt.Sprintf("%d", opts.PublishTime))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
resp := &struct{ Key *RootKeyMeta }{}
|
resp := &struct{ Key *RootKeyMeta }{}
|
||||||
wm, err := k.client.put("/v1/operator/keyring/rotate?"+qp.Encode(), nil, resp, w)
|
wm, err := k.client.put("/v1/operator/keyring/rotate?"+qp.Encode(), nil, resp, w)
|
||||||
@@ -86,6 +95,7 @@ func (k *Keyring) Rotate(opts *KeyringRotateOptions, w *WriteOptions) (*RootKeyM
|
|||||||
|
|
||||||
// KeyringRotateOptions are parameters for the Rotate API
|
// KeyringRotateOptions are parameters for the Rotate API
|
||||||
type KeyringRotateOptions struct {
|
type KeyringRotateOptions struct {
|
||||||
Full bool
|
Full bool
|
||||||
Algorithm EncryptionAlgorithm
|
Algorithm EncryptionAlgorithm
|
||||||
|
PublishTime int64
|
||||||
}
|
}
|
||||||
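The new PublishTime and Force knobs can be combined to prepublish a replacement root key and then force-remove the old one. A hedged sketch assuming a configured *api.Client with fmt and time imported, that Rotate returns (*RootKeyMeta, *WriteMeta, error), and that PublishTime is UnixNano like the other timestamps in this API; oldKeyID is a placeholder:

func rotateKeyring(client *api.Client, oldKeyID string) error {
	// Prepublish a replacement key one hour from now (assumed UnixNano).
	meta, _, err := client.Keyring().Rotate(&api.KeyringRotateOptions{
		PublishTime: time.Now().Add(time.Hour).UnixNano(),
	}, nil)
	if err != nil {
		return err
	}
	fmt.Println("prepublished key in state:", meta.State)

	// Force-delete the old key even if it still protects variables or identities.
	_, err = client.Keyring().Delete(&api.KeyringDeleteOptions{KeyID: oldKeyID, Force: true}, nil)
	return err
}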
|
|||||||
15  vendor/github.com/hashicorp/nomad/api/namespace.go  generated  vendored
@@ -85,8 +85,10 @@ type Namespace struct {
|
|||||||
// NamespaceCapabilities represents a set of capabilities allowed for this
|
// NamespaceCapabilities represents a set of capabilities allowed for this
|
||||||
// namespace, to be checked at job submission time.
|
// namespace, to be checked at job submission time.
|
||||||
type NamespaceCapabilities struct {
|
type NamespaceCapabilities struct {
|
||||||
EnabledTaskDrivers []string `hcl:"enabled_task_drivers"`
|
EnabledTaskDrivers []string `hcl:"enabled_task_drivers"`
|
||||||
DisabledTaskDrivers []string `hcl:"disabled_task_drivers"`
|
DisabledTaskDrivers []string `hcl:"disabled_task_drivers"`
|
||||||
|
EnabledNetworkModes []string `hcl:"enabled_network_modes"`
|
||||||
|
DisabledNetworkModes []string `hcl:"disabled_network_modes"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// NamespaceNodePoolConfiguration stores configuration about node pools for a
|
// NamespaceNodePoolConfiguration stores configuration about node pools for a
|
||||||
@@ -156,3 +158,12 @@ func (n NamespaceIndexSort) Less(i, j int) bool {
|
|||||||
func (n NamespaceIndexSort) Swap(i, j int) {
|
func (n NamespaceIndexSort) Swap(i, j int) {
|
||||||
n[i], n[j] = n[j], n[i]
|
n[i], n[j] = n[j], n[i]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NamespacedID is used for things that are unique only per-namespace,
|
||||||
|
// such as jobs.
|
||||||
|
type NamespacedID struct {
|
||||||
|
// Namespace is the Name of the Namespace
|
||||||
|
Namespace string
|
||||||
|
// ID is the ID of the namespaced object (e.g. Job ID)
|
||||||
|
ID string
|
||||||
|
}
|
||||||
|
|||||||
9  vendor/github.com/hashicorp/nomad/api/nodes.go  generated  vendored
@@ -126,7 +126,7 @@ func (n *Nodes) UpdateDrain(nodeID string, spec *DrainSpec, markEligible bool, q
|
|||||||
return resp, err
|
return resp, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateDrainWithMeta is used to update the drain strategy for a given node. If
|
// UpdateDrainOpts is used to update the drain strategy for a given node. If
|
||||||
// markEligible is true and the drain is being removed, the node will be marked
|
// markEligible is true and the drain is being removed, the node will be marked
|
||||||
// as having its scheduling being eligible
|
// as having its scheduling being eligible
|
||||||
func (n *Nodes) UpdateDrainOpts(nodeID string, opts *DrainOptions, q *WriteOptions) (*NodeDrainUpdateResponse,
|
func (n *Nodes) UpdateDrainOpts(nodeID string, opts *DrainOptions, q *WriteOptions) (*NodeDrainUpdateResponse,
|
||||||
@@ -478,7 +478,7 @@ func (n *Nodes) GC(nodeID string, q *QueryOptions) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO Add tests
|
// GcAlloc - TODO Add tests
|
||||||
func (n *Nodes) GcAlloc(allocID string, q *QueryOptions) error {
|
func (n *Nodes) GcAlloc(allocID string, q *QueryOptions) error {
|
||||||
path := fmt.Sprintf("/v1/client/allocation/%s/gc", allocID)
|
path := fmt.Sprintf("/v1/client/allocation/%s/gc", allocID)
|
||||||
_, err := n.client.query(path, nil, q)
|
_, err := n.client.query(path, nil, q)
|
||||||
@@ -517,6 +517,8 @@ type DriverInfo struct {
|
|||||||
type HostVolumeInfo struct {
|
type HostVolumeInfo struct {
|
||||||
Path string
|
Path string
|
||||||
ReadOnly bool
|
ReadOnly bool
|
||||||
|
// ID is set for dynamic host volumes only.
|
||||||
|
ID string
|
||||||
}
|
}
|
||||||
|
|
||||||
// HostNetworkInfo is used to return metadata about a given HostNetwork
|
// HostNetworkInfo is used to return metadata about a given HostNetwork
|
||||||
@@ -564,12 +566,14 @@ type Node struct {
|
|||||||
Events []*NodeEvent
|
Events []*NodeEvent
|
||||||
Drivers map[string]*DriverInfo
|
Drivers map[string]*DriverInfo
|
||||||
HostVolumes map[string]*HostVolumeInfo
|
HostVolumes map[string]*HostVolumeInfo
|
||||||
|
GCVolumesOnNodeGC bool
|
||||||
HostNetworks map[string]*HostNetworkInfo
|
HostNetworks map[string]*HostNetworkInfo
|
||||||
CSIControllerPlugins map[string]*CSIInfo
|
CSIControllerPlugins map[string]*CSIInfo
|
||||||
CSINodePlugins map[string]*CSIInfo
|
CSINodePlugins map[string]*CSIInfo
|
||||||
LastDrain *DrainMetadata
|
LastDrain *DrainMetadata
|
||||||
CreateIndex uint64
|
CreateIndex uint64
|
||||||
ModifyIndex uint64
|
ModifyIndex uint64
|
||||||
|
NodeMaxAllocs int
|
||||||
}
|
}
|
||||||
|
|
||||||
type NodeResources struct {
|
type NodeResources struct {
|
||||||
@@ -783,6 +787,7 @@ type HostStats struct {
|
|||||||
Memory *HostMemoryStats
|
Memory *HostMemoryStats
|
||||||
CPU []*HostCPUStats
|
CPU []*HostCPUStats
|
||||||
DiskStats []*HostDiskStats
|
DiskStats []*HostDiskStats
|
||||||
|
AllocDirStats *HostDiskStats
|
||||||
DeviceStats []*DeviceGroupStats
|
DeviceStats []*DeviceGroupStats
|
||||||
Uptime uint64
|
Uptime uint64
|
||||||
CPUTicksConsumed float64
|
CPUTicksConsumed float64
|
||||||
|
|||||||
107  vendor/github.com/hashicorp/nomad/api/operator.go  generated  vendored
@@ -8,6 +8,7 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
@@ -66,7 +67,7 @@ func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, e
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
r.setQueryOptions(q)
|
r.setQueryOptions(q)
|
||||||
_, resp, err := requireOK(op.c.doRequest(r))
|
_, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -82,6 +83,10 @@ func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, e
|
|||||||
// RaftRemovePeerByAddress is used to kick a stale peer (one that it in the Raft
|
// RaftRemovePeerByAddress is used to kick a stale peer (one that it in the Raft
|
||||||
// quorum but no longer known to Serf or the catalog) by address in the form of
|
// quorum but no longer known to Serf or the catalog) by address in the form of
|
||||||
// "IP:port".
|
// "IP:port".
|
||||||
|
//
|
||||||
|
// DEPRECATED: this method supported Raft Protocol v2, which was removed from
|
||||||
|
// Nomad in 1.4.0. The address parameter of the HTTP endpoint has been made
|
||||||
|
// non-function in Nomad 1.10.x and will be removed in Nomad 1.12.0.
|
||||||
func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
|
func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
|
||||||
r, err := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
|
r, err := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -91,7 +96,7 @@ func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) err
|
|||||||
|
|
||||||
r.params.Set("address", address)
|
r.params.Set("address", address)
|
||||||
|
|
||||||
_, resp, err := requireOK(op.c.doRequest(r))
|
_, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -111,7 +116,7 @@ func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error {
|
|||||||
|
|
||||||
r.params.Set("id", id)
|
r.params.Set("id", id)
|
||||||
|
|
||||||
_, resp, err := requireOK(op.c.doRequest(r))
|
_, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -131,7 +136,7 @@ func (op *Operator) RaftTransferLeadershipByAddress(address string, q *WriteOpti
|
|||||||
|
|
||||||
r.params.Set("address", address)
|
r.params.Set("address", address)
|
||||||
|
|
||||||
_, resp, err := requireOK(op.c.doRequest(r))
|
_, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -151,7 +156,7 @@ func (op *Operator) RaftTransferLeadershipByID(id string, q *WriteOptions) error
|
|||||||
|
|
||||||
r.params.Set("id", id)
|
r.params.Set("id", id)
|
||||||
|
|
||||||
_, resp, err := requireOK(op.c.doRequest(r))
|
_, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -262,7 +267,7 @@ func (op *Operator) Snapshot(q *QueryOptions) (io.ReadCloser, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
r.setQueryOptions(q)
|
r.setQueryOptions(q)
|
||||||
_, resp, err := requireOK(op.c.doRequest(r))
|
_, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -273,7 +278,6 @@ func (op *Operator) Snapshot(q *QueryOptions) (io.ReadCloser, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
io.Copy(io.Discard, resp.Body)
|
io.Copy(io.Discard, resp.Body)
|
||||||
resp.Body.Close()
|
resp.Body.Close()
|
||||||
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -355,7 +359,7 @@ func (op *Operator) ApplyLicense(license string, opts *ApplyLicenseOptions, q *W
|
|||||||
r.setWriteOptions(q)
|
r.setWriteOptions(q)
|
||||||
r.body = strings.NewReader(license)
|
r.body = strings.NewReader(license)
|
||||||
|
|
||||||
rtt, resp, err := requireOK(op.c.doRequest(r))
|
rtt, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -375,7 +379,7 @@ func (op *Operator) LicenseGet(q *QueryOptions) (*LicenseReply, *QueryMeta, erro
|
|||||||
req.setQueryOptions(q)
|
req.setQueryOptions(q)
|
||||||
|
|
||||||
var reply LicenseReply
|
var reply LicenseReply
|
||||||
rtt, resp, err := op.c.doRequest(req)
|
rtt, resp, err := op.c.doRequest(req) //nolint:bodyclose
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
@@ -412,3 +416,88 @@ type LeadershipTransferResponse struct {
|
|||||||
|
|
||||||
WriteMeta
|
WriteMeta
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// VaultWorkloadIdentityUpgradeCheck is the result of verifying if the cluster
|
||||||
|
// is ready to switch to workload identities for Vault.
|
||||||
|
type VaultWorkloadIdentityUpgradeCheck struct {
|
||||||
|
// JobsWithoutVaultIdentity is the list of jobs that have a `vault` block
|
||||||
|
// but do not have an `identity` for Vault.
|
||||||
|
JobsWithoutVaultIdentity []*JobListStub
|
||||||
|
|
||||||
|
// OutdatedNodes is the list of nodes running a version of Nomad that does
|
||||||
|
// not support workload identities for Vault.
|
||||||
|
OutdatedNodes []*NodeListStub
|
||||||
|
|
||||||
|
// VaultTokens is the list of Vault ACL token accessors that Nomad created
|
||||||
|
// and will no longer manage after the cluster is migrated to workload
|
||||||
|
// identities.
|
||||||
|
VaultTokens []*VaultAccessor
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ready returns true if the cluster is ready to migrate to workload identities
|
||||||
|
// with Vault.
|
||||||
|
func (v *VaultWorkloadIdentityUpgradeCheck) Ready() bool {
|
||||||
|
return v != nil &&
|
||||||
|
len(v.VaultTokens) == 0 &&
|
||||||
|
len(v.OutdatedNodes) == 0 &&
|
||||||
|
len(v.JobsWithoutVaultIdentity) == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// VaultAccessor is a Vault ACL token created by Nomad for a task to access
|
||||||
|
// Vault using the legacy authentication flow.
|
||||||
|
type VaultAccessor struct {
|
||||||
|
// AllocID is the ID of the allocation that requested this token.
|
||||||
|
AllocID string
|
||||||
|
|
||||||
|
// Task is the name of the task that requested this token.
|
||||||
|
Task string
|
||||||
|
|
||||||
|
// NodeID is the ID of the node running the allocation that requested this
|
||||||
|
// token.
|
||||||
|
NodeID string
|
||||||
|
|
||||||
|
// Accessor is the Vault ACL token accessor ID.
|
||||||
|
Accessor string
|
||||||
|
|
||||||
|
// CreationTTL is the TTL set when the token was created.
|
||||||
|
CreationTTL int
|
||||||
|
|
||||||
|
// CreateIndex is the Raft index when the token was created.
|
||||||
|
CreateIndex uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpgradeCheckVaultWorkloadIdentity retrieves the cluster status for migrating
|
||||||
|
// to workload identities with Vault.
|
||||||
|
func (op *Operator) UpgradeCheckVaultWorkloadIdentity(q *QueryOptions) (*VaultWorkloadIdentityUpgradeCheck, *QueryMeta, error) {
|
||||||
|
var resp VaultWorkloadIdentityUpgradeCheck
|
||||||
|
qm, err := op.c.query("/v1/operator/upgrade-check/vault-workload-identity", &resp, q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return &resp, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type OperatorUtilizationOptions struct {
|
||||||
|
TodayOnly bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type OperatorUtilizationSnapshotResponse struct {
|
||||||
|
// Bundle is the JSON serialized utilization reporting bundle.
|
||||||
|
Bundle []byte
|
||||||
|
WriteMeta
|
||||||
|
}
|
||||||
|
|
||||||
|
// Utilization retrieves a utilization reporting bundle (Nomad Enterprise only).
|
||||||
|
func (op *Operator) Utilization(opts *OperatorUtilizationOptions, w *WriteOptions) (*OperatorUtilizationSnapshotResponse, *WriteMeta, error) {
|
||||||
|
resp := &OperatorUtilizationSnapshotResponse{}
|
||||||
|
v := url.Values{}
|
||||||
|
if opts.TodayOnly {
|
||||||
|
v.Add("today", "true")
|
||||||
|
}
|
||||||
|
|
||||||
|
wm, err := op.c.post("/v1/operator/utilization?"+v.Encode(), nil, resp, w)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return resp, wm, nil
|
||||||
|
}
|
||||||
|
80 vendor/github.com/hashicorp/nomad/api/operator_autopilot.go generated vendored
@@ -178,6 +178,86 @@ type OperatorHealthReply struct {
 
     // Servers holds the health of each server.
     Servers []ServerHealth
+
+    // The ID of the current leader.
+    Leader string
+
+    // List of servers that are voters in the Raft configuration.
+    Voters []string
+
+    // ReadReplicas holds the list of servers that are
+    // read replicas in the Raft configuration. (Enterprise only)
+    ReadReplicas []string `json:",omitempty"`
+
+    // RedundancyZones holds the list of servers in each redundancy zone.
+    // (Enterprise only)
+    RedundancyZones map[string]AutopilotZone `json:",omitempty"`
+
+    // Upgrade holds the current upgrade status.
+    Upgrade *AutopilotUpgrade `json:",omitempty"`
+
+    // The number of servers that could be lost without an outage
+    // occurring if all the voters don't fail at once. (Enterprise only)
+    OptimisticFailureTolerance int `json:",omitempty"`
+}
+
+// AutopilotZone holds the list of servers in a redundancy zone. (Enterprise only)
+type AutopilotZone struct {
+    // Servers holds the list of servers in the redundancy zone.
+    Servers []string
+
+    // Voters holds the list of servers that are voters in the redundancy zone.
+    Voters []string
+
+    // FailureTolerance is the number of servers that could be lost without an
+    // outage occurring.
+    FailureTolerance int
+}
+
+// AutopilotUpgrade holds the current upgrade status. (Enterprise only)
+type AutopilotUpgrade struct {
+    // Status of the upgrade.
+    Status string
+
+    // TargetVersion is the version that the cluster is upgrading to.
+    TargetVersion string
+
+    // TargetVersionVoters holds the list of servers that are voters in the Raft
+    // configuration of the TargetVersion.
+    TargetVersionVoters []string
+
+    // TargetVersionNonVoters holds the list of servers that are non-voters in
+    // the Raft configuration of the TargetVersion.
+    TargetVersionNonVoters []string
+
+    // TargetVersionReadReplicas holds the list of servers that are read
+    // replicas in the Raft configuration of the TargetVersion.
+    TargetVersionReadReplicas []string
+
+    // OtherVersionVoters holds the list of servers that are voters in the Raft
+    // configuration of a version other than the TargetVersion.
+    OtherVersionVoters []string
+
+    // OtherVersionNonVoters holds the list of servers that are non-voters in
+    // the Raft configuration of a version other than the TargetVersion.
+    OtherVersionNonVoters []string
+
+    // OtherVersionReadReplicas holds the list of servers that are read replicas
+    // in the Raft configuration of a version other than the TargetVersion.
+    OtherVersionReadReplicas []string
+
+    // RedundancyZones holds the list of servers in each redundancy zone for the
+    // TargetVersion.
+    RedundancyZones map[string]AutopilotZoneUpgradeVersions
+}
+
+// AutopilotZoneUpgradeVersions holds the list of servers
+// in a redundancy zone for a specific version. (Enterprise only)
+type AutopilotZoneUpgradeVersions struct {
+    TargetVersionVoters []string
+    TargetVersionNonVoters []string
+    OtherVersionVoters []string
+    OtherVersionNonVoters []string
 }
 
 // AutopilotGetConfiguration is used to query the current Autopilot configuration.
30 vendor/github.com/hashicorp/nomad/api/quota.go generated vendored
@@ -51,7 +51,7 @@ func (q *Quotas) ListUsage(qo *QueryOptions) ([]*QuotaUsage, *QueryMeta, error)
     return resp, qm, nil
 }
 
-// PrefixList is used to do a PrefixList search over quota usages
+// PrefixListUsage is used to do a PrefixList search over quota usages
 func (q *Quotas) PrefixListUsage(prefix string, qo *QueryOptions) ([]*QuotaUsage, *QueryMeta, error) {
     if qo == nil {
         qo = &QueryOptions{Prefix: prefix}
@@ -127,17 +127,43 @@ type QuotaLimit struct {
     // referencing namespace in the region. A value of zero is treated as
     // unlimited and a negative value is treated as fully disallowed. This is
     // useful for once we support GPUs
-    RegionLimit *Resources
+    RegionLimit *QuotaResources
 
     // VariablesLimit is the maximum total size of all variables
     // Variable.EncryptedData. A value of zero is treated as unlimited and a
     // negative value is treated as fully disallowed.
+    //
+    // DEPRECATED: use RegionLimit.Storage.VariablesMB instead. This field will
+    // be removed in Nomad 1.12.0.
     VariablesLimit *int `mapstructure:"variables_limit" hcl:"variables_limit,optional"`
 
     // Hash is the hash of the object and is used to make replication efficient.
     Hash []byte
 }
+
+type QuotaResources struct {
+    CPU *int `hcl:"cpu,optional"`
+    Cores *int `hcl:"cores,optional"`
+    MemoryMB *int `mapstructure:"memory" hcl:"memory,optional"`
+    MemoryMaxMB *int `mapstructure:"memory_max" hcl:"memory_max,optional"`
+    Devices []*RequestedDevice `hcl:"device,block"`
+    NUMA *NUMAResource `hcl:"numa,block"`
+    SecretsMB *int `mapstructure:"secrets" hcl:"secrets,optional"`
+    Storage *QuotaStorageResources `mapstructure:"storage" hcl:"storage,block"`
+}
+
+type QuotaStorageResources struct {
+    // VariablesMB is the maximum total size of all variables
+    // Variable.EncryptedData, in megabytes (2^20 bytes). A value of zero is
+    // treated as unlimited and a negative value is treated as fully disallowed.
+    VariablesMB int `hcl:"variables"`
+
+    // HostVolumesMB is the maximum provisioned size of all dynamic host
+    // volumes, in megabytes (2^20 bytes). A value of zero is treated as
+    // unlimited and a negative value is treated as fully disallowed.
+    HostVolumesMB int `hcl:"host_volumes"`
+}
 
 // QuotaUsage is the resource usage of a Quota
 type QuotaUsage struct {
     Name string
32 vendor/github.com/hashicorp/nomad/api/resources.go generated vendored
@@ -4,6 +4,7 @@
 package api
 
 import (
+    "slices"
     "strconv"
 )
 
@@ -18,6 +19,7 @@ type Resources struct {
     Networks []*NetworkResource `hcl:"network,block"`
     Devices []*RequestedDevice `hcl:"device,block"`
     NUMA *NUMAResource `hcl:"numa,block"`
+    SecretsMB *int `mapstructure:"secrets" hcl:"secrets,optional"`
 
     // COMPAT(0.10)
     // XXX Deprecated. Please do not use. The field will be removed in Nomad
@@ -103,6 +105,9 @@ func (r *Resources) Merge(other *Resources) {
     if other.NUMA != nil {
         r.NUMA = other.NUMA.Copy()
     }
+    if other.SecretsMB != nil {
+        r.SecretsMB = other.SecretsMB
+    }
 }
 
 // NUMAResource contains the NUMA affinity request for scheduling purposes.
@@ -111,6 +116,10 @@ func (r *Resources) Merge(other *Resources) {
 type NUMAResource struct {
     // Affinity must be one of "none", "prefer", "require".
     Affinity string `hcl:"affinity,optional"`
+
+    // Devices is the subset of devices requested by the task that must share
+    // the same numa node, along with the tasks reserved cpu cores.
+    Devices []string `hcl:"devices,optional"`
 }
 
 func (n *NUMAResource) Copy() *NUMAResource {
@@ -119,6 +128,7 @@ func (n *NUMAResource) Copy() *NUMAResource {
     }
     return &NUMAResource{
         Affinity: n.Affinity,
+        Devices: slices.Clone(n.Devices),
     }
 }
 
@@ -129,13 +139,17 @@ func (n *NUMAResource) Canonicalize() {
     if n.Affinity == "" {
         n.Affinity = "none"
     }
+    if len(n.Devices) == 0 {
+        n.Devices = nil
+    }
 }
 
 type Port struct {
     Label string `hcl:",label"`
     Value int `hcl:"static,optional"`
     To int `hcl:"to,optional"`
     HostNetwork string `hcl:"host_network,optional"`
+    IgnoreCollision bool `hcl:"ignore_collision,optional"`
 }
 
 type DNSConfig struct {
@@ -143,6 +157,9 @@ type DNSConfig struct {
     Searches []string `mapstructure:"searches" hcl:"searches,optional"`
     Options []string `mapstructure:"options" hcl:"options,optional"`
 }
+
+type CNIConfig struct {
+    Args map[string]string `hcl:"args,optional"`
+}
 
 // NetworkResource is used to describe required network
 // resources of a given task.
@@ -160,11 +177,14 @@ type NetworkResource struct {
     // XXX Deprecated. Please do not use. The field will be removed in Nomad
     // 0.13 and is only being kept to allow any references to be removed before
     // then.
     MBits *int `hcl:"mbits,optional"`
+    CNI *CNIConfig `hcl:"cni,block"`
 }
 
+// Megabits should not be used.
+//
 // COMPAT(0.13)
-// XXX Deprecated. Please do not use. The method will be removed in Nomad
+// Deprecated. Please do not use. The method will be removed in Nomad
 // 0.13 and is only being kept to allow any references to be removed before
 // then.
 func (n *NetworkResource) Megabits() int {
4 vendor/github.com/hashicorp/nomad/api/retry.go generated vendored
@@ -39,7 +39,7 @@ func (c *Client) retryPut(ctx context.Context, endpoint string, in, out any, q *
     var err error
     var wm *WriteMeta
 
-    attemptDelay := time.Duration(100 * time.Second) // Avoid a tick before starting
+    attemptDelay := 100 * time.Second // Avoid a tick before starting
     startTime := time.Now()
 
     t := time.NewTimer(attemptDelay)
@@ -60,7 +60,7 @@ func (c *Client) retryPut(ctx context.Context, endpoint string, in, out any, q *
         wm, err = c.put(endpoint, in, out, q)
 
         // Maximum retry period is up, don't retry
-        if c.config.retryOptions.maxToLastCall != 0 && time.Now().Sub(startTime) > c.config.retryOptions.maxToLastCall {
+        if c.config.retryOptions.maxToLastCall != 0 && time.Since(startTime) > c.config.retryOptions.maxToLastCall {
            break
        }
 
5 vendor/github.com/hashicorp/nomad/api/scaling.go generated vendored
@@ -57,8 +57,13 @@ type ScalingRequest struct {
     Error bool
     Meta map[string]interface{}
     WriteRequest
+
     // this is effectively a job update, so we need the ability to override policy.
     PolicyOverride bool
+
+    // If JobModifyIndex is set then the job will only be scaled if it matches
+    // the current Jobs index. The JobModifyIndex is ignored if 0.
+    JobModifyIndex uint64
 }
 
 // ScalingPolicy is the user-specified API object for an autoscaling policy
3 vendor/github.com/hashicorp/nomad/api/search.go generated vendored
@@ -64,7 +64,8 @@ func (s *Search) FuzzySearch(text string, context contexts.Context, q *QueryOpti
 // ID.
 //
 // e.g. A Task-level service would have scope like,
-// ["<namespace>", "<job>", "<group>", "<task>"]
+//
+// ["<namespace>", "<job>", "<group>", "<task>"]
 type FuzzyMatch struct {
     ID string // ID is UUID or Name of object
     Scope []string `json:",omitempty"` // IDs of parent objects
7 vendor/github.com/hashicorp/nomad/api/sentinel.go generated vendored
@@ -82,3 +82,10 @@ type SentinelPolicyListStub struct {
     CreateIndex uint64
     ModifyIndex uint64
 }
+
+// Possible Sentinel scopes
+const (
+    SentinelScopeSubmitJob = "submit-job"
+    SentinelScopeSubmitHostVolume = "submit-host-volume"
+    SentinelScopeSubmitCSIVolume = "submit-csi-volume"
+)
33 vendor/github.com/hashicorp/nomad/api/services.go generated vendored
@@ -212,6 +212,7 @@ type ServiceCheck struct {
     Interval time.Duration `hcl:"interval,optional"`
     Timeout time.Duration `hcl:"timeout,optional"`
     InitialStatus string `mapstructure:"initial_status" hcl:"initial_status,optional"`
+    Notes string `hcl:"notes,optional"`
     TLSServerName string `mapstructure:"tls_server_name" hcl:"tls_server_name,optional"`
     TLSSkipVerify bool `mapstructure:"tls_skip_verify" hcl:"tls_skip_verify,optional"`
     Header map[string][]string `hcl:"header,block"`
@@ -222,6 +223,7 @@ type ServiceCheck struct {
     TaskName string `mapstructure:"task" hcl:"task,optional"`
     SuccessBeforePassing int `mapstructure:"success_before_passing" hcl:"success_before_passing,optional"`
     FailuresBeforeCritical int `mapstructure:"failures_before_critical" hcl:"failures_before_critical,optional"`
+    FailuresBeforeWarning int `mapstructure:"failures_before_warning" hcl:"failures_before_warning,optional"`
     Body string `hcl:"body,optional"`
     OnUpdate string `mapstructure:"on_update" hcl:"on_update,optional"`
 }
@@ -244,13 +246,17 @@ type Service struct {
     TaskName string `mapstructure:"task" hcl:"task,optional"`
     OnUpdate string `mapstructure:"on_update" hcl:"on_update,optional"`
     Identity *WorkloadIdentity `hcl:"identity,block"`
+    Weights *ServiceWeights `mapstructure:"weights" hcl:"weights,block"`
 
     // Provider defines which backend system provides the service registration,
     // either "consul" (default) or "nomad".
     Provider string `hcl:"provider,optional"`
 
     // Cluster is valid only for Nomad Enterprise with provider: consul
-    Cluster string `hcl:"cluster,optional`
+    Cluster string `hcl:"cluster,optional"`
+
+    // Kind defines the consul service kind, valid only when provider: consul
+    Kind string `hcl:"kind,optional"`
 }
 
 const (
@@ -305,6 +311,7 @@ func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) {
     }
 
     s.Connect.Canonicalize()
+    s.Weights.Canonicalize()
 
     // Canonicalize CheckRestart on Checks and merge Service.CheckRestart
     // into each check.
@@ -320,9 +327,33 @@ func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) {
             s.Checks[i].FailuresBeforeCritical = 0
         }
 
+        if s.Checks[i].FailuresBeforeWarning < 0 {
+            s.Checks[i].FailuresBeforeWarning = 0
+        }
+
         // Inhert Service
         if s.Checks[i].OnUpdate == "" {
             s.Checks[i].OnUpdate = s.OnUpdate
         }
     }
 }
+
+// ServiceWeights is the jobspec block which configures how a service instance
+// is weighted in a DNS SRV request based on the service's health status.
+type ServiceWeights struct {
+    Passing int `hcl:"passing,optional"`
+    Warning int `hcl:"warning,optional"`
+}
+
+func (weights *ServiceWeights) Canonicalize() {
+    if weights == nil {
+        return
+    }
+
+    if weights.Passing <= 0 {
+        weights.Passing = 1
+    }
+    if weights.Warning <= 0 {
+        weights.Warning = 1
+    }
+}
14 vendor/github.com/hashicorp/nomad/api/task_sched.go generated vendored Normal file
@@ -0,0 +1,14 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+type TaskSchedule struct {
+    Cron *TaskScheduleCron `hcl:"cron,block"`
+}
+
+type TaskScheduleCron struct {
+    Start string `hcl:"start,optional"`
+    End string `hcl:"end,optional"`
+    Timezone string `hcl:"timezone,optional"`
+}
200 vendor/github.com/hashicorp/nomad/api/tasks.go generated vendored
@@ -11,6 +11,8 @@ import (
     "time"
 )
 
+type ReconcileOption = string
+
 const (
     // RestartPolicyModeDelay causes an artificial delay till the next interval is
     // reached when the specified attempts have been reached in the interval.
@@ -19,6 +21,14 @@ const (
     // RestartPolicyModeFail causes a job to fail if the specified number of
     // attempts are reached within an interval.
     RestartPolicyModeFail = "fail"
+
+    // ReconcileOption is used to specify the behavior of the reconciliation process
+    // between the original allocations and the replacements when a previously
+    // disconnected client comes back online.
+    ReconcileOptionKeepOriginal = "keep_original"
+    ReconcileOptionKeepReplacement = "keep_replacement"
+    ReconcileOptionBestScore = "best_score"
+    ReconcileOptionLongestRunning = "longest_running"
 )
 
 // MemoryStats holds memory usage related stats
@@ -113,6 +123,37 @@ func (r *RestartPolicy) Merge(rp *RestartPolicy) {
     }
 }
 
+// Disconnect strategy defines how both clients and server should behave in case of
+// disconnection between them.
+type DisconnectStrategy struct {
+    // Defines for how long the server will consider the unresponsive node as
+    // disconnected but alive instead of lost.
+    LostAfter *time.Duration `mapstructure:"lost_after" hcl:"lost_after,optional"`
+
+    // Defines for how long a disconnected client will keep its allocations running.
+    StopOnClientAfter *time.Duration `mapstructure:"stop_on_client_after" hcl:"stop_on_client_after,optional"`
+
+    // A boolean field used to define if the allocations should be replaced while
+    // it's considered disconnected.
+    Replace *bool `mapstructure:"replace" hcl:"replace,optional"`
+
+    // Once the disconnected node starts reporting again, it will define which
+    // instances to keep: the original allocations, the replacement, the one
+    // running on the node with the best score as it is currently implemented,
+    // or the allocation that has been running continuously the longest.
+    Reconcile *ReconcileOption `mapstructure:"reconcile" hcl:"reconcile,optional"`
+}
+
+func (ds *DisconnectStrategy) Canonicalize() {
+    if ds.Replace == nil {
+        ds.Replace = pointerOf(true)
+    }
+
+    if ds.Reconcile == nil {
+        ds.Reconcile = pointerOf(ReconcileOptionBestScore)
+    }
+}
+
 // Reschedule configures how Tasks are rescheduled when they crash or fail.
 type ReschedulePolicy struct {
     // Attempts limits the number of rescheduling attempts that can occur in an interval.
@@ -195,7 +236,7 @@ func NewAffinity(lTarget string, operand string, rTarget string, weight int8) *A
         LTarget: lTarget,
         RTarget: rTarget,
         Operand: operand,
-        Weight: pointerOf(int8(weight)),
+        Weight: pointerOf(weight),
     }
 }
 
@@ -205,6 +246,14 @@ func (a *Affinity) Canonicalize() {
     }
 }
 
+func NewDefaultDisconnectStrategy() *DisconnectStrategy {
+    return &DisconnectStrategy{
+        LostAfter: pointerOf(0 * time.Minute),
+        Replace: pointerOf(true),
+        Reconcile: pointerOf(ReconcileOptionBestScore),
+    }
+}
+
 func NewDefaultReschedulePolicy(jobType string) *ReschedulePolicy {
     var dp *ReschedulePolicy
     switch jobType {
@@ -268,14 +317,14 @@ func (r *ReschedulePolicy) Copy() *ReschedulePolicy {
     return nrp
 }
 
-func (p *ReschedulePolicy) String() string {
-    if p == nil {
+func (r *ReschedulePolicy) String() string {
+    if r == nil {
         return ""
     }
-    if *p.Unlimited {
-        return fmt.Sprintf("unlimited with %v delay, max_delay = %v", *p.DelayFunction, *p.MaxDelay)
+    if *r.Unlimited {
+        return fmt.Sprintf("unlimited with %v delay, max_delay = %v", *r.DelayFunction, *r.MaxDelay)
     }
-    return fmt.Sprintf("%v in %v with %v delay, max_delay = %v", *p.Attempts, *p.Interval, *p.DelayFunction, *p.MaxDelay)
+    return fmt.Sprintf("%v in %v with %v delay, max_delay = %v", *r.Attempts, *r.Interval, *r.DelayFunction, *r.MaxDelay)
 }
 
 // Spread is used to serialize task group allocation spread preferences
@@ -301,7 +350,7 @@ func NewSpreadTarget(value string, percent uint8) *SpreadTarget {
 func NewSpread(attribute string, weight int8, spreadTargets []*SpreadTarget) *Spread {
     return &Spread{
         Attribute: attribute,
-        Weight: pointerOf(int8(weight)),
+        Weight: pointerOf(weight),
         SpreadTarget: spreadTargets,
     }
 }
@@ -406,6 +455,7 @@ type VolumeRequest struct {
     Type string `hcl:"type,optional"`
     Source string `hcl:"source,optional"`
     ReadOnly bool `hcl:"read_only,optional"`
+    Sticky bool `hcl:"sticky,optional"`
     AccessMode string `hcl:"access_mode,optional"`
     AttachmentMode string `hcl:"attachment_mode,optional"`
     MountOptions *CSIMountOptions `hcl:"mount_options,block"`
@@ -426,40 +476,50 @@ type VolumeMount struct {
     Destination *string `hcl:"destination,optional"`
     ReadOnly *bool `mapstructure:"read_only" hcl:"read_only,optional"`
     PropagationMode *string `mapstructure:"propagation_mode" hcl:"propagation_mode,optional"`
+    SELinuxLabel *string `mapstructure:"selinux_label" hcl:"selinux_label,optional"`
 }
 
 func (vm *VolumeMount) Canonicalize() {
     if vm.PropagationMode == nil {
         vm.PropagationMode = pointerOf(VolumeMountPropagationPrivate)
     }
 
     if vm.ReadOnly == nil {
         vm.ReadOnly = pointerOf(false)
     }
+
+    if vm.SELinuxLabel == nil {
+        vm.SELinuxLabel = pointerOf("")
+    }
 }
 
 // TaskGroup is the unit of scheduling.
 type TaskGroup struct {
     Name *string `hcl:"name,label"`
     Count *int `hcl:"count,optional"`
     Constraints []*Constraint `hcl:"constraint,block"`
     Affinities []*Affinity `hcl:"affinity,block"`
     Tasks []*Task `hcl:"task,block"`
     Spreads []*Spread `hcl:"spread,block"`
     Volumes map[string]*VolumeRequest `hcl:"volume,block"`
     RestartPolicy *RestartPolicy `hcl:"restart,block"`
-    ReschedulePolicy *ReschedulePolicy `hcl:"reschedule,block"`
-    EphemeralDisk *EphemeralDisk `hcl:"ephemeral_disk,block"`
-    Update *UpdateStrategy `hcl:"update,block"`
-    Migrate *MigrateStrategy `hcl:"migrate,block"`
-    Networks []*NetworkResource `hcl:"network,block"`
-    Meta map[string]string `hcl:"meta,block"`
-    Services []*Service `hcl:"service,block"`
-    ShutdownDelay *time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"`
-    StopAfterClientDisconnect *time.Duration `mapstructure:"stop_after_client_disconnect" hcl:"stop_after_client_disconnect,optional"`
-    MaxClientDisconnect *time.Duration `mapstructure:"max_client_disconnect" hcl:"max_client_disconnect,optional"`
-    Scaling *ScalingPolicy `hcl:"scaling,block"`
-    Consul *Consul `hcl:"consul,block"`
-    PreventRescheduleOnLost *bool `hcl:"prevent_reschedule_on_lost,optional"`
+    Disconnect *DisconnectStrategy `hcl:"disconnect,block"`
+    ReschedulePolicy *ReschedulePolicy `hcl:"reschedule,block"`
+    EphemeralDisk *EphemeralDisk `hcl:"ephemeral_disk,block"`
+    Update *UpdateStrategy `hcl:"update,block"`
+    Migrate *MigrateStrategy `hcl:"migrate,block"`
+    Networks []*NetworkResource `hcl:"network,block"`
+    Meta map[string]string `hcl:"meta,block"`
+    Services []*Service `hcl:"service,block"`
+    ShutdownDelay *time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"`
+    // Deprecated: StopAfterClientDisconnect is deprecated in Nomad 1.8 and ignored in Nomad 1.10. Use Disconnect.StopOnClientAfter.
+    StopAfterClientDisconnect *time.Duration `mapstructure:"stop_after_client_disconnect" hcl:"stop_after_client_disconnect,optional"`
+    // Deprecated: MaxClientDisconnect is deprecated in Nomad 1.8.0 and ignored in Nomad 1.10. Use Disconnect.LostAfter.
+    MaxClientDisconnect *time.Duration `mapstructure:"max_client_disconnect" hcl:"max_client_disconnect,optional"`
+    Scaling *ScalingPolicy `hcl:"scaling,block"`
+    Consul *Consul `hcl:"consul,block"`
+    // Deprecated: PreventRescheduleOnLost is deprecated in Nomad 1.8.0 and ignored in Nomad 1.10. Use Disconnect.Replace.
+    PreventRescheduleOnLost *bool `hcl:"prevent_reschedule_on_lost,optional"`
 }
 
 // NewTaskGroup creates a new TaskGroup.
@@ -493,11 +553,10 @@ func (g *TaskGroup) Canonicalize(job *Job) {
     }
 
     // Merge job.consul onto group.consul
-    if g.Consul == nil {
-        g.Consul = new(Consul)
+    if g.Consul != nil {
+        g.Consul.MergeNamespace(job.ConsulNamespace)
+        g.Consul.Canonicalize()
     }
-    g.Consul.MergeNamespace(job.ConsulNamespace)
-    g.Consul.Canonicalize()
 
     // Merge the update policy from the job
     if ju, tu := job.Update != nil, g.Update != nil; ju && tu {
@@ -531,6 +590,7 @@ func (g *TaskGroup) Canonicalize(job *Job) {
     if g.ReschedulePolicy != nil {
         g.ReschedulePolicy.Canonicalize(*job.Type)
     }
+
     // Merge the migrate strategy from the job
     if jm, tm := job.Migrate != nil, g.Migrate != nil; jm && tm {
         jobMigrate := job.Migrate.Copy()
@@ -578,8 +638,9 @@ func (g *TaskGroup) Canonicalize(job *Job) {
     for _, s := range g.Services {
         s.Canonicalize(nil, g, job)
     }
-    if g.PreventRescheduleOnLost == nil {
-        g.PreventRescheduleOnLost = pointerOf(false)
+
+    if g.Disconnect != nil {
+        g.Disconnect.Canonicalize()
     }
 }
 
@@ -613,7 +674,7 @@ func (g *TaskGroup) Constrain(c *Constraint) *TaskGroup {
     return g
 }
 
-// AddMeta is used to add a meta k/v pair to a task group
+// SetMeta is used to add a meta k/v pair to a task group
 func (g *TaskGroup) SetMeta(key, val string) *TaskGroup {
     if g.Meta == nil {
         g.Meta = make(map[string]string)
@@ -646,6 +707,12 @@ func (g *TaskGroup) AddSpread(s *Spread) *TaskGroup {
     return g
 }
 
+// ScalingPolicy is used to add a new scaling policy to a task group.
+func (g *TaskGroup) ScalingPolicy(sp *ScalingPolicy) *TaskGroup {
+    g.Scaling = sp
+    return g
+}
+
 // LogConfig provides configuration for log rotation
 type LogConfig struct {
     MaxFiles *int `mapstructure:"max_files" hcl:"max_files,optional"`
@@ -690,13 +757,13 @@ const (
 )
 
 type TaskLifecycle struct {
-    Hook string `mapstructure:"hook" hcl:"hook,optional"`
+    Hook string `mapstructure:"hook" hcl:"hook"`
     Sidecar bool `mapstructure:"sidecar" hcl:"sidecar,optional"`
 }
 
-// Determine if lifecycle has user-input values
+// Empty determines if lifecycle has user-input values
 func (l *TaskLifecycle) Empty() bool {
-    return l == nil || (l.Hook == "")
+    return l == nil
 }
 
 // Task is a single process in a task group.
@@ -736,6 +803,8 @@ type Task struct {
     Identities []*WorkloadIdentity `hcl:"identity,block"`
 
     Actions []*Action `hcl:"action,block"`
+
+    Schedule *TaskSchedule `hcl:"schedule,block"`
 }
 
 func (t *Task) Canonicalize(tg *TaskGroup, job *Job) {
@@ -791,17 +860,22 @@ func (t *Task) Canonicalize(tg *TaskGroup, job *Job) {
 
 // TaskArtifact is used to download artifacts before running a task.
 type TaskArtifact struct {
     GetterSource *string `mapstructure:"source" hcl:"source,optional"`
     GetterOptions map[string]string `mapstructure:"options" hcl:"options,block"`
     GetterHeaders map[string]string `mapstructure:"headers" hcl:"headers,block"`
     GetterMode *string `mapstructure:"mode" hcl:"mode,optional"`
+    GetterInsecure *bool `mapstructure:"insecure" hcl:"insecure,optional"`
     RelativeDest *string `mapstructure:"destination" hcl:"destination,optional"`
+    Chown bool `mapstructure:"chown" hcl:"chown,optional"`
 }
 
 func (a *TaskArtifact) Canonicalize() {
     if a.GetterMode == nil {
         a.GetterMode = pointerOf("any")
     }
+    if a.GetterInsecure == nil {
+        a.GetterInsecure = pointerOf(false)
+    }
     if a.GetterSource == nil {
         // Shouldn't be possible, but we don't want to panic
         a.GetterSource = pointerOf("")
@@ -874,6 +948,7 @@ type Template struct {
     ChangeMode *string `mapstructure:"change_mode" hcl:"change_mode,optional"`
     ChangeScript *ChangeScript `mapstructure:"change_script" hcl:"change_script,block"`
     ChangeSignal *string `mapstructure:"change_signal" hcl:"change_signal,optional"`
+    Once *bool `mapstructure:"once" hcl:"once,optional"`
     Splay *time.Duration `mapstructure:"splay" hcl:"splay,optional"`
     Perms *string `mapstructure:"perms" hcl:"perms,optional"`
     Uid *int `mapstructure:"uid" hcl:"uid,optional"`
@@ -912,6 +987,9 @@ func (tmpl *Template) Canonicalize() {
     if tmpl.ChangeScript != nil {
         tmpl.ChangeScript.Canonicalize()
     }
+    if tmpl.Once == nil {
+        tmpl.Once = pointerOf(false)
+    }
     if tmpl.Splay == nil {
         tmpl.Splay = pointerOf(5 * time.Second)
     }
@@ -937,14 +1015,15 @@ func (tmpl *Template) Canonicalize() {
 }
 
 type Vault struct {
     Policies []string `hcl:"policies,optional"`
     Role string `hcl:"role,optional"`
     Namespace *string `mapstructure:"namespace" hcl:"namespace,optional"`
     Cluster string `hcl:"cluster,optional"`
     Env *bool `hcl:"env,optional"`
     DisableFile *bool `mapstructure:"disable_file" hcl:"disable_file,optional"`
     ChangeMode *string `mapstructure:"change_mode" hcl:"change_mode,optional"`
     ChangeSignal *string `mapstructure:"change_signal" hcl:"change_signal,optional"`
+    AllowTokenExpiration *bool `mapstructure:"allow_token_expiration" hcl:"allow_token_expiration,optional"`
 }
 
 func (v *Vault) Canonicalize() {
@@ -966,6 +1045,9 @@ func (v *Vault) Canonicalize() {
     if v.ChangeSignal == nil {
         v.ChangeSignal = pointerOf("SIGHUP")
     }
+    if v.AllowTokenExpiration == nil {
+        v.AllowTokenExpiration = pointerOf(false)
+    }
 }
 
 // NewTask creates and initializes a new Task.
@@ -976,7 +1058,7 @@ func NewTask(name, driver string) *Task {
     }
 }
 
-// Configure is used to configure a single k/v pair on
+// SetConfig is used to configure a single k/v pair on
 // the task.
 func (t *Task) SetConfig(key string, val interface{}) *Task {
     if t.Config == nil {
@@ -1001,7 +1083,7 @@ func (t *Task) Require(r *Resources) *Task {
     return t
 }
 
-// Constraint adds a new constraints to a single task.
+// Constrain adds a new constraints to a single task.
 func (t *Task) Constrain(c *Constraint) *Task {
     t.Constraints = append(t.Constraints, c)
     return t
@@ -1035,17 +1117,6 @@ type TaskState struct {
     StartedAt time.Time
     FinishedAt time.Time
     Events []*TaskEvent
-
-    // Experimental - TaskHandle is based on drivers.TaskHandle and used
-    // by remote task drivers to migrate task handles between allocations.
-    TaskHandle *TaskHandle
-}
-
-// Experimental - TaskHandle is based on drivers.TaskHandle and used by remote
-// task drivers to migrate task handles between allocations.
-type TaskHandle struct {
-    Version int
-    DriverState []byte
 }
 
 const (
@@ -1171,6 +1242,7 @@ type WorkloadIdentity struct {
     ChangeSignal string `mapstructure:"change_signal" hcl:"change_signal,optional"`
     Env bool `hcl:"env,optional"`
     File bool `hcl:"file,optional"`
+    Filepath string `hcl:"filepath,optional"`
     ServiceName string `hcl:"service_name,optional"`
     TTL time.Duration `mapstructure:"ttl" hcl:"ttl,optional"`
 }
9 vendor/github.com/hashicorp/nomad/api/utils.go generated vendored
@@ -33,3 +33,12 @@ func formatFloat(f float64, maxPrec int) string {
 func pointerOf[A any](a A) *A {
     return &a
 }
+
+// pointerCopy returns a new pointer to a.
+func pointerCopy[A any](a *A) *A {
+    if a == nil {
+        return nil
+    }
+    na := *a
+    return &na
+}
19 vendor/github.com/hashicorp/nomad/api/variables.go generated vendored
@@ -235,8 +235,8 @@ func (vars *Variables) readInternal(endpoint string, out **Variable, q *QueryOpt
     }
     r.setQueryOptions(q)
 
-    checkFn := requireStatusIn(http.StatusOK, http.StatusNotFound, http.StatusForbidden)
-    rtt, resp, err := checkFn(vars.client.doRequest(r))
+    checkFn := requireStatusIn(http.StatusOK, http.StatusNotFound, http.StatusForbidden) //nolint:bodyclose
+    rtt, resp, err := checkFn(vars.client.doRequest(r)) //nolint:bodyclose
     if err != nil {
         return nil, err
     }
@@ -284,12 +284,12 @@ func (vars *Variables) deleteInternal(path string, q *WriteOptions) (*WriteMeta,
     }
     r.setWriteOptions(q)
 
-    checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent)
-    rtt, resp, err := checkFn(vars.client.doRequest(r))
-
+    checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent) //nolint:bodyclose
+    rtt, resp, err := checkFn(vars.client.doRequest(r)) //nolint:bodyclose
     if err != nil {
         return nil, err
     }
+    defer resp.Body.Close()
 
     wm := &WriteMeta{RequestTime: rtt}
     _ = parseWriteMeta(resp, wm)
@@ -305,11 +305,12 @@ func (vars *Variables) deleteChecked(path string, checkIndex uint64, q *WriteOpt
         return nil, err
     }
     r.setWriteOptions(q)
-    checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent, http.StatusConflict)
-    rtt, resp, err := checkFn(vars.client.doRequest(r))
+    checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent, http.StatusConflict) //nolint:bodyclose
+    rtt, resp, err := checkFn(vars.client.doRequest(r)) //nolint:bodyclose
     if err != nil {
         return nil, err
     }
+    defer resp.Body.Close()
 
     wm := &WriteMeta{RequestTime: rtt}
     _ = parseWriteMeta(resp, wm)
@@ -341,8 +342,8 @@ func (vars *Variables) writeChecked(endpoint string, in *Variable, out *Variable
     r.setWriteOptions(q)
     r.obj = in
 
-    checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent, http.StatusConflict)
-    rtt, resp, err := checkFn(vars.client.doRequest(r))
+    checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent, http.StatusConflict) //nolint:bodyclose
+    rtt, resp, err := checkFn(vars.client.doRequest(r)) //nolint:bodyclose
 
     if err != nil {
         return nil, err
27 vendor/golang.org/x/exp/LICENSE generated vendored
@@ -1,27 +0,0 @@
-Copyright 2009 The Go Authors.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google LLC nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22	vendor/golang.org/x/exp/PATENTS generated vendored
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
86	vendor/golang.org/x/exp/maps/maps.go generated vendored
@@ -1,86 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package maps defines various functions useful with maps of any type.
-package maps
-
-import "maps"
-
-// Keys returns the keys of the map m.
-// The keys will be in an indeterminate order.
-//
-// The simplest true equivalent using the standard library is:
-//
-//	slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m))
-func Keys[M ~map[K]V, K comparable, V any](m M) []K {
-
-	r := make([]K, 0, len(m))
-	for k := range m {
-		r = append(r, k)
-	}
-	return r
-}
-
-// Values returns the values of the map m.
-// The values will be in an indeterminate order.
-//
-// The simplest true equivalent using the standard library is:
-//
-//	slices.AppendSeq(make([]V, 0, len(m)), maps.Values(m))
-func Values[M ~map[K]V, K comparable, V any](m M) []V {
-
-	r := make([]V, 0, len(m))
-	for _, v := range m {
-		r = append(r, v)
-	}
-	return r
-}
-
-// Equal reports whether two maps contain the same key/value pairs.
-// Values are compared using ==.
-//
-//go:fix inline
-func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool {
-	return maps.Equal(m1, m2)
-}
-
-// EqualFunc is like Equal, but compares values using eq.
-// Keys are still compared with ==.
-//
-//go:fix inline
-func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool {
-	return maps.EqualFunc(m1, m2, eq)
-}
-
-// Clear removes all entries from m, leaving it empty.
-//
-//go:fix inline
-func Clear[M ~map[K]V, K comparable, V any](m M) {
-	clear(m)
-}
-
-// Clone returns a copy of m. This is a shallow clone:
-// the new keys and values are set using ordinary assignment.
-//
-//go:fix inline
-func Clone[M ~map[K]V, K comparable, V any](m M) M {
-	return maps.Clone(m)
-}
-
-// Copy copies all key/value pairs in src adding them to dst.
-// When a key in src is already present in dst,
-// the value in dst will be overwritten by the value associated
-// with the key in src.
-//
-//go:fix inline
-func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) {
-	maps.Copy(dst, src)
-}
-
-// DeleteFunc deletes any key/value pairs from m for which del returns true.
-//
-//go:fix inline
-func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) {
-	maps.DeleteFunc(m, del)
-}
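The deleted maps.go above shows why this vendored dependency could be dropped: by this version, golang.org/x/exp/maps had become a thin shim over the standard library, with `Equal`, `EqualFunc`, `Clone`, `Copy`, `Clear`, and `DeleteFunc` forwarding directly to the `maps` package (the `//go:fix inline` directives mark them as inlineable), and `Keys`/`Values` differing only in returning slices rather than iterators. The following sketch of the standard-library equivalents is illustrative only and assumes Go 1.23+ (for `maps.Keys`/`maps.Values` and `slices.Collect`); it is not taken from the diun or nomad sources.

```go
package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}

	// x/exp/maps.Keys and Values returned slices; the standard library returns
	// iterators, which slices.Collect turns back into slices (order is still
	// indeterminate, so sort before comparing or printing).
	keys := slices.Collect(maps.Keys(m))
	vals := slices.Collect(maps.Values(m))
	slices.Sort(keys)
	slices.Sort(vals)
	fmt.Println(keys, vals) // [a b] [1 2]

	// The remaining helpers map one-to-one onto the standard library.
	clone := maps.Clone(m)                   // shallow copy, like x/exp/maps.Clone
	maps.Copy(clone, map[string]int{"c": 3}) // like x/exp/maps.Copy
	maps.DeleteFunc(clone, func(k string, v int) bool { return v > 2 })
	fmt.Println(maps.Equal(m, clone), len(clone)) // true 2

	clear(m) // x/exp/maps.Clear is the clear built-in
	fmt.Println(len(m)) // 0
}
```

With these equivalents in the standard library, removing the vendored golang.org/x/exp tree and its entry in vendor/modules.txt below leaves nothing unresolved.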
5	vendor/modules.txt vendored
@@ -293,7 +293,7 @@ github.com/hashicorp/go-multierror
 # github.com/hashicorp/go-rootcerts v1.0.2
 ## explicit; go 1.12
 github.com/hashicorp/go-rootcerts
-# github.com/hashicorp/nomad/api v0.0.0-20231213195942-64e3dca9274b
+# github.com/hashicorp/nomad/api v0.0.0-20250812204832-62b195aaa535
 ## explicit; go 1.20
 github.com/hashicorp/nomad/api
 github.com/hashicorp/nomad/api/contexts
@@ -509,9 +509,6 @@ go.opentelemetry.io/otel/trace/noop
 golang.org/x/crypto/pbkdf2
 golang.org/x/crypto/scrypt
 golang.org/x/crypto/sha3
-# golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0
-## explicit; go 1.23.0
-golang.org/x/exp/maps
 # golang.org/x/mod v0.27.0
 ## explicit; go 1.23.0
 golang.org/x/mod/semver