vendor: update to github.com/hashicorp/nomad/api v1.10.4

CrazyMax
2025-08-31 10:36:02 +02:00
parent c14b41e442
commit 9c69ad1a1b
37 changed files with 1563 additions and 372 deletions

go.mod

@@ -21,7 +21,7 @@ require (
github.com/eclipse/paho.mqtt.golang v1.5.0
github.com/go-gomail/gomail v0.0.0-20160411212932-81ebce5c23df
github.com/go-playground/validator/v10 v10.27.0
github.com/hashicorp/nomad/api v0.0.0-20231213195942-64e3dca9274b // v1.7.2
github.com/hashicorp/nomad/api v0.0.0-20250812204832-62b195aaa535 // v1.10.4
github.com/jedib0t/go-pretty/v6 v6.6.8
github.com/matcornic/hermes/v2 v2.1.0
github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16
@@ -135,7 +135,6 @@ require (
go.opentelemetry.io/otel/metric v1.36.0 // indirect
go.opentelemetry.io/otel/trace v1.36.0 // indirect
golang.org/x/crypto v0.40.0 // indirect
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
golang.org/x/net v0.41.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.16.0 // indirect

go.sum

@@ -180,8 +180,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/nomad/api v0.0.0-20231213195942-64e3dca9274b h1:R1UDhkwGltpSPY9bCBBxIMQd+NY9BkN0vFHnJo/8o8w=
github.com/hashicorp/nomad/api v0.0.0-20231213195942-64e3dca9274b/go.mod h1:ijDwa6o1uG1jFSq6kERiX2PamKGpZzTmo0XOFNeFZgw=
github.com/hashicorp/nomad/api v0.0.0-20250812204832-62b195aaa535 h1:DZhOjjH4Gf6TOQYCDYwWzmnY9/jKeUWnaKEdAybhnEM=
github.com/hashicorp/nomad/api v0.0.0-20250812204832-62b195aaa535/go.mod h1:y4olHzVXiQolzyk6QD/gqJxQTnnchlTf/QtczFFKwOI=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
@@ -240,8 +240,6 @@ github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwX
github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/buildkit v0.23.2 h1:gt/dkfcpgTXKx+B9I310kV767hhVqTvEyxGgI3mqsGQ=
@@ -317,8 +315,8 @@ github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/shoenig/test v1.7.0 h1:eWcHtTXa6QLnBvm0jgEabMRN/uJ4DMV3M8xUGgRkZmk=
github.com/shoenig/test v1.7.0/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI=
github.com/shoenig/test v1.12.1 h1:mLHfnMv7gmhhP44WrvT+nKSxKkPDiNkIuHGdIGI9RLU=
github.com/shoenig/test v1.12.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
@@ -386,8 +384,6 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=


@@ -2,7 +2,11 @@ schema_version = 1
project {
license = "MPL-2.0"
copyright_year = 2023
copyright_year = 2024
header_ignore = []
header_ignore = [
// Enterprise files do not fall under the open source licensing. CE-ENT
// merge conflicts might happen here, please be sure to put new CE
// exceptions above this comment.
]
}


@@ -67,6 +67,16 @@ func (a *ACLPolicies) Info(policyName string, q *QueryOptions) (*ACLPolicy, *Que
return &resp, wm, nil
}
// Self is used to query policies attached to a workload identity
func (a *ACLPolicies) Self(q *QueryOptions) ([]*ACLPolicyListStub, *QueryMeta, error) {
var resp []*ACLPolicyListStub
wm, err := a.client.query("/v1/acl/policy/self", &resp, q)
if err != nil {
return nil, nil, err
}
return resp, wm, nil
}
// ACLTokens is used to query the ACL token endpoints.
type ACLTokens struct {
client *Client
@@ -509,6 +519,7 @@ func (a *ACLAuth) Login(req *ACLLoginRequest, q *WriteOptions) (*ACLToken, *Writ
type ACLPolicyListStub struct {
Name string
Description string
JobACL *JobACL
CreateIndex uint64
ModifyIndex uint64
}
@@ -826,6 +837,13 @@ type ACLAuthMethodConfig struct {
OIDCClientID string
// The OAuth Client Secret configured with the OIDC provider
OIDCClientSecret string
// Optionally send a signed JWT ("private key jwt") as a client assertion
// to the OIDC provider
OIDCClientAssertion *OIDCClientAssertion
// Enable S256 PKCE challenge verification.
OIDCEnablePKCE bool
// Disable claims from the OIDC UserInfo endpoint
OIDCDisableUserInfo bool
// List of OIDC scopes
OIDCScopes []string
// List of auth claims that are valid for login
@@ -855,6 +873,9 @@ type ACLAuthMethodConfig struct {
// (value).
ClaimMappings map[string]string
ListClaimMappings map[string]string
// Enables logging of claims and binding-rule evaluations when
// debug level logging is enabled.
VerboseLogging bool
}
// MarshalJSON implements the json.Marshaler interface and allows
@@ -945,6 +966,118 @@ func (c *ACLAuthMethodConfig) UnmarshalJSON(data []byte) error {
return nil
}
// OIDCClientAssertionKeySource specifies what key material should be used
// to sign an OIDCClientAssertion.
type OIDCClientAssertionKeySource string
const (
// OIDCKeySourceNomad signs the OIDCClientAssertion JWT with Nomad's
// internal private key. Its public key is exposed at /.well-known/jwks.json
OIDCKeySourceNomad OIDCClientAssertionKeySource = "nomad"
// OIDCKeySourcePrivateKey signs the OIDCClientAssertion JWT with
// key material defined in OIDCClientAssertion.PrivateKey
OIDCKeySourcePrivateKey OIDCClientAssertionKeySource = "private_key"
// OIDCKeySourceClientSecret signs the OIDCClientAssertion JWT with
// ACLAuthMethod.ClientSecret
OIDCKeySourceClientSecret OIDCClientAssertionKeySource = "client_secret"
)
// OIDCClientAssertion (a.k.a private_key_jwt) is used to send
// a client_assertion along with an OIDC token request.
// Reference: https://oauth.net/private-key-jwt/
// See also: structs.OIDCClientAssertion
type OIDCClientAssertion struct {
// Audience is/are who will be processing the assertion.
// Defaults to the parent `ACLAuthMethodConfig`'s `OIDCDiscoveryURL`
Audience []string
// KeySource is where to get the private key to sign the JWT.
// It is the one field that *must* be set to enable client assertions.
// Available sources:
// - "nomad": Use current active key in Nomad's keyring
// - "private_key": Use key material in the `PrivateKey` field
// - "client_secret": Use the `OIDCClientSecret` inherited from the parent
// `ACLAuthMethodConfig` as an HMAC key
KeySource OIDCClientAssertionKeySource
// KeyAlgorithm is the key's algorithm.
// Its default values are based on the `KeySource`:
// - "nomad": "RS256" (from Nomad's keyring, must not be changed)
// - "private_key": "RS256" (must be RS256, RS384, or RS512)
// - "client_secret": "HS256" (must be HS256, HS384, or HS512)
KeyAlgorithm string
// PrivateKey contains external key material provided by users.
// `KeySource` must be "private_key" to enable this.
PrivateKey *OIDCClientAssertionKey
// ExtraHeaders are added to the JWT headers, alongside "kid" and "type"
// Setting the "kid" header here is not allowed; use `PrivateKey.KeyID`.
ExtraHeaders map[string]string
}
// OIDCClientAssertionKeyIDHeader is the header that the OIDC provider will use
// to look up the certificate or public key that it needs to verify the
// private key JWT signature.
type OIDCClientAssertionKeyIDHeader string
const (
OIDCClientAssertionHeaderKid OIDCClientAssertionKeyIDHeader = "kid"
OIDCClientAssertionHeaderX5t OIDCClientAssertionKeyIDHeader = "x5t"
OIDCClientAssertionHeaderX5tS256 OIDCClientAssertionKeyIDHeader = "x5t#S256"
)
// OIDCClientAssertionKey contains key material provided by users for Nomad
// to use to sign the private key JWT.
//
// PemKey or PemKeyFile must contain an RSA private key in PEM format.
//
// PemCert, PemCertFile may contain an x509 certificate created with
// the Key, used to derive the KeyID. Alternatively, KeyID may be set manually.
//
// PemKeyFile and PemCertFile, if set, must be an absolute path to a file
// present on disk on any Nomad servers that may become cluster leaders.
type OIDCClientAssertionKey struct {
// PemKey is an RSA private key, in pem format. It is used to sign the JWT.
// Mutually exclusive with `PemKeyFile`.
PemKey string
// PemKeyFile is an absolute path to a private key on Nomad servers' disk,
// in pem format. It is used to sign the JWT.
// Mutually exclusive with `PemKey`.
PemKeyFile string
// KeyIDHeader is which header the provider will use to find the
// public key to verify the signed JWT. Its default values vary
// based on which of the other required fields is set:
// - KeyID: "kid"
// - PemCert: "x5t#S256"
// - PemCertFile: "x5t#S256"
//
// Refer to the JWS RFC for information on these headers:
// - "kid": https://datatracker.ietf.org/doc/html/rfc7515#section-4.1.4
// - "x5t": https://datatracker.ietf.org/doc/html/rfc7515#section-4.1.7
// - "x5t#S256": https://datatracker.ietf.org/doc/html/rfc7515#section-4.1.8
//
// If you need to set some other header not supported here,
// you may use OIDCClientAssertion.ExtraHeaders.
KeyIDHeader OIDCClientAssertionKeyIDHeader
// KeyID may be set manually and becomes the "kid" header.
// Mutually exclusive with `PemCert` and `PemCertFile`.
// Allowed `KeyIDHeader` values: "kid" (the default)
KeyID string
// PemCert is an x509 certificate, signed by the private key or a CA,
// in pem format. It is used to derive an x5t#S256 (or x5t) header.
// Mutually exclusive with `PemCertFile` and `KeyID`.
// Allowed `KeyIDHeader` values: "x5t", "x5t#S256" (default "x5t#S256")
PemCert string
// PemCertFile is an absolute path to an x509 certificate on Nomad servers'
// disk, signed by the private key or a CA, in pem format.
// It is used to derive an x5t#S256 (or x5t) header.
// Mutually exclusive with `PemCert` and `KeyID`.
// Allowed `KeyIDHeader` values: "x5t", "x5t#S256" (default "x5t#S256")
PemCertFile string
}
// ACLAuthMethodListStub is the stub object returned when performing a listing
// of ACL auth-methods. It is intentionally minimal due to the unauthenticated
// nature of the list endpoint.

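For reference, a minimal sketch of wiring the new private-key-JWT ("client assertion") fields into an OIDC auth method. The ACLAuthMethod struct and ACLAuthMethods().Create call come from the existing api surface rather than this diff, and the discovery URL, client ID and key path are placeholders:

```go
package main

import (
	"log"
	"time"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Sign the client assertion with an operator-provided RSA key
	// (KeySource "private_key"); PKCE is enabled alongside it.
	cfg := &api.ACLAuthMethodConfig{
		OIDCDiscoveryURL: "https://idp.example.com/.well-known/openid-configuration",
		OIDCClientID:     "nomad",
		OIDCEnablePKCE:   true,
		OIDCScopes:       []string{"openid"},
		OIDCClientAssertion: &api.OIDCClientAssertion{
			KeySource: api.OIDCKeySourcePrivateKey,
			PrivateKey: &api.OIDCClientAssertionKey{
				PemKeyFile: "/etc/nomad.d/oidc-assertion.key", // placeholder path on the servers
				KeyID:      "nomad-oidc",                      // becomes the "kid" header
			},
		},
	}

	method := &api.ACLAuthMethod{
		Name:          "oidc-sso",
		Type:          "OIDC",
		TokenLocality: "global",
		MaxTokenTTL:   time.Hour,
		Config:        cfg,
	}
	if _, err := client.ACLAuthMethods().Create(method, nil); err != nil {
		log.Fatal(err)
	}
}
```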

@@ -156,7 +156,7 @@ func (a *Agent) Members() (*ServerMembers, error) {
return resp, nil
}
// Members is used to query all of the known server members
// MembersOpts is used to query all of the known server members
// with the ability to set QueryOptions
func (a *Agent) MembersOpts(opts *QueryOptions) (*ServerMembers, error) {
var resp *ServerMembers
@@ -302,15 +302,27 @@ func (a *Agent) Host(serverID, nodeID string, q *QueryOptions) (*HostDataRespons
// Monitor returns a channel which will receive streaming logs from the agent
// Providing a non-nil stopCh can be used to close the connection and stop log streaming
func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) {
frames, errCh := a.monitorHelper(stopCh, q, "/v1/agent/monitor")
return frames, errCh
}
// MonitorExport returns a channel which will receive streaming logs from the agent
// Providing a non-nil stopCh can be used to close the connection and stop log streaming
func (a *Agent) MonitorExport(stopCh <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) {
frames, errCh := a.monitorHelper(stopCh, q, "/v1/agent/monitor/export")
return frames, errCh
}
func (a *Agent) monitorHelper(stopCh <-chan struct{}, q *QueryOptions, path string) (chan *StreamFrame, chan error) {
errCh := make(chan error, 1)
r, err := a.client.newRequest("GET", "/v1/agent/monitor")
r, err := a.client.newRequest("GET", path)
if err != nil {
errCh <- err
return nil, errCh
}
r.setQueryOptions(q)
_, resp, err := requireOK(a.client.doRequest(r))
_, resp, err := requireOK(a.client.doRequest(r)) //nolint:bodyclose
if err != nil {
errCh <- err
return nil, errCh

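A rough usage sketch for the new MonitorExport endpoint. The export-specific query parameters are handled server-side and not visible in this diff, so only the streaming pattern is shown:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Closing stopCh tears down the streaming connection.
	stopCh := make(chan struct{})
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt)
	go func() { <-sigCh; close(stopCh) }()

	// Any endpoint-specific parameters would go in QueryOptions.Params.
	frames, errCh := client.Agent().MonitorExport(stopCh, &api.QueryOptions{})

	for {
		select {
		case frame, ok := <-frames:
			if !ok {
				return
			}
			os.Stdout.Write(frame.Data)
		case err := <-errCh:
			fmt.Fprintln(os.Stderr, err)
			return
		}
	}
}
```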

@@ -235,6 +235,27 @@ func (a *Allocations) Signal(alloc *Allocation, q *QueryOptions, task, signal st
return err
}
// SetPauseState sets the schedule behavior of one task in the allocation.
func (a *Allocations) SetPauseState(alloc *Allocation, q *QueryOptions, task, state string) error {
req := AllocPauseRequest{
ScheduleState: state,
Task: task,
}
var resp GenericResponse
_, err := a.client.putQuery("/v1/client/allocation/"+alloc.ID+"/pause", &req, &resp, q)
return err
}
// GetPauseState gets the schedule behavior of one task in the allocation.
//
// The ?task=<task> query parameter must be set.
func (a *Allocations) GetPauseState(alloc *Allocation, q *QueryOptions, task string) (string, *QueryMeta, error) {
var resp AllocGetPauseResponse
qm, err := a.client.query("/v1/client/allocation/"+alloc.ID+"/pause?task="+task, &resp, q)
state := resp.ScheduleState
return state, qm, err
}
// Services is used to return a list of service registrations associated to the
// specified allocID.
func (a *Allocations) Services(allocID string, q *QueryOptions) ([]*ServiceRegistration, *QueryMeta, error) {
@@ -286,6 +307,7 @@ type AllocationMetric struct {
NodesEvaluated int
NodesFiltered int
NodesInPool int
NodePool string
NodesAvailable map[string]int
ClassFiltered map[string]int
ConstraintFiltered map[string]int
@@ -414,6 +436,7 @@ type AllocDeploymentStatus struct {
type AllocNetworkStatus struct {
InterfaceName string
Address string
AddressIPv6 string
DNS *DNSConfig
}
@@ -517,6 +540,21 @@ type AllocSignalRequest struct {
Signal string
}
type AllocPauseRequest struct {
Task string
// ScheduleState must be one of "pause", "run", "scheduled".
ScheduleState string
}
type AllocGetPauseResponse struct {
// ScheduleState will be one of "" (run), "force_run", "scheduled_pause",
// "force_pause", or "schedule_resume".
//
// See nomad/structs/task_sched.go for details.
ScheduleState string
}
// GenericResponse is used to respond to a request where no
// specific response information is needed.
type GenericResponse struct {
@@ -525,7 +563,8 @@ type GenericResponse struct {
// RescheduleTracker encapsulates previous reschedule events
type RescheduleTracker struct {
Events []*RescheduleEvent
Events []*RescheduleEvent
LastReschedule string
}
// RescheduleEvent is used to keep track of previous attempts at rescheduling an allocation

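A short sketch of the new pause endpoints using the signatures shown above; the allocation ID and task name are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Look up the allocation first; SetPauseState takes the full *Allocation.
	alloc, _, err := client.Allocations().Info("c3f3e9cd-placeholder-alloc-id", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Pause the "worker" task. Valid states per AllocPauseRequest are
	// "pause", "run", and "scheduled".
	if err := client.Allocations().SetPauseState(alloc, nil, "worker", "pause"); err != nil {
		log.Fatal(err)
	}

	// Read the schedule state back; the ?task= parameter is added by the helper.
	state, _, err := client.Allocations().GetPauseState(alloc, nil, "worker")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("schedule state:", state)
}
```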

@@ -109,11 +109,11 @@ func (s *execSession) startConnection() (*websocket.Conn, error) {
var conn *websocket.Conn
if nodeClient != nil {
conn, _, _ = nodeClient.websocket(reqPath, q)
conn, _, _ = nodeClient.websocket(reqPath, q) //nolint:bodyclose // gorilla/websocket Dialer.DialContext() does not require the body to be closed.
}
if conn == nil {
conn, _, err = s.client.websocket(reqPath, q)
conn, _, err = s.client.websocket(reqPath, q) //nolint:bodyclose // gorilla/websocket Dialer.DialContext() does not require the body to be closed.
if err != nil {
return nil, err
}


@@ -234,7 +234,6 @@ func (c *Config) ClientConfig(region, address string, tlsEnabled bool) *Config {
HttpAuth: c.HttpAuth,
WaitTime: c.WaitTime,
TLSConfig: c.TLSConfig.Copy(),
url: copyURL(c.url),
}
// Update the tls server name for connecting to a client
@@ -435,7 +434,7 @@ func cloneWithTimeout(httpClient *http.Client, t time.Duration) (*http.Client, e
return &nc, nil
}
// ConfigureTLS applies a set of TLS configurations to the the HTTP client.
// ConfigureTLS applies a set of TLS configurations to the HTTP client.
func ConfigureTLS(httpClient *http.Client, tlsConfig *TLSConfig) error {
if tlsConfig == nil {
return nil
@@ -509,9 +508,13 @@ func NewClient(config *Config) (*Client, error) {
}
// we have to test the address that comes from DefaultConfig, because it
// could be the value of NOMAD_ADDR which is applied without testing
if config.url, err = url.Parse(config.Address); err != nil {
return nil, fmt.Errorf("invalid address '%s': %v", config.Address, err)
// could be the value of NOMAD_ADDR which is applied without testing. But
// only on the first use of this Config, otherwise we'll have mutated the
// address
if config.url == nil {
if config.url, err = url.Parse(config.Address); err != nil {
return nil, fmt.Errorf("invalid address '%s': %v", config.Address, err)
}
}
httpClient := config.HttpClient
@@ -932,7 +935,7 @@ func (c *Client) rawQuery(endpoint string, q *QueryOptions) (io.ReadCloser, erro
return nil, err
}
r.setQueryOptions(q)
_, resp, err := requireOK(c.doRequest(r))
_, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility.
if err != nil {
return nil, err
}
@@ -1036,7 +1039,7 @@ func (c *Client) query(endpoint string, out any, q *QueryOptions) (*QueryMeta, e
return nil, err
}
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.doRequest(r))
rtt, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility.
if err != nil {
return nil, err
}
@@ -1061,7 +1064,7 @@ func (c *Client) putQuery(endpoint string, in, out any, q *QueryOptions) (*Query
}
r.setQueryOptions(q)
r.obj = in
rtt, resp, err := requireOK(c.doRequest(r))
rtt, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility.
if err != nil {
return nil, err
}
@@ -1092,7 +1095,7 @@ func (c *Client) postQuery(endpoint string, in, out any, q *QueryOptions) (*Quer
}
r.setQueryOptions(q)
r.obj = in
rtt, resp, err := requireOK(c.doRequest(r))
rtt, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility.
if err != nil {
return nil, err
}
@@ -1125,7 +1128,7 @@ func (c *Client) write(verb, endpoint string, in, out any, q *WriteOptions) (*Wr
}
r.setWriteOptions(q)
r.obj = in
rtt, resp, err := requireOK(c.doRequest(r))
rtt, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility.
if err != nil {
return nil, err
}
@@ -1151,7 +1154,7 @@ func (c *Client) delete(endpoint string, in, out any, q *WriteOptions) (*WriteMe
}
r.setWriteOptions(q)
r.obj = in
rtt, resp, err := requireOK(c.doRequest(r))
rtt, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility.
if err != nil {
return nil, err
}
@@ -1184,6 +1187,9 @@ func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
if err != nil {
return fmt.Errorf("Failed to parse X-Nomad-LastContact: %v", err)
}
if last > math.MaxInt64 {
return fmt.Errorf("Last contact duration is out of range: %d", last)
}
q.LastContact = time.Duration(last) * time.Millisecond
q.NextToken = header.Get("X-Nomad-NextToken")

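With the change above, NewClient parses Config.Address into the cached url field only when it has not been parsed yet, so one Config can back several clients without being mutated again; a small illustration (the address is a placeholder):

```go
package main

import (
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	cfg := api.DefaultConfig()
	cfg.Address = "https://nomad.service.consul:4646" // placeholder address

	// The first client parses cfg.Address and caches the result on the Config.
	c1, err := api.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Reusing the same Config no longer re-parses (and so no longer mutates)
	// the already-validated address.
	c2, err := api.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	_, _ = c1, c2
}
```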

@@ -4,9 +4,9 @@
package api
import (
"maps"
"slices"
"time"
"golang.org/x/exp/maps"
)
// Consul represents configuration related to consul.
@@ -16,6 +16,10 @@ type Consul struct {
// (Enterprise-only) Cluster represents a specific Consul cluster.
Cluster string `mapstructure:"cluster" hcl:"cluster,optional"`
// Partition is the Consul admin partition where the workload should
// run. This is available in Nomad CE but only works with Consul ENT
Partition string `mapstructure:"partition" hcl:"partition,optional"`
}
// Canonicalize Consul into a canonical form. The Canonicalize structs containing
@@ -29,6 +33,9 @@ func (c *Consul) Canonicalize() {
// we should inherit from higher up (i.e. job<-group). Likewise, if
// Namespace is set but empty, that is a choice to use the default consul
// namespace.
// Partition should never be defaulted to "default" because non-ENT Consul
// clusters don't have admin partitions
}
// Copy creates a deep copy of c.
@@ -36,6 +43,7 @@ func (c *Consul) Copy() *Consul {
return &Consul{
Namespace: c.Namespace,
Cluster: c.Cluster,
Partition: c.Partition,
}
}
@@ -107,6 +115,7 @@ type SidecarTask struct {
LogConfig *LogConfig `mapstructure:"logs" hcl:"logs,block"`
ShutdownDelay *time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"`
KillSignal string `mapstructure:"kill_signal" hcl:"kill_signal,optional"`
VolumeMounts []*VolumeMount `hcl:"volume_mount,block"`
}
func (st *SidecarTask) Canonicalize() {
@@ -145,16 +154,25 @@ func (st *SidecarTask) Canonicalize() {
if st.ShutdownDelay == nil {
st.ShutdownDelay = pointerOf(time.Duration(0))
}
for _, vm := range st.VolumeMounts {
vm.Canonicalize()
}
}
// ConsulProxy represents a Consul Connect sidecar proxy jobspec block.
type ConsulProxy struct {
LocalServiceAddress string `mapstructure:"local_service_address" hcl:"local_service_address,optional"`
LocalServicePort int `mapstructure:"local_service_port" hcl:"local_service_port,optional"`
Expose *ConsulExposeConfig `mapstructure:"expose" hcl:"expose,block"`
ExposeConfig *ConsulExposeConfig // Deprecated: only to maintain backwards compatibility. Use Expose instead.
Upstreams []*ConsulUpstream `hcl:"upstreams,block"`
Config map[string]interface{} `hcl:"config,block"`
LocalServiceAddress string `mapstructure:"local_service_address" hcl:"local_service_address,optional"`
LocalServicePort int `mapstructure:"local_service_port" hcl:"local_service_port,optional"`
Expose *ConsulExposeConfig `mapstructure:"expose" hcl:"expose,block"`
ExposeConfig *ConsulExposeConfig // Deprecated: only to maintain backwards compatibility. Use Expose instead.
Upstreams []*ConsulUpstream `hcl:"upstreams,block"`
// TransparentProxy configures the Envoy sidecar to use "transparent
// proxying", which creates IP tables rules inside the network namespace to
// ensure traffic flows thru the Envoy proxy
TransparentProxy *ConsulTransparentProxy `mapstructure:"transparent_proxy" hcl:"transparent_proxy,block"`
Config map[string]interface{} `hcl:"config,block"`
}
func (cp *ConsulProxy) Canonicalize() {
@@ -168,6 +186,8 @@ func (cp *ConsulProxy) Canonicalize() {
cp.Upstreams = nil
}
cp.TransparentProxy.Canonicalize()
for _, upstream := range cp.Upstreams {
upstream.Canonicalize()
}
@@ -199,7 +219,6 @@ type ConsulMeshGateway struct {
func (c *ConsulMeshGateway) Canonicalize() {
// Mode may be empty string, indicating behavior will defer to Consul
// service-defaults config entry.
return
}
func (c *ConsulMeshGateway) Copy() *ConsulMeshGateway {
@@ -217,6 +236,7 @@ type ConsulUpstream struct {
DestinationName string `mapstructure:"destination_name" hcl:"destination_name,optional"`
DestinationNamespace string `mapstructure:"destination_namespace" hcl:"destination_namespace,optional"`
DestinationPeer string `mapstructure:"destination_peer" hcl:"destination_peer,optional"`
DestinationPartition string `mapstructure:"destination_partition" hcl:"destination_partition,optional"`
DestinationType string `mapstructure:"destination_type" hcl:"destination_type,optional"`
LocalBindPort int `mapstructure:"local_bind_port" hcl:"local_bind_port,optional"`
Datacenter string `mapstructure:"datacenter" hcl:"datacenter,optional"`
@@ -231,19 +251,11 @@ func (cu *ConsulUpstream) Copy() *ConsulUpstream {
if cu == nil {
return nil
}
return &ConsulUpstream{
DestinationName: cu.DestinationName,
DestinationNamespace: cu.DestinationNamespace,
DestinationPeer: cu.DestinationPeer,
DestinationType: cu.DestinationType,
LocalBindPort: cu.LocalBindPort,
Datacenter: cu.Datacenter,
LocalBindAddress: cu.LocalBindAddress,
LocalBindSocketPath: cu.LocalBindSocketPath,
LocalBindSocketMode: cu.LocalBindSocketMode,
MeshGateway: cu.MeshGateway.Copy(),
Config: maps.Clone(cu.Config),
}
up := new(ConsulUpstream)
*up = *cu
up.MeshGateway = cu.MeshGateway.Copy()
up.Config = maps.Clone(cu.Config)
return up
}
func (cu *ConsulUpstream) Canonicalize() {
@@ -256,6 +268,61 @@ func (cu *ConsulUpstream) Canonicalize() {
}
}
// ConsulTransparentProxy is used to configure the Envoy sidecar for
// "transparent proxying", which creates IP tables rules inside the network
// namespace to ensure traffic flows thru the Envoy proxy
type ConsulTransparentProxy struct {
// UID of the Envoy proxy. Defaults to the default Envoy proxy container
// image user.
UID string `mapstructure:"uid" hcl:"uid,optional"`
// OutboundPort is the Envoy proxy's outbound listener port. Inbound TCP
// traffic hitting the PROXY_IN_REDIRECT chain will be redirected here.
// Defaults to 15001.
OutboundPort uint16 `mapstructure:"outbound_port" hcl:"outbound_port,optional"`
// ExcludeInboundPorts is an additional set of ports that will be excluded from
// redirection to the Envoy proxy. Can be Port.Label or Port.Value. This set
// will be added to the ports automatically excluded for the Expose.Port and
// Check.Expose fields.
ExcludeInboundPorts []string `mapstructure:"exclude_inbound_ports" hcl:"exclude_inbound_ports,optional"`
// ExcludeOutboundPorts is a set of outbound ports that will not be
// redirected to the Envoy proxy, specified as port numbers.
ExcludeOutboundPorts []uint16 `mapstructure:"exclude_outbound_ports" hcl:"exclude_outbound_ports,optional"`
// ExcludeOutboundCIDRs is a set of outbound CIDR blocks that will not be
// redirected to the Envoy proxy.
ExcludeOutboundCIDRs []string `mapstructure:"exclude_outbound_cidrs" hcl:"exclude_outbound_cidrs,optional"`
// ExcludeUIDs is a set of user IDs whose network traffic will not be
// redirected through the Envoy proxy.
ExcludeUIDs []string `mapstructure:"exclude_uids" hcl:"exclude_uids,optional"`
// NoDNS disables redirection of DNS traffic to Consul DNS. By default NoDNS
// is false and transparent proxy will direct DNS traffic to Consul DNS if
// available on the client.
NoDNS bool `mapstructure:"no_dns" hcl:"no_dns,optional"`
}
func (tp *ConsulTransparentProxy) Canonicalize() {
if tp == nil {
return
}
if len(tp.ExcludeInboundPorts) == 0 {
tp.ExcludeInboundPorts = nil
}
if len(tp.ExcludeOutboundCIDRs) == 0 {
tp.ExcludeOutboundCIDRs = nil
}
if len(tp.ExcludeOutboundPorts) == 0 {
tp.ExcludeOutboundPorts = nil
}
if len(tp.ExcludeUIDs) == 0 {
tp.ExcludeUIDs = nil
}
}
type ConsulExposeConfig struct {
Paths []*ConsulExposePath `mapstructure:"path" hcl:"path,block"`
Path []*ConsulExposePath // Deprecated: only to maintain backwards compatibility. Use Paths instead.
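
As a hedged sketch of the new Connect fields above, here is a sidecar proxy in transparent mode plus an upstream pinned to a Consul admin partition; values are illustrative and the surrounding Job/TaskGroup wiring is omitted:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

func main() {
	proxy := &api.ConsulProxy{
		// New in this version: route the task's traffic through Envoy via
		// iptables rules instead of explicit upstream bind addresses.
		TransparentProxy: &api.ConsulTransparentProxy{
			UID:                 "101",
			OutboundPort:        15001,
			ExcludeInboundPorts: []string{"metrics"}, // Port.Label or Port.Value
			NoDNS:               false,
		},
		Upstreams: []*api.ConsulUpstream{{
			DestinationName:      "postgres",
			DestinationPeer:      "dc2-peer",
			DestinationPartition: "billing", // new field; Consul ENT admin partition
			LocalBindPort:        5432,
		}},
	}
	proxy.Canonicalize()
	fmt.Printf("%+v\n", proxy)
}
```
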
@@ -393,12 +460,52 @@ func (p *ConsulGatewayProxy) Copy() *ConsulGatewayProxy {
}
}
// ConsulGatewayTLSConfig is used to configure TLS for a gateway.
// ConsulGatewayTLSSDSConfig is used to configure the gateway's TLS listener to
// load certificates from an external Secret Discovery Service (SDS)
type ConsulGatewayTLSSDSConfig struct {
// ClusterName specifies the name of the SDS cluster where Consul should
// retrieve certificates.
ClusterName string `hcl:"cluster_name,optional" mapstructure:"cluster_name"`
// CertResource specifies an SDS resource name
CertResource string `hcl:"cert_resource,optional" mapstructure:"cert_resource"`
}
func (c *ConsulGatewayTLSSDSConfig) Copy() *ConsulGatewayTLSSDSConfig {
if c == nil {
return nil
}
return &ConsulGatewayTLSSDSConfig{
ClusterName: c.ClusterName,
CertResource: c.CertResource,
}
}
// ConsulGatewayTLSConfig is used to configure TLS for a gateway. Both
// ConsulIngressConfigEntry and ConsulIngressService use this struct. For more
// details, consult the Consul documentation:
// https://developer.hashicorp.com/consul/docs/connect/config-entries/ingress-gateway#listeners-services-tls
type ConsulGatewayTLSConfig struct {
Enabled bool `hcl:"enabled,optional"`
TLSMinVersion string `hcl:"tls_min_version,optional" mapstructure:"tls_min_version"`
TLSMaxVersion string `hcl:"tls_max_version,optional" mapstructure:"tls_max_version"`
CipherSuites []string `hcl:"cipher_suites,optional" mapstructure:"cipher_suites"`
// Enabled indicates whether TLS is enabled for the configuration entry
Enabled bool `hcl:"enabled,optional"`
// TLSMinVersion specifies the minimum TLS version supported for gateway
// listeners.
TLSMinVersion string `hcl:"tls_min_version,optional" mapstructure:"tls_min_version"`
// TLSMaxVersion specifies the maximum TLS version supported for gateway
// listeners.
TLSMaxVersion string `hcl:"tls_max_version,optional" mapstructure:"tls_max_version"`
// CipherSuites specifies a list of cipher suites that gateway listeners
// support when negotiating connections using TLS 1.2 or older.
CipherSuites []string `hcl:"cipher_suites,optional" mapstructure:"cipher_suites"`
// SDS specifies parameters that configure the listener to load TLS
// certificates from an external Secrets Discovery Service (SDS).
SDS *ConsulGatewayTLSSDSConfig `hcl:"sds,block" mapstructure:"sds"`
}
func (tc *ConsulGatewayTLSConfig) Canonicalize() {
@@ -413,6 +520,7 @@ func (tc *ConsulGatewayTLSConfig) Copy() *ConsulGatewayTLSConfig {
Enabled: tc.Enabled,
TLSMinVersion: tc.TLSMinVersion,
TLSMaxVersion: tc.TLSMaxVersion,
SDS: tc.SDS.Copy(),
}
if len(tc.CipherSuites) != 0 {
cipherSuites := make([]string, len(tc.CipherSuites))
@@ -423,13 +531,90 @@ func (tc *ConsulGatewayTLSConfig) Copy() *ConsulGatewayTLSConfig {
return result
}
// ConsulIngressService is used to configure a service fronted by the ingress gateway.
// ConsulHTTPHeaderModifiers is a set of rules for HTTP header modification that
// should be performed by proxies as the request passes through them. It can
// operate on either request or response headers depending on the context in
// which it is used.
type ConsulHTTPHeaderModifiers struct {
// Add is a set of name -> value pairs that should be appended to the
// request or response (i.e. allowing duplicates if the same header already
// exists).
Add map[string]string `hcl:"add,block" mapstructure:"add"`
// Set is a set of name -> value pairs that should be added to the request
// or response, overwriting any existing header values of the same name.
Set map[string]string `hcl:"set,block" mapstructure:"set"`
// Remove is the set of header names that should be stripped from the
// request or response.
Remove []string `hcl:"remove,optional" mapstructure:"remove"`
}
func (h *ConsulHTTPHeaderModifiers) Copy() *ConsulHTTPHeaderModifiers {
if h == nil {
return nil
}
return &ConsulHTTPHeaderModifiers{
Add: maps.Clone(h.Add),
Set: maps.Clone(h.Set),
Remove: slices.Clone(h.Remove),
}
}
func (h *ConsulHTTPHeaderModifiers) Canonicalize() {
if h == nil {
return
}
if len(h.Add) == 0 {
h.Add = nil
}
if len(h.Set) == 0 {
h.Set = nil
}
if len(h.Remove) == 0 {
h.Remove = nil
}
}
// ConsulIngressService is used to configure a service fronted by the ingress
// gateway. For more details, consult the Consul documentation:
// https://developer.hashicorp.com/consul/docs/connect/config-entries/ingress-gateway
type ConsulIngressService struct {
// Namespace is not yet supported.
// Namespace string
// Name of the service exposed through this listener.
Name string `hcl:"name,optional"`
// Hosts specifies one or more hosts that the listening services can receive
// requests on.
Hosts []string `hcl:"hosts,optional"`
// TLS specifies a TLS configuration override for a specific service. If
// unset this will fallback to the ConsulIngressConfigEntry's own TLS field.
TLS *ConsulGatewayTLSConfig `hcl:"tls,block" mapstructure:"tls"`
// RequestHeaders specifies a set of HTTP-specific header modification rules
// applied to requests routed through the gateway
RequestHeaders *ConsulHTTPHeaderModifiers `hcl:"request_headers,block" mapstructure:"request_headers"`
// ResponseHeader specifies a set of HTTP-specific header modification rules
// applied to responses routed through the gateway
ResponseHeaders *ConsulHTTPHeaderModifiers `hcl:"response_headers,block" mapstructure:"response_headers"`
// MaxConnections specifies the maximum number of HTTP/1.1 connections a
// service instance is allowed to establish against the upstream
MaxConnections *uint32 `hcl:"max_connections,optional" mapstructure:"max_connections"`
// MaxPendingRequests specifies the maximum number of requests that are
// allowed to queue while waiting to establish a connection
MaxPendingRequests *uint32 `hcl:"max_pending_requests,optional" mapstructure:"max_pending_requests"`
// MaxConcurrentRequests specifies the maximum number of concurrent HTTP/2
// traffic requests that are allowed at a single point in time
MaxConcurrentRequests *uint32 `hcl:"max_concurrent_requests,optional" mapstructure:"max_concurrent_requests"`
}
func (s *ConsulIngressService) Canonicalize() {
@@ -440,6 +625,9 @@ func (s *ConsulIngressService) Canonicalize() {
if len(s.Hosts) == 0 {
s.Hosts = nil
}
s.RequestHeaders.Canonicalize()
s.ResponseHeaders.Canonicalize()
}
func (s *ConsulIngressService) Copy() *ConsulIngressService {
@@ -447,16 +635,19 @@ func (s *ConsulIngressService) Copy() *ConsulIngressService {
return nil
}
var hosts []string = nil
if n := len(s.Hosts); n > 0 {
hosts = make([]string, n)
copy(hosts, s.Hosts)
}
ns := new(ConsulIngressService)
*ns = *s
return &ConsulIngressService{
Name: s.Name,
Hosts: hosts,
}
ns.Hosts = slices.Clone(s.Hosts)
ns.RequestHeaders = s.RequestHeaders.Copy()
ns.ResponseHeaders = s.ResponseHeaders.Copy()
ns.TLS = s.TLS.Copy()
ns.MaxConnections = pointerCopy(s.MaxConnections)
ns.MaxPendingRequests = pointerCopy(s.MaxPendingRequests)
ns.MaxConcurrentRequests = pointerCopy(s.MaxConcurrentRequests)
return ns
}
const (
@@ -514,7 +705,11 @@ type ConsulIngressConfigEntry struct {
// Namespace is not yet supported.
// Namespace string
TLS *ConsulGatewayTLSConfig `hcl:"tls,block"`
// TLS specifies a TLS configuration for the gateway.
TLS *ConsulGatewayTLSConfig `hcl:"tls,block"`
// Listeners specifies a list of listeners in the mesh for the
// gateway. Listeners are uniquely identified by their port number.
Listeners []*ConsulIngressListener `hcl:"listener,block"`
}
@@ -630,9 +825,7 @@ type ConsulMeshConfigEntry struct {
// nothing in here
}
func (e *ConsulMeshConfigEntry) Canonicalize() {
return
}
func (e *ConsulMeshConfigEntry) Canonicalize() {}
func (e *ConsulMeshConfigEntry) Copy() *ConsulMeshConfigEntry {
if e == nil {

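Similarly, a sketch of the expanded ingress gateway types above: per-service TLS backed by an SDS cluster, request-header rewrites, and the new connection limits. All values are illustrative, and the pointer helper is local because the api package keeps its own pointerOf unexported:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

// pointer is a small local helper for the new *uint32 limit fields.
func pointer[T any](v T) *T { return &v }

func main() {
	svc := &api.ConsulIngressService{
		Name:  "web",
		Hosts: []string{"web.example.com"},
		TLS: &api.ConsulGatewayTLSConfig{
			Enabled:       true,
			TLSMinVersion: "TLSv1_2",
			SDS: &api.ConsulGatewayTLSSDSConfig{
				ClusterName:  "sds-cluster",
				CertResource: "web-cert",
			},
		},
		RequestHeaders: &api.ConsulHTTPHeaderModifiers{
			Add:    map[string]string{"x-gateway": "nomad"},
			Remove: []string{"x-debug"},
		},
		MaxConnections:        pointer(uint32(1024)),
		MaxPendingRequests:    pointer(uint32(512)),
		MaxConcurrentRequests: pointer(uint32(256)),
	}
	svc.Canonicalize()
	fmt.Printf("%+v\n", svc)
}
```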

@@ -23,6 +23,7 @@ const (
Plugins Context = "plugins"
Variables Context = "vars"
Volumes Context = "volumes"
HostVolumes Context = "host_volumes"
// These Context types are used to associate a search result from a lower
// level Nomad object with one of the higher level Context types above.


@@ -76,13 +76,27 @@ func (v *CSIVolumes) Info(id string, q *QueryOptions) (*CSIVolume, *QueryMeta, e
// Register registers a single CSIVolume with Nomad. The volume must already
// exist in the external storage provider.
func (v *CSIVolumes) Register(vol *CSIVolume, w *WriteOptions) (*WriteMeta, error) {
req := CSIVolumeRegisterRequest{
req := &CSIVolumeRegisterRequest{
Volumes: []*CSIVolume{vol},
}
meta, err := v.client.put("/v1/volume/csi/"+vol.ID, req, nil, w)
_, meta, err := v.RegisterOpts(req, w)
return meta, err
}
// RegisterOpts registers a single CSIVolume with Nomad. The volume must already
// exist in the external storage provider. It expects a single volume in the
// request.
func (v *CSIVolumes) RegisterOpts(req *CSIVolumeRegisterRequest, w *WriteOptions) (*CSIVolumeRegisterResponse, *WriteMeta, error) {
if w == nil {
w = &WriteOptions{}
}
vol := req.Volumes[0]
resp := &CSIVolumeRegisterResponse{}
meta, err := v.client.put("/v1/volume/csi/"+vol.ID, req, resp, w)
return resp, meta, err
}
// Deregister deregisters a single CSIVolume from Nomad. The volume will not be deleted from the external storage provider.
func (v *CSIVolumes) Deregister(id string, force bool, w *WriteOptions) error {
_, err := v.client.delete(fmt.Sprintf("/v1/volume/csi/%v?force=%t", url.PathEscape(id), force), nil, nil, w)
@@ -97,15 +111,28 @@ func (v *CSIVolumes) Create(vol *CSIVolume, w *WriteOptions) ([]*CSIVolume, *Wri
Volumes: []*CSIVolume{vol},
}
resp := &CSIVolumeCreateResponse{}
meta, err := v.client.put(fmt.Sprintf("/v1/volume/csi/%v/create", vol.ID), req, resp, w)
resp, meta, err := v.CreateOpts(&req, w)
return resp.Volumes, meta, err
}
// DEPRECATED: will be removed in Nomad 1.4.0
// CreateOpts creates a single CSIVolume in an external storage provider and
// registers it with Nomad. You do not need to call Register if this call is
// successful. It expects a single volume in the request.
func (v *CSIVolumes) CreateOpts(req *CSIVolumeCreateRequest, w *WriteOptions) (*CSIVolumeCreateResponse, *WriteMeta, error) {
if w == nil {
w = &WriteOptions{}
}
vol := req.Volumes[0]
resp := &CSIVolumeCreateResponse{}
meta, err := v.client.put(fmt.Sprintf("/v1/volume/csi/%v/create", vol.ID), req, resp, w)
return resp, meta, err
}
// Delete deletes a CSI volume from an external storage provider. The ID
// passed as an argument here is for the storage provider's ID, so a volume
// that's already been deregistered can be deleted.
//
// Deprecated: will be removed in Nomad 1.4.0
func (v *CSIVolumes) Delete(externalVolID string, w *WriteOptions) error {
_, err := v.client.delete(fmt.Sprintf("/v1/volume/csi/%v/delete", url.PathEscape(externalVolID)), nil, nil, w)
return err
@@ -184,8 +211,9 @@ func (v *CSIVolumes) ListSnapshotsOpts(req *CSISnapshotListRequest) (*CSISnapsho
return resp, qm, nil
}
// DEPRECATED: will be removed in Nomad 1.4.0
// ListSnapshots lists external storage volume snapshots.
//
// Deprecated: will be removed in Nomad 1.4.0
func (v *CSIVolumes) ListSnapshots(pluginID string, secrets string, q *QueryOptions) (*CSISnapshotListResponse, *QueryMeta, error) {
var resp *CSISnapshotListResponse
@@ -269,26 +297,26 @@ func (o *CSIMountOptions) Merge(p *CSIMountOptions) {
// API or in Nomad's logs.
type CSISecrets map[string]string
func (q *QueryOptions) SetHeadersFromCSISecrets(secrets CSISecrets) {
func (o *QueryOptions) SetHeadersFromCSISecrets(secrets CSISecrets) {
pairs := []string{}
for k, v := range secrets {
pairs = append(pairs, fmt.Sprintf("%v=%v", k, v))
}
if q.Headers == nil {
q.Headers = map[string]string{}
if o.Headers == nil {
o.Headers = map[string]string{}
}
q.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",")
o.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",")
}
func (w *WriteOptions) SetHeadersFromCSISecrets(secrets CSISecrets) {
func (o *WriteOptions) SetHeadersFromCSISecrets(secrets CSISecrets) {
pairs := []string{}
for k, v := range secrets {
pairs = append(pairs, fmt.Sprintf("%v=%v", k, v))
}
if w.Headers == nil {
w.Headers = map[string]string{}
if o.Headers == nil {
o.Headers = map[string]string{}
}
w.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",")
o.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",")
}
// CSIVolume is used for serialization, see also nomad/structs/csi.go
@@ -351,6 +379,11 @@ type CSIVolume struct {
CreateIndex uint64
ModifyIndex uint64
// CreateTime stored as UnixNano
CreateTime int64
// ModifyTime stored as UnixNano
ModifyTime int64
// ExtraKeysHCL is used by the hcl parser to report unexpected keys
ExtraKeysHCL []string `hcl1:",unusedKeys" json:"-"`
}
@@ -401,6 +434,11 @@ type CSIVolumeListStub struct {
CreateIndex uint64
ModifyIndex uint64
// CreateTime stored as UnixNano
CreateTime int64
// ModifyTime stored as UnixNano
ModifyTime int64
}
type CSIVolumeListExternalResponse struct {
@@ -440,19 +478,33 @@ func (v CSIVolumeExternalStubSort) Swap(i, j int) {
type CSIVolumeCreateRequest struct {
Volumes []*CSIVolume
// PolicyOverride overrides Sentinel soft-mandatory policy enforcement
PolicyOverride bool
WriteRequest
}
type CSIVolumeCreateResponse struct {
Volumes []*CSIVolume
Volumes []*CSIVolume
Warnings string
QueryMeta
}
type CSIVolumeRegisterRequest struct {
Volumes []*CSIVolume
// PolicyOverride overrides Sentinel soft-mandatory policy enforcement
PolicyOverride bool
WriteRequest
}
type CSIVolumeRegisterResponse struct {
Volumes []*CSIVolume
Warnings string
}
type CSIVolumeDeregisterRequest struct {
VolumeIDs []string
WriteRequest
@@ -507,7 +559,7 @@ type CSISnapshotCreateResponse struct {
}
// CSISnapshotListRequest is a request to a controller plugin to list all the
// snapshot known to the the storage provider. This request is paginated by
// snapshot known to the storage provider. This request is paginated by
// the plugin and accepts the QueryOptions.PerPage and QueryOptions.NextToken
// fields
type CSISnapshotListRequest struct {
@@ -543,6 +595,11 @@ type CSIPlugin struct {
NodesExpected int
CreateIndex uint64
ModifyIndex uint64
// CreateTime stored as UnixNano
CreateTime int64
// ModifyTime stored as UnixNano
ModifyTime int64
}
type CSIPluginListStub struct {

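A minimal sketch of the new RegisterOpts call, which exposes PolicyOverride and returns the register response; volume identifiers and the plugin ID are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	req := &api.CSIVolumeRegisterRequest{
		Volumes: []*api.CSIVolume{{
			ID:         "csi-vol-0",      // placeholder volume ID
			Name:       "database",
			ExternalID: "vol-0123456789", // ID known to the external storage provider
			PluginID:   "aws-ebs0",
		}},
		// New: bypass Sentinel soft-mandatory policies for this registration.
		PolicyOverride: true,
	}

	resp, _, err := client.CSIVolumes().RegisterOpts(req, nil)
	if err != nil {
		log.Fatal(err)
	}
	if resp.Warnings != "" {
		fmt.Println("warnings:", resp.Warnings)
	}
}
```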

@@ -193,6 +193,10 @@ type Deployment struct {
CreateIndex uint64
ModifyIndex uint64
// Creation and modification times, stored as UnixNano
CreateTime int64
ModifyTime int64
}
// DeploymentState tracks the state of a deployment for a given task group.
@@ -261,6 +265,9 @@ type DeploymentPromoteRequest struct {
// Groups is used to set the promotion status per task group
Groups []string
// PromotedAt is the timestamp stored as Unix nano
PromotedAt int64
WriteRequest
}


@@ -186,7 +186,7 @@ func (e *EventStream) Stream(ctx context.Context, topics map[Topic][]string, ind
}
}
_, resp, err := requireOK(e.client.doRequest(r))
_, resp, err := requireOK(e.client.doRequest(r)) //nolint:bodyclose
if err != nil {
return nil, err


@@ -389,12 +389,23 @@ func (f *FrameReader) Read(p []byte) (n int, err error) {
case <-unblock:
return 0, nil
case err := <-f.errCh:
return 0, err
// check for race with f.frames before returning error
select {
case frame, ok := <-f.frames:
if !ok {
return 0, io.EOF
}
f.frame = frame
// Store the total offset into the file
f.byteOffset = int(f.frame.Offset)
default:
return 0, err
}
case <-f.cancelCh:
return 0, io.EOF
}
}
// Copy the data out of the frame and update our offset
n = copy(p, f.frame.Data[f.frameOffset:])
f.frameOffset += n


@@ -0,0 +1,70 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package api
import "net/url"
// TaskGroupHostVolumeClaim associates a task group with a host volume ID. It's
// used for stateful deployments, i.e., volume requests with "sticky" set to
// true.
type TaskGroupHostVolumeClaim struct {
ID string `mapstructure:"id"`
Namespace string `mapstructure:"namespace"`
JobID string `mapstructure:"job_id"`
TaskGroupName string `mapstructure:"task_group_name"`
AllocID string `mapstructure:"alloc_id"`
VolumeID string `mapstructure:"volume_id"`
VolumeName string `mapstructure:"volume_name"`
CreateIndex uint64
ModifyIndex uint64
}
// TaskGroupHostVolumeClaims is used to access the API.
type TaskGroupHostVolumeClaims struct {
client *Client
}
// TaskGroupHostVolumeClaims returns a new handle on the API.
func (c *Client) TaskGroupHostVolumeClaims() *TaskGroupHostVolumeClaims {
return &TaskGroupHostVolumeClaims{client: c}
}
type TaskGroupHostVolumeClaimsListRequest struct {
JobID string
TaskGroup string
VolumeName string
}
func (tgvc *TaskGroupHostVolumeClaims) List(req *TaskGroupHostVolumeClaimsListRequest, opts *QueryOptions) ([]*TaskGroupHostVolumeClaim, *QueryMeta, error) {
qv := url.Values{}
if req != nil {
if req.JobID != "" {
qv.Set("job_id", req.JobID)
}
if req.TaskGroup != "" {
qv.Set("task_group", req.TaskGroup)
}
if req.VolumeName != "" {
qv.Set("volume_name", req.VolumeName)
}
}
var out []*TaskGroupHostVolumeClaim
qm, err := tgvc.client.query("/v1/volumes/claims?"+qv.Encode(), &out, opts)
if err != nil {
return nil, qm, err
}
return out, qm, nil
}
func (tgvc *TaskGroupHostVolumeClaims) Delete(claimID string, opts *WriteOptions) (*WriteMeta, error) {
path, err := url.JoinPath("/v1/volumes/claim", url.PathEscape(claimID))
if err != nil {
return nil, err
}
wm, err := tgvc.client.delete(path, nil, nil, opts)
return wm, err
}

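A short sketch of the new claims API above: list the sticky-volume claims held by a job's task group, then release one. Job and group names are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	claims, _, err := client.TaskGroupHostVolumeClaims().List(
		&api.TaskGroupHostVolumeClaimsListRequest{
			JobID:     "db",      // placeholder job
			TaskGroup: "primary", // placeholder group
		}, nil)
	if err != nil {
		log.Fatal(err)
	}

	for _, claim := range claims {
		fmt.Printf("claim %s: alloc %s -> volume %s (%s)\n",
			claim.ID, claim.AllocID, claim.VolumeID, claim.VolumeName)
	}

	// Releasing a claim deletes the sticky association.
	if len(claims) > 0 {
		if _, err := client.TaskGroupHostVolumeClaims().Delete(claims[0].ID, nil); err != nil {
			log.Fatal(err)
		}
	}
}
```
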
vendor/github.com/hashicorp/nomad/api/host_volumes.go (generated, vendored)

@@ -0,0 +1,253 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package api
import "net/url"
// HostVolume represents a Dynamic Host Volume: a volume associated with a
// specific Nomad client agent but created via API.
type HostVolume struct {
// Namespace is the Nomad namespace for the host volume, which constrains
// which jobs can mount it.
Namespace string `mapstructure:"namespace" hcl:"namespace"`
// ID is a UUID-like string generated by the server.
ID string `mapstructure:"id" hcl:"id"`
// Name is the name that group.volume will use to identify the volume
// source. Not expected to be unique.
Name string `mapstructure:"name" hcl:"name"`
// PluginID is the name of the host volume plugin on the client that will be
// used for creating the volume. If omitted, the client will use its default
// built-in plugin.
PluginID string `mapstructure:"plugin_id" hcl:"plugin_id"`
// NodePool is the node pool of the node where the volume is placed. If the
// user doesn't provide a node ID, a node will be selected using the
// NodePool and Constraints. If the user provides both NodePool and NodeID,
// NodePool will be used to validate the request. If omitted, the server
// will populate this value in before writing the volume to Raft.
NodePool string `mapstructure:"node_pool" hcl:"node_pool"`
// NodeID is the node where the volume is placed. If the user doesn't
// provide a NodeID, one will be selected using the NodePool and
// Constraints. If omitted, this field will then be populated by the server
// before writing the volume to Raft.
NodeID string `mapstructure:"node_id" hcl:"node_id"`
// Constraints are optional. If the NodeID is not provided, the NodePool and
// Constraints are used to select a node. If the NodeID is provided,
// Constraints are used to validate that the node meets those constraints at
// the time of volume creation.
Constraints []*Constraint `json:",omitempty" hcl:"constraint"`
// Because storage may allow only specific intervals of size, we accept a
// min and max and return the actual capacity when the volume is created or
// updated on the client
RequestedCapacityMinBytes int64 `mapstructure:"capacity_min" hcl:"capacity_min"`
RequestedCapacityMaxBytes int64 `mapstructure:"capacity_max" hcl:"capacity_max"`
CapacityBytes int64 `mapstructure:"capacity" hcl:"capacity"`
// RequestedCapabilities defines the options available to group.volume
// blocks. The scheduler checks against the listed capability blocks and
// selects a node for placement if *any* capability block works.
RequestedCapabilities []*HostVolumeCapability `hcl:"capability"`
// Parameters are an opaque map of parameters for the host volume plugin.
Parameters map[string]string `json:",omitempty"`
// HostPath is the path on disk where the volume's mount point was
// created. We record this to make debugging easier.
HostPath string `mapstructure:"host_path" hcl:"host_path"`
// State represents the overall state of the volume. One of pending, ready,
// deleted.
State HostVolumeState
CreateIndex uint64
CreateTime int64
ModifyIndex uint64
ModifyTime int64
// Allocations is the list of non-client-terminal allocations with claims on
// this host volume. They are denormalized on read and this field will
// never be written to Raft
Allocations []*AllocationListStub `json:",omitempty" mapstructure:"-" hcl:"-"`
}
// HostVolume state reports the current status of the host volume
type HostVolumeState string
const (
HostVolumeStatePending HostVolumeState = "pending"
HostVolumeStateReady HostVolumeState = "ready"
HostVolumeStateUnavailable HostVolumeState = "unavailable"
)
// HostVolumeCapability is the requested attachment and access mode for a volume
type HostVolumeCapability struct {
AttachmentMode HostVolumeAttachmentMode `mapstructure:"attachment_mode" hcl:"attachment_mode"`
AccessMode HostVolumeAccessMode `mapstructure:"access_mode" hcl:"access_mode"`
}
// HostVolumeAttachmentMode chooses the type of storage API that will be used to
// interact with the device.
type HostVolumeAttachmentMode string
const (
HostVolumeAttachmentModeUnknown HostVolumeAttachmentMode = ""
HostVolumeAttachmentModeBlockDevice HostVolumeAttachmentMode = "block-device"
HostVolumeAttachmentModeFilesystem HostVolumeAttachmentMode = "file-system"
)
// HostVolumeAccessMode indicates how Nomad should make the volume available to
// concurrent allocations.
type HostVolumeAccessMode string
const (
HostVolumeAccessModeUnknown HostVolumeAccessMode = ""
HostVolumeAccessModeSingleNodeReader HostVolumeAccessMode = "single-node-reader-only"
HostVolumeAccessModeSingleNodeWriter HostVolumeAccessMode = "single-node-writer"
HostVolumeAccessModeSingleNodeSingleWriter HostVolumeAccessMode = "single-node-single-writer"
HostVolumeAccessModeSingleNodeMultiWriter HostVolumeAccessMode = "single-node-multi-writer"
)
// HostVolumeStub is used for responses for the List Volumes endpoint
type HostVolumeStub struct {
Namespace string
ID string
Name string
PluginID string
NodePool string
NodeID string
CapacityBytes int64
State HostVolumeState
CreateIndex uint64
CreateTime int64
ModifyIndex uint64
ModifyTime int64
}
// HostVolumes is used to access the host volumes API.
type HostVolumes struct {
client *Client
}
// HostVolumes returns a new handle on the host volumes API.
func (c *Client) HostVolumes() *HostVolumes {
return &HostVolumes{client: c}
}
type HostVolumeCreateRequest struct {
Volume *HostVolume
// PolicyOverride overrides Sentinel soft-mandatory policy enforcement
PolicyOverride bool
}
type HostVolumeRegisterRequest struct {
Volume *HostVolume
// PolicyOverride overrides Sentinel soft-mandatory policy enforcement
PolicyOverride bool
}
type HostVolumeCreateResponse struct {
Volume *HostVolume
Warnings string
}
type HostVolumeRegisterResponse struct {
Volume *HostVolume
Warnings string
}
type HostVolumeListRequest struct {
NodeID string
NodePool string
}
type HostVolumeDeleteRequest struct {
ID string
Force bool
}
type HostVolumeDeleteResponse struct{}
// Create forwards to client agents so a host volume can be created on those
// hosts, and registers the volume with Nomad servers.
func (hv *HostVolumes) Create(req *HostVolumeCreateRequest, opts *WriteOptions) (*HostVolumeCreateResponse, *WriteMeta, error) {
var out *HostVolumeCreateResponse
wm, err := hv.client.put("/v1/volume/host/create", req, &out, opts)
if err != nil {
return nil, wm, err
}
return out, wm, nil
}
// Register registers a host volume that was created out-of-band with the Nomad
// servers.
func (hv *HostVolumes) Register(req *HostVolumeRegisterRequest, opts *WriteOptions) (*HostVolumeRegisterResponse, *WriteMeta, error) {
var out *HostVolumeRegisterResponse
wm, err := hv.client.put("/v1/volume/host/register", req, &out, opts)
if err != nil {
return nil, wm, err
}
return out, wm, nil
}
// Get queries for a single host volume, by ID
func (hv *HostVolumes) Get(id string, opts *QueryOptions) (*HostVolume, *QueryMeta, error) {
var out *HostVolume
path, err := url.JoinPath("/v1/volume/host/", url.PathEscape(id))
if err != nil {
return nil, nil, err
}
qm, err := hv.client.query(path, &out, opts)
if err != nil {
return nil, qm, err
}
return out, qm, nil
}
// List queries for a set of host volumes, by namespace, node, node pool, or
// name prefix.
func (hv *HostVolumes) List(req *HostVolumeListRequest, opts *QueryOptions) ([]*HostVolumeStub, *QueryMeta, error) {
var out []*HostVolumeStub
qv := url.Values{}
qv.Set("type", "host")
if req != nil {
if req.NodeID != "" {
qv.Set("node_id", req.NodeID)
}
if req.NodePool != "" {
qv.Set("node_pool", req.NodePool)
}
}
qm, err := hv.client.query("/v1/volumes?"+qv.Encode(), &out, opts)
if err != nil {
return nil, qm, err
}
return out, qm, nil
}
// Delete deletes a host volume
func (hv *HostVolumes) Delete(req *HostVolumeDeleteRequest, opts *WriteOptions) (*HostVolumeDeleteResponse, *WriteMeta, error) {
var resp *HostVolumeDeleteResponse
path, err := url.JoinPath("/v1/volume/host/", url.PathEscape(req.ID))
if err != nil {
return nil, nil, err
}
if req.Force {
path = path + "?force=true"
}
wm, err := hv.client.delete(path, nil, resp, opts)
return resp, wm, err
}

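And a sketch of creating a dynamic host volume through the new Create endpoint, using only fields defined above; the plugin ID, node pool and sizes are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	req := &api.HostVolumeCreateRequest{
		Volume: &api.HostVolume{
			Name:                      "scratch",
			PluginID:                  "mkdir", // assumed plugin name; omit to use the client's built-in default
			NodePool:                  "default",
			RequestedCapacityMinBytes: 1 << 30,  // 1 GiB
			RequestedCapacityMaxBytes: 10 << 30, // 10 GiB
			RequestedCapabilities: []*api.HostVolumeCapability{{
				AttachmentMode: api.HostVolumeAttachmentModeFilesystem,
				AccessMode:     api.HostVolumeAccessModeSingleNodeWriter,
			}},
		},
	}

	resp, _, err := client.HostVolumes().Create(req, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("created volume %s on node %s (state %s)\n",
		resp.Volume.ID, resp.Volume.NodeID, resp.Volume.State)
}
```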

@@ -8,13 +8,13 @@ import (
"errors"
"fmt"
"io"
"maps"
"net/url"
"sort"
"strconv"
"time"
"github.com/hashicorp/cronexpr"
"golang.org/x/exp/maps"
)
const (
@@ -74,9 +74,6 @@ type JobsParseRequest struct {
// JobHCL is an hcl jobspec
JobHCL string
// HCLv1 indicates whether the JobHCL should be parsed with the hcl v1 parser
HCLv1 bool `json:"hclv1,omitempty"`
// Variables are HCL2 variables associated with the job. Only works with hcl2.
//
// Interpreted as if it were the content of a variables file.
@@ -104,7 +101,7 @@ func (j *Jobs) ParseHCL(jobHCL string, canonicalize bool) (*Job, error) {
}
// ParseHCLOpts is used to request the server convert the HCL representation of a
// Job to JSON on our behalf. Accepts HCL1 or HCL2 jobs as input.
// Job to JSON on our behalf. Only accepts HCL2 jobs as input.
func (j *Jobs) ParseHCLOpts(req *JobsParseRequest) (*Job, error) {
var job Job
_, err := j.client.put("/v1/jobs/parse", req, &job, nil)
@@ -179,7 +176,7 @@ func (j *Jobs) List(q *QueryOptions) ([]*JobListStub, *QueryMeta, error) {
return j.ListOptions(nil, q)
}
// List is used to list all of the existing jobs.
// ListOptions is used to list all of the existing jobs.
func (j *Jobs) ListOptions(opts *JobListOptions, q *QueryOptions) ([]*JobListStub, *QueryMeta, error) {
var resp []*JobListStub
@@ -215,8 +212,7 @@ func (j *Jobs) Info(jobID string, q *QueryOptions) (*Job, *QueryMeta, error) {
return &resp, qm, nil
}
// Scale is used to retrieve information about a particular
// job given its unique ID.
// Scale is used to scale a job.
func (j *Jobs) Scale(jobID, group string, count *int, message string, error bool, meta map[string]interface{},
q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) {
@@ -242,6 +238,17 @@ func (j *Jobs) Scale(jobID, group string, count *int, message string, error bool
return &resp, qm, nil
}
// ScaleWithRequest is used to scale a job, giving the caller complete control
// over the ScalingRequest
func (j *Jobs) ScaleWithRequest(jobID string, req *ScalingRequest, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) {
var resp JobRegisterResponse
qm, err := j.client.put(fmt.Sprintf("/v1/job/%s/scale", url.PathEscape(jobID)), req, &resp, q)
if err != nil {
return nil, nil, err
}
return &resp, qm, nil
}
// ScaleStatus is used to retrieve information about a particular
// job given its unique ID.
func (j *Jobs) ScaleStatus(jobID string, q *QueryOptions) (*JobScaleStatusResponse, *QueryMeta, error) {
@@ -256,8 +263,50 @@ func (j *Jobs) ScaleStatus(jobID string, q *QueryOptions) (*JobScaleStatusRespon
// Versions is used to retrieve all versions of a particular job given its
// unique ID.
func (j *Jobs) Versions(jobID string, diffs bool, q *QueryOptions) ([]*Job, []*JobDiff, *QueryMeta, error) {
opts := &VersionsOptions{
Diffs: diffs,
}
return j.VersionsOpts(jobID, opts, q)
}
// VersionByTag is used to retrieve a job version by its VersionTag name.
func (j *Jobs) VersionByTag(jobID, tag string, q *QueryOptions) (*Job, *QueryMeta, error) {
versions, _, qm, err := j.Versions(jobID, false, q)
if err != nil {
return nil, nil, err
}
// Find the version with the matching tag
for _, version := range versions {
if version.VersionTag != nil && version.VersionTag.Name == tag {
return version, qm, nil
}
}
return nil, nil, fmt.Errorf("version tag %s not found for job %s", tag, jobID)
}
type VersionsOptions struct {
Diffs bool
DiffTag string
DiffVersion *uint64
}
func (j *Jobs) VersionsOpts(jobID string, opts *VersionsOptions, q *QueryOptions) ([]*Job, []*JobDiff, *QueryMeta, error) {
var resp JobVersionsResponse
qm, err := j.client.query(fmt.Sprintf("/v1/job/%s/versions?diffs=%v", url.PathEscape(jobID), diffs), &resp, q)
qp := url.Values{}
if opts != nil {
qp.Add("diffs", strconv.FormatBool(opts.Diffs))
if opts.DiffTag != "" {
qp.Add("diff_tag", opts.DiffTag)
}
if opts.DiffVersion != nil {
qp.Add("diff_version", strconv.FormatUint(*opts.DiffVersion, 10))
}
}
qm, err := j.client.query(fmt.Sprintf("/v1/job/%s/versions?%s", url.PathEscape(jobID), qp.Encode()), &resp, q)
if err != nil {
return nil, nil, nil, err
}
@@ -274,6 +323,7 @@ func (j *Jobs) Submission(jobID string, version int, q *QueryOptions) (*JobSubmi
if err != nil {
return nil, nil, err
}
return &sub, qm, nil
}
@@ -479,16 +529,39 @@ func (j *Jobs) Summary(jobID string, q *QueryOptions) (*JobSummary, *QueryMeta,
return &resp, qm, nil
}
// DispatchOptions is used to pass through job dispatch parameters
type DispatchOptions struct {
JobID string
Meta map[string]string
Payload []byte
IdPrefixTemplate string
Priority int
}
func (j *Jobs) Dispatch(jobID string, meta map[string]string,
payload []byte, idPrefixTemplate string, q *WriteOptions) (*JobDispatchResponse, *WriteMeta, error) {
var resp JobDispatchResponse
req := &JobDispatchRequest{
return j.DispatchOpts(&DispatchOptions{
JobID: jobID,
Meta: meta,
Payload: payload,
IdPrefixTemplate: idPrefixTemplate,
IdPrefixTemplate: idPrefixTemplate},
q,
)
}
// DispatchOpts is used to dispatch a new job with the passed DispatchOpts. It
// returns the ID of the evaluation, along with any errors encountered.
func (j *Jobs) DispatchOpts(opts *DispatchOptions, q *WriteOptions) (*JobDispatchResponse, *WriteMeta, error) {
var resp JobDispatchResponse
req := &JobDispatchRequest{
JobID: opts.JobID,
Meta: opts.Meta,
Payload: opts.Payload,
IdPrefixTemplate: opts.IdPrefixTemplate,
Priority: opts.Priority,
}
wm, err := j.client.put("/v1/job/"+url.PathEscape(jobID)+"/dispatch", req, &resp, q)
wm, err := j.client.put("/v1/job/"+url.PathEscape(opts.JobID)+"/dispatch", req, &resp, q)
if err != nil {
return nil, nil, err
}
@@ -499,15 +572,13 @@ func (j *Jobs) Dispatch(jobID string, meta map[string]string,
// enforceVersion is set, the job is only reverted if the current version is at
// the passed version.
func (j *Jobs) Revert(jobID string, version uint64, enforcePriorVersion *uint64,
q *WriteOptions, consulToken, vaultToken string) (*JobRegisterResponse, *WriteMeta, error) {
q *WriteOptions, _ string, _ string) (*JobRegisterResponse, *WriteMeta, error) {
var resp JobRegisterResponse
req := &JobRevertRequest{
JobID: jobID,
JobVersion: version,
EnforcePriorVersion: enforcePriorVersion,
ConsulToken: consulToken,
VaultToken: vaultToken,
}
wm, err := j.client.put("/v1/job/"+url.PathEscape(jobID)+"/revert", req, &resp, q)
if err != nil {
@@ -918,10 +989,11 @@ type ParameterizedJobConfig struct {
// the job submission.
type JobSubmission struct {
// Source contains the original job definition (may be in the format of
// hcl1, hcl2, or json).
// hcl1, hcl2, or json). HCL1 jobs can no longer be parsed.
Source string
// Format indicates what the Source content was (hcl1, hcl2, or json).
// Format indicates what the Source content was (hcl1, hcl2, or json). HCL1
// jobs can no longer be parsed.
Format string
// VariableFlags contains the CLI "-var" flag arguments as submitted with the
@@ -933,6 +1005,70 @@ type JobSubmission struct {
Variables string
}
type JobUIConfig struct {
Description string `hcl:"description,optional"`
Links []*JobUILink `hcl:"link,block"`
}
type JobUILink struct {
Label string `hcl:"label,optional"`
URL string `hcl:"url,optional"`
}
func (j *JobUIConfig) Canonicalize() {
if j == nil {
return
}
if len(j.Links) == 0 {
j.Links = nil
}
}
func (j *JobUIConfig) Copy() *JobUIConfig {
if j == nil {
return nil
}
copy := new(JobUIConfig)
copy.Description = j.Description
for _, link := range j.Links {
copy.Links = append(copy.Links, link.Copy())
}
return copy
}
func (j *JobUILink) Copy() *JobUILink {
if j == nil {
return nil
}
return &JobUILink{
Label: j.Label,
URL: j.URL,
}
}
type JobVersionTag struct {
Name string
Description string
TaggedTime int64
}
func (j *JobVersionTag) Copy() *JobVersionTag {
if j == nil {
return nil
}
return &JobVersionTag{
Name: j.Name,
Description: j.Description,
TaggedTime: j.TaggedTime,
}
}
func (js *JobSubmission) Canonicalize() {
if js == nil {
return
@@ -941,6 +1077,13 @@ func (js *JobSubmission) Canonicalize() {
if len(js.VariableFlags) == 0 {
js.VariableFlags = nil
}
// if there are multiline variables, make sure we escape the newline
// characters to preserve them. This way, when the job gets stopped and
// restarted in the UI, variable values will be parsed correctly.
for k, v := range js.VariableFlags {
js.VariableFlags[k] = url.QueryEscape(v)
}
}
func (js *JobSubmission) Copy() *JobSubmission {
@@ -980,8 +1123,7 @@ type Job struct {
Reschedule *ReschedulePolicy `hcl:"reschedule,block"`
Migrate *MigrateStrategy `hcl:"migrate,block"`
Meta map[string]string `hcl:"meta,block"`
ConsulToken *string `mapstructure:"consul_token" hcl:"consul_token,optional"`
VaultToken *string `mapstructure:"vault_token" hcl:"vault_token,optional"`
UI *JobUIConfig `hcl:"ui,block"`
/* Fields set by server, not sourced from job config file */
@@ -1001,6 +1143,7 @@ type Job struct {
CreateIndex *uint64
ModifyIndex *uint64
JobModifyIndex *uint64
VersionTag *JobVersionTag
}
// IsPeriodic returns whether a job is periodic.
@@ -1049,15 +1192,9 @@ func (j *Job) Canonicalize() {
if j.AllAtOnce == nil {
j.AllAtOnce = pointerOf(false)
}
if j.ConsulToken == nil {
j.ConsulToken = pointerOf("")
}
if j.ConsulNamespace == nil {
j.ConsulNamespace = pointerOf("")
}
if j.VaultToken == nil {
j.VaultToken = pointerOf("")
}
if j.VaultNamespace == nil {
j.VaultNamespace = pointerOf("")
}
@@ -1107,6 +1244,10 @@ func (j *Job) Canonicalize() {
for _, a := range j.Affinities {
a.Canonicalize()
}
if j.UI != nil {
j.UI.Canonicalize()
}
}
// LookupTaskGroup finds a task group by name
@@ -1279,6 +1420,15 @@ func (j *Job) AddSpread(s *Spread) *Job {
return j
}
func (j *Job) GetScalingPoliciesPerTaskGroup() map[string]*ScalingPolicy {
ret := map[string]*ScalingPolicy{}
for _, tg := range j.TaskGroups {
ret[*tg.Name] = tg.Scaling
}
return ret
}
type WriteRequest struct {
// The target region for this write
Region string
@@ -1325,18 +1475,6 @@ type JobRevertRequest struct {
// version before reverting.
EnforcePriorVersion *uint64
// ConsulToken is the Consul token that proves the submitter of the job revert
// has access to the Service Identity policies associated with the job's
// Consul Connect enabled services. This field is only used to transfer the
// token and is not stored after the Job revert.
ConsulToken string `json:",omitempty"`
// VaultToken is the Vault token that proves the submitter of the job revert
// has access to any Vault policies specified in the targeted job version. This
// field is only used to authorize the revert and is not stored after the Job
// revert.
VaultToken string `json:",omitempty"`
WriteRequest
}
@@ -1465,6 +1603,7 @@ type JobDispatchRequest struct {
Payload []byte
Meta map[string]string
IdPrefixTemplate string
Priority int
}
type JobDispatchResponse struct {
@@ -1544,3 +1683,31 @@ func (j *Jobs) ActionExec(ctx context.Context,
return s.run(ctx)
}
// JobStatusesRequest is used to get statuses for jobs,
// their allocations and deployments.
type JobStatusesRequest struct {
// Jobs may be optionally provided to request a subset of specific jobs.
Jobs []NamespacedID
// IncludeChildren will include child (batch) jobs in the response.
IncludeChildren bool
}
type TagVersionRequest struct {
Version uint64
Description string
WriteRequest
}
func (j *Jobs) TagVersion(jobID string, version uint64, name string, description string, q *WriteOptions) (*WriteMeta, error) {
var tagRequest = &TagVersionRequest{
Version: version,
Description: description,
}
return j.client.put("/v1/job/"+url.PathEscape(jobID)+"/versions/"+name+"/tag", tagRequest, nil, q)
}
func (j *Jobs) UntagVersion(jobID string, name string, q *WriteOptions) (*WriteMeta, error) {
return j.client.delete("/v1/job/"+url.PathEscape(jobID)+"/versions/"+name+"/tag", nil, nil, q)
}
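Taken together, the new job-version helpers above (TagVersion, VersionByTag, VersionsOpts) and the DispatchOptions.Priority field can be exercised roughly as in the sketch below; client construction via api.NewClient(api.DefaultConfig()) is omitted and every job ID, tag name and value is illustrative only.

package nomadexamples

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

// Tag a job version, diff all versions against the tag, then dispatch a
// parameterized job at an elevated priority.
func versionAndDispatch(client *api.Client) error {
	jobs := client.Jobs()

	if _, err := jobs.TagVersion("example", 3, "golden", "known-good build", nil); err != nil {
		return err
	}

	tagged, _, err := jobs.VersionByTag("example", "golden", nil)
	if err != nil {
		return err
	}
	fmt.Println("tagged version:", *tagged.Version)

	// Ask the server to diff every version against the tagged one.
	versions, diffs, _, err := jobs.VersionsOpts("example", &api.VersionsOptions{
		Diffs:   true,
		DiffTag: "golden",
	}, nil)
	if err != nil {
		return err
	}
	fmt.Println(len(versions), "versions,", len(diffs), "diffs")

	// Dispatch with the new Priority field.
	resp, _, err := jobs.DispatchOpts(&api.DispatchOptions{
		JobID:    "example-batch",
		Meta:     map[string]string{"run": "nightly"},
		Priority: 80,
	}, nil)
	if err != nil {
		return err
	}
	fmt.Println("dispatched:", resp.DispatchedJobID)
	return nil
}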

View File

@@ -6,6 +6,7 @@ package api
import (
"fmt"
"net/url"
"strconv"
)
// Keyring is used to access the Variables keyring.
@@ -34,16 +35,18 @@ type RootKeyMeta struct {
CreateIndex uint64
ModifyIndex uint64
State RootKeyState
PublishTime int64
}
// RootKeyState enum describes the lifecycle of a root key.
type RootKeyState string
const (
RootKeyStateInactive RootKeyState = "inactive"
RootKeyStateActive = "active"
RootKeyStateRekeying = "rekeying"
RootKeyStateDeprecated = "deprecated"
RootKeyStateInactive RootKeyState = "inactive"
RootKeyStateActive = "active"
RootKeyStateRekeying = "rekeying"
RootKeyStateDeprecated = "deprecated"
RootKeyStatePrepublished = "prepublished"
)
// List lists all the keyring metadata
@@ -58,14 +61,17 @@ func (k *Keyring) List(q *QueryOptions) ([]*RootKeyMeta, *QueryMeta, error) {
// Delete deletes a specific inactive key from the keyring
func (k *Keyring) Delete(opts *KeyringDeleteOptions, w *WriteOptions) (*WriteMeta, error) {
wm, err := k.client.delete(fmt.Sprintf("/v1/operator/keyring/key/%v",
url.PathEscape(opts.KeyID)), nil, nil, w)
wm, err := k.client.delete(fmt.Sprintf("/v1/operator/keyring/key/%v?force=%v",
url.PathEscape(opts.KeyID), strconv.FormatBool(opts.Force)), nil, nil, w)
return wm, err
}
// KeyringDeleteOptions are parameters for the Delete API
type KeyringDeleteOptions struct {
KeyID string // UUID
// Force can be used to force deletion of a root key that was used to encrypt
// an existing variable or to sign a workload identity
Force bool
}
// Rotate requests a key rotation
@@ -78,6 +84,9 @@ func (k *Keyring) Rotate(opts *KeyringRotateOptions, w *WriteOptions) (*RootKeyM
if opts.Full {
qp.Set("full", "true")
}
if opts.PublishTime > 0 {
qp.Set("publish_time", fmt.Sprintf("%d", opts.PublishTime))
}
}
resp := &struct{ Key *RootKeyMeta }{}
wm, err := k.client.put("/v1/operator/keyring/rotate?"+qp.Encode(), nil, resp, w)
@@ -86,6 +95,7 @@ func (k *Keyring) Rotate(opts *KeyringRotateOptions, w *WriteOptions) (*RootKeyM
// KeyringRotateOptions are parameters for the Rotate API
type KeyringRotateOptions struct {
Full bool
Algorithm EncryptionAlgorithm
Full bool
Algorithm EncryptionAlgorithm
PublishTime int64
}
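A minimal sketch of the new keyring options, assuming the usual Keyring() accessor on the client; treating PublishTime as a Unix-nanosecond timestamp is an assumption not spelled out in this hunk, and the key ID is illustrative.

package nomadexamples

import (
	"time"

	"github.com/hashicorp/nomad/api"
)

// Pre-publish a rotated root key one hour ahead, then force-delete an old key
// even if it still protects variables or signed workload identities.
func rotateAndClean(client *api.Client, oldKeyID string) error {
	k := client.Keyring()

	publishAt := time.Now().Add(time.Hour).UnixNano() // assumed unit: nanoseconds
	if _, _, err := k.Rotate(&api.KeyringRotateOptions{PublishTime: publishAt}, nil); err != nil {
		return err
	}

	_, err := k.Delete(&api.KeyringDeleteOptions{KeyID: oldKeyID, Force: true}, nil)
	return err
}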

View File

@@ -85,8 +85,10 @@ type Namespace struct {
// NamespaceCapabilities represents a set of capabilities allowed for this
// namespace, to be checked at job submission time.
type NamespaceCapabilities struct {
EnabledTaskDrivers []string `hcl:"enabled_task_drivers"`
DisabledTaskDrivers []string `hcl:"disabled_task_drivers"`
EnabledTaskDrivers []string `hcl:"enabled_task_drivers"`
DisabledTaskDrivers []string `hcl:"disabled_task_drivers"`
EnabledNetworkModes []string `hcl:"enabled_network_modes"`
DisabledNetworkModes []string `hcl:"disabled_network_modes"`
}
// NamespaceNodePoolConfiguration stores configuration about node pools for a
@@ -156,3 +158,12 @@ func (n NamespaceIndexSort) Less(i, j int) bool {
func (n NamespaceIndexSort) Swap(i, j int) {
n[i], n[j] = n[j], n[i]
}
// NamespacedID is used for things that are unique only per-namespace,
// such as jobs.
type NamespacedID struct {
// Namespace is the Name of the Namespace
Namespace string
// ID is the ID of the namespaced object (e.g. Job ID)
ID string
}
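A minimal sketch of the new network-mode capability filters, assuming the existing Namespaces().Register call and the Name/Capabilities fields of api.Namespace, which are not shown in this hunk; driver and mode names are illustrative.

package nomadexamples

import "github.com/hashicorp/nomad/api"

// Restrict a namespace to the docker driver and disallow bridge networking.
func registerNamespace(client *api.Client) error {
	ns := &api.Namespace{
		Name: "edge",
		Capabilities: &api.NamespaceCapabilities{
			EnabledTaskDrivers:   []string{"docker"},
			DisabledNetworkModes: []string{"bridge"},
		},
	}
	_, err := client.Namespaces().Register(ns, nil)
	return err
}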

View File

@@ -126,7 +126,7 @@ func (n *Nodes) UpdateDrain(nodeID string, spec *DrainSpec, markEligible bool, q
return resp, err
}
// UpdateDrainWithMeta is used to update the drain strategy for a given node. If
// UpdateDrainOpts is used to update the drain strategy for a given node. If
// markEligible is true and the drain is being removed, the node will be marked
// as having its scheduling being eligible
func (n *Nodes) UpdateDrainOpts(nodeID string, opts *DrainOptions, q *WriteOptions) (*NodeDrainUpdateResponse,
@@ -478,7 +478,7 @@ func (n *Nodes) GC(nodeID string, q *QueryOptions) error {
return err
}
// TODO Add tests
// GcAlloc - TODO Add tests
func (n *Nodes) GcAlloc(allocID string, q *QueryOptions) error {
path := fmt.Sprintf("/v1/client/allocation/%s/gc", allocID)
_, err := n.client.query(path, nil, q)
@@ -517,6 +517,8 @@ type DriverInfo struct {
type HostVolumeInfo struct {
Path string
ReadOnly bool
// ID is set for dynamic host volumes only.
ID string
}
// HostNetworkInfo is used to return metadata about a given HostNetwork
@@ -564,12 +566,14 @@ type Node struct {
Events []*NodeEvent
Drivers map[string]*DriverInfo
HostVolumes map[string]*HostVolumeInfo
GCVolumesOnNodeGC bool
HostNetworks map[string]*HostNetworkInfo
CSIControllerPlugins map[string]*CSIInfo
CSINodePlugins map[string]*CSIInfo
LastDrain *DrainMetadata
CreateIndex uint64
ModifyIndex uint64
NodeMaxAllocs int
}
type NodeResources struct {
@@ -783,6 +787,7 @@ type HostStats struct {
Memory *HostMemoryStats
CPU []*HostCPUStats
DiskStats []*HostDiskStats
AllocDirStats *HostDiskStats
DeviceStats []*DeviceGroupStats
Uptime uint64
CPUTicksConsumed float64

View File

@@ -8,6 +8,7 @@ import (
"errors"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
@@ -66,7 +67,7 @@ func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, e
return nil, err
}
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
_, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose
if err != nil {
return nil, err
}
@@ -82,6 +83,10 @@ func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, e
// RaftRemovePeerByAddress is used to kick a stale peer (one that it in the Raft
// quorum but no longer known to Serf or the catalog) by address in the form of
// "IP:port".
//
// DEPRECATED: this method supported Raft Protocol v2, which was removed from
// Nomad in 1.4.0. The address parameter of the HTTP endpoint has been made
non-functional in Nomad 1.10.x and will be removed in Nomad 1.12.0.
func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
r, err := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
if err != nil {
@@ -91,7 +96,7 @@ func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) err
r.params.Set("address", address)
_, resp, err := requireOK(op.c.doRequest(r))
_, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose
if err != nil {
return err
}
@@ -111,7 +116,7 @@ func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error {
r.params.Set("id", id)
_, resp, err := requireOK(op.c.doRequest(r))
_, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose
if err != nil {
return err
}
@@ -131,7 +136,7 @@ func (op *Operator) RaftTransferLeadershipByAddress(address string, q *WriteOpti
r.params.Set("address", address)
_, resp, err := requireOK(op.c.doRequest(r))
_, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose
if err != nil {
return err
}
@@ -151,7 +156,7 @@ func (op *Operator) RaftTransferLeadershipByID(id string, q *WriteOptions) error
r.params.Set("id", id)
_, resp, err := requireOK(op.c.doRequest(r))
_, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose
if err != nil {
return err
}
@@ -262,7 +267,7 @@ func (op *Operator) Snapshot(q *QueryOptions) (io.ReadCloser, error) {
return nil, err
}
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
_, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose
if err != nil {
return nil, err
}
@@ -273,7 +278,6 @@ func (op *Operator) Snapshot(q *QueryOptions) (io.ReadCloser, error) {
if err != nil {
io.Copy(io.Discard, resp.Body)
resp.Body.Close()
return nil, err
}
@@ -355,7 +359,7 @@ func (op *Operator) ApplyLicense(license string, opts *ApplyLicenseOptions, q *W
r.setWriteOptions(q)
r.body = strings.NewReader(license)
rtt, resp, err := requireOK(op.c.doRequest(r))
rtt, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose
if err != nil {
return nil, err
}
@@ -375,7 +379,7 @@ func (op *Operator) LicenseGet(q *QueryOptions) (*LicenseReply, *QueryMeta, erro
req.setQueryOptions(q)
var reply LicenseReply
rtt, resp, err := op.c.doRequest(req)
rtt, resp, err := op.c.doRequest(req) //nolint:bodyclose
if err != nil {
return nil, nil, err
}
@@ -412,3 +416,88 @@ type LeadershipTransferResponse struct {
WriteMeta
}
// VaultWorkloadIdentityUpgradeCheck is the result of verifying if the cluster
// is ready to switch to workload identities for Vault.
type VaultWorkloadIdentityUpgradeCheck struct {
// JobsWithoutVaultIdentity is the list of jobs that have a `vault` block
// but do not have an `identity` for Vault.
JobsWithoutVaultIdentity []*JobListStub
// OutdatedNodes is the list of nodes running a version of Nomad that does
// not support workload identities for Vault.
OutdatedNodes []*NodeListStub
// VaultTokens is the list of Vault ACL token accessors that Nomad created
// and will no longer manage after the cluster is migrated to workload
// identities.
VaultTokens []*VaultAccessor
}
// Ready returns true if the cluster is ready to migrate to workload identities
// with Vault.
func (v *VaultWorkloadIdentityUpgradeCheck) Ready() bool {
return v != nil &&
len(v.VaultTokens) == 0 &&
len(v.OutdatedNodes) == 0 &&
len(v.JobsWithoutVaultIdentity) == 0
}
// VaultAccessor is a Vault ACL token created by Nomad for a task to access
// Vault using the legacy authentication flow.
type VaultAccessor struct {
// AllocID is the ID of the allocation that requested this token.
AllocID string
// Task is the name of the task that requested this token.
Task string
// NodeID is the ID of the node running the allocation that requested this
// token.
NodeID string
// Accessor is the Vault ACL token accessor ID.
Accessor string
// CreationTTL is the TTL set when the token was created.
CreationTTL int
// CreateIndex is the Raft index when the token was created.
CreateIndex uint64
}
// UpgradeCheckVaultWorkloadIdentity retrieves the cluster status for migrating
// to workload identities with Vault.
func (op *Operator) UpgradeCheckVaultWorkloadIdentity(q *QueryOptions) (*VaultWorkloadIdentityUpgradeCheck, *QueryMeta, error) {
var resp VaultWorkloadIdentityUpgradeCheck
qm, err := op.c.query("/v1/operator/upgrade-check/vault-workload-identity", &resp, q)
if err != nil {
return nil, nil, err
}
return &resp, qm, nil
}
type OperatorUtilizationOptions struct {
TodayOnly bool
}
type OperatorUtilizationSnapshotResponse struct {
// Bundle is the JSON serialized utilization reporting bundle.
Bundle []byte
WriteMeta
}
// Utilization retrieves a utilization reporting bundle (Nomad Enterprise only).
func (op *Operator) Utilization(opts *OperatorUtilizationOptions, w *WriteOptions) (*OperatorUtilizationSnapshotResponse, *WriteMeta, error) {
resp := &OperatorUtilizationSnapshotResponse{}
v := url.Values{}
if opts.TodayOnly {
v.Add("today", "true")
}
wm, err := op.c.post("/v1/operator/utilization?"+v.Encode(), nil, resp, w)
if err != nil {
return nil, nil, err
}
return resp, wm, nil
}
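A minimal sketch combining the new upgrade-check and utilization endpoints via the existing Operator() accessor; the output file name is illustrative and the utilization call only succeeds on Nomad Enterprise.

package nomadexamples

import (
	"fmt"
	"os"

	"github.com/hashicorp/nomad/api"
)

// Report readiness for Vault workload identities and save a utilization bundle.
func operatorChecks(client *api.Client) error {
	op := client.Operator()

	check, _, err := op.UpgradeCheckVaultWorkloadIdentity(nil)
	if err != nil {
		return err
	}
	fmt.Println("ready for Vault workload identities:", check.Ready())

	snap, _, err := op.Utilization(&api.OperatorUtilizationOptions{TodayOnly: true}, nil)
	if err != nil {
		return err
	}
	return os.WriteFile("utilization.json", snap.Bundle, 0o600)
}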

View File

@@ -178,6 +178,86 @@ type OperatorHealthReply struct {
// Servers holds the health of each server.
Servers []ServerHealth
// The ID of the current leader.
Leader string
// List of servers that are voters in the Raft configuration.
Voters []string
// ReadReplicas holds the list of servers that are
// read replicas in the Raft configuration. (Enterprise only)
ReadReplicas []string `json:",omitempty"`
// RedundancyZones holds the list of servers in each redundancy zone.
// (Enterprise only)
RedundancyZones map[string]AutopilotZone `json:",omitempty"`
// Upgrade holds the current upgrade status.
Upgrade *AutopilotUpgrade `json:",omitempty"`
// The number of servers that could be lost without an outage
// occurring if all the voters don't fail at once. (Enterprise only)
OptimisticFailureTolerance int `json:",omitempty"`
}
// AutopilotZone holds the list of servers in a redundancy zone. (Enterprise only)
type AutopilotZone struct {
// Servers holds the list of servers in the redundancy zone.
Servers []string
// Voters holds the list of servers that are voters in the redundancy zone.
Voters []string
// FailureTolerance is the number of servers that could be lost without an
// outage occurring.
FailureTolerance int
}
// AutopilotUpgrade holds the current upgrade status. (Enterprise only)
type AutopilotUpgrade struct {
// Status of the upgrade.
Status string
// TargetVersion is the version that the cluster is upgrading to.
TargetVersion string
// TargetVersionVoters holds the list of servers that are voters in the Raft
// configuration of the TargetVersion.
TargetVersionVoters []string
// TargetVersionNonVoters holds the list of servers that are non-voters in
// the Raft configuration of the TargetVersion.
TargetVersionNonVoters []string
// TargetVersionReadReplicas holds the list of servers that are read
// replicas in the Raft configuration of the TargetVersion.
TargetVersionReadReplicas []string
// OtherVersionVoters holds the list of servers that are voters in the Raft
// configuration of a version other than the TargetVersion.
OtherVersionVoters []string
// OtherVersionNonVoters holds the list of servers that are non-voters in
// the Raft configuration of a version other than the TargetVersion.
OtherVersionNonVoters []string
// OtherVersionReadReplicas holds the list of servers that are read replicas
// in the Raft configuration of a version other than the TargetVersion.
OtherVersionReadReplicas []string
// RedundancyZones holds the list of servers in each redundancy zone for the
// TargetVersion.
RedundancyZones map[string]AutopilotZoneUpgradeVersions
}
// AutopilotZoneUpgradeVersions holds the list of servers
// in a redundancy zone for a specific version. (Enterprise only)
type AutopilotZoneUpgradeVersions struct {
TargetVersionVoters []string
TargetVersionNonVoters []string
OtherVersionVoters []string
OtherVersionNonVoters []string
}
// AutopilotGetConfiguration is used to query the current Autopilot configuration.

View File

@@ -51,7 +51,7 @@ func (q *Quotas) ListUsage(qo *QueryOptions) ([]*QuotaUsage, *QueryMeta, error)
return resp, qm, nil
}
// PrefixList is used to do a PrefixList search over quota usages
// PrefixListUsage is used to do a PrefixList search over quota usages
func (q *Quotas) PrefixListUsage(prefix string, qo *QueryOptions) ([]*QuotaUsage, *QueryMeta, error) {
if qo == nil {
qo = &QueryOptions{Prefix: prefix}
@@ -127,17 +127,43 @@ type QuotaLimit struct {
// referencing namespace in the region. A value of zero is treated as
// unlimited and a negative value is treated as fully disallowed. This is
// useful for once we support GPUs
RegionLimit *Resources
RegionLimit *QuotaResources
// VariablesLimit is the maximum total size of all variables
// Variable.EncryptedData. A value of zero is treated as unlimited and a
// negative value is treated as fully disallowed.
//
// DEPRECATED: use RegionLimit.Storage.VariablesMB instead. This field will
// be removed in Nomad 1.12.0.
VariablesLimit *int `mapstructure:"variables_limit" hcl:"variables_limit,optional"`
// Hash is the hash of the object and is used to make replication efficient.
Hash []byte
}
type QuotaResources struct {
CPU *int `hcl:"cpu,optional"`
Cores *int `hcl:"cores,optional"`
MemoryMB *int `mapstructure:"memory" hcl:"memory,optional"`
MemoryMaxMB *int `mapstructure:"memory_max" hcl:"memory_max,optional"`
Devices []*RequestedDevice `hcl:"device,block"`
NUMA *NUMAResource `hcl:"numa,block"`
SecretsMB *int `mapstructure:"secrets" hcl:"secrets,optional"`
Storage *QuotaStorageResources `mapstructure:"storage" hcl:"storage,block"`
}
type QuotaStorageResources struct {
// VariablesMB is the maximum total size of all variables
// Variable.EncryptedData, in megabytes (2^20 bytes). A value of zero is
// treated as unlimited and a negative value is treated as fully disallowed.
VariablesMB int `hcl:"variables"`
// HostVolumesMB is the maximum provisioned size of all dynamic host
// volumes, in megabytes (2^20 bytes). A value of zero is treated as
// unlimited and a negative value is treated as fully disallowed.
HostVolumesMB int `hcl:"host_volumes"`
}
// QuotaUsage is the resource usage of a Quota
type QuotaUsage struct {
Name string

View File

@@ -4,6 +4,7 @@
package api
import (
"slices"
"strconv"
)
@@ -18,6 +19,7 @@ type Resources struct {
Networks []*NetworkResource `hcl:"network,block"`
Devices []*RequestedDevice `hcl:"device,block"`
NUMA *NUMAResource `hcl:"numa,block"`
SecretsMB *int `mapstructure:"secrets" hcl:"secrets,optional"`
// COMPAT(0.10)
// XXX Deprecated. Please do not use. The field will be removed in Nomad
@@ -103,6 +105,9 @@ func (r *Resources) Merge(other *Resources) {
if other.NUMA != nil {
r.NUMA = other.NUMA.Copy()
}
if other.SecretsMB != nil {
r.SecretsMB = other.SecretsMB
}
}
// NUMAResource contains the NUMA affinity request for scheduling purposes.
@@ -111,6 +116,10 @@ func (r *Resources) Merge(other *Resources) {
type NUMAResource struct {
// Affinity must be one of "none", "prefer", "require".
Affinity string `hcl:"affinity,optional"`
// Devices is the subset of devices requested by the task that must share
// the same numa node, along with the task's reserved cpu cores.
Devices []string `hcl:"devices,optional"`
}
func (n *NUMAResource) Copy() *NUMAResource {
@@ -119,6 +128,7 @@ func (n *NUMAResource) Copy() *NUMAResource {
}
return &NUMAResource{
Affinity: n.Affinity,
Devices: slices.Clone(n.Devices),
}
}
@@ -129,13 +139,17 @@ func (n *NUMAResource) Canonicalize() {
if n.Affinity == "" {
n.Affinity = "none"
}
if len(n.Devices) == 0 {
n.Devices = nil
}
}
type Port struct {
Label string `hcl:",label"`
Value int `hcl:"static,optional"`
To int `hcl:"to,optional"`
HostNetwork string `hcl:"host_network,optional"`
Label string `hcl:",label"`
Value int `hcl:"static,optional"`
To int `hcl:"to,optional"`
HostNetwork string `hcl:"host_network,optional"`
IgnoreCollision bool `hcl:"ignore_collision,optional"`
}
type DNSConfig struct {
@@ -143,6 +157,9 @@ type DNSConfig struct {
Searches []string `mapstructure:"searches" hcl:"searches,optional"`
Options []string `mapstructure:"options" hcl:"options,optional"`
}
type CNIConfig struct {
Args map[string]string `hcl:"args,optional"`
}
// NetworkResource is used to describe required network
// resources of a given task.
@@ -160,11 +177,14 @@ type NetworkResource struct {
// XXX Deprecated. Please do not use. The field will be removed in Nomad
// 0.13 and is only being kept to allow any references to be removed before
// then.
MBits *int `hcl:"mbits,optional"`
MBits *int `hcl:"mbits,optional"`
CNI *CNIConfig `hcl:"cni,block"`
}
// Megabits should not be used.
//
// COMPAT(0.13)
// XXX Deprecated. Please do not use. The method will be removed in Nomad
// Deprecated. Please do not use. The method will be removed in Nomad
// 0.13 and is only being kept to allow any references to be removed before
// then.
func (n *NetworkResource) Megabits() int {

View File

@@ -39,7 +39,7 @@ func (c *Client) retryPut(ctx context.Context, endpoint string, in, out any, q *
var err error
var wm *WriteMeta
attemptDelay := time.Duration(100 * time.Second) // Avoid a tick before starting
attemptDelay := 100 * time.Second // Avoid a tick before starting
startTime := time.Now()
t := time.NewTimer(attemptDelay)
@@ -60,7 +60,7 @@ func (c *Client) retryPut(ctx context.Context, endpoint string, in, out any, q *
wm, err = c.put(endpoint, in, out, q)
// Maximum retry period is up, don't retry
if c.config.retryOptions.maxToLastCall != 0 && time.Now().Sub(startTime) > c.config.retryOptions.maxToLastCall {
if c.config.retryOptions.maxToLastCall != 0 && time.Since(startTime) > c.config.retryOptions.maxToLastCall {
break
}

View File

@@ -57,8 +57,13 @@ type ScalingRequest struct {
Error bool
Meta map[string]interface{}
WriteRequest
// this is effectively a job update, so we need the ability to override policy.
PolicyOverride bool
// If JobModifyIndex is set then the job will only be scaled if it matches
// the current Jobs index. The JobModifyIndex is ignored if 0.
JobModifyIndex uint64
}
// ScalingPolicy is the user-specified API object for an autoscaling policy
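A minimal sketch of ScaleWithRequest (added in jobs.go above) using the new PolicyOverride and JobModifyIndex fields; the Count, Target and Message fields belong to the wider ScalingRequest type and are assumed here, and all values are illustrative.

package nomadexamples

import "github.com/hashicorp/nomad/api"

// Scale the "web" group to five instances, overriding any scaling policy and
// only if the job has not been modified since modifyIndex.
func scaleGroup(client *api.Client, jobID string, modifyIndex uint64) error {
	count := int64(5)
	req := &api.ScalingRequest{
		Count:          &count,
		Target:         map[string]string{"Group": "web"},
		Message:        "manual scale-up",
		PolicyOverride: true,
		JobModifyIndex: modifyIndex,
	}
	_, _, err := client.Jobs().ScaleWithRequest(jobID, req, nil)
	return err
}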

View File

@@ -64,7 +64,8 @@ func (s *Search) FuzzySearch(text string, context contexts.Context, q *QueryOpti
// ID.
//
// e.g. A Task-level service would have scope like,
// ["<namespace>", "<job>", "<group>", "<task>"]
//
// ["<namespace>", "<job>", "<group>", "<task>"]
type FuzzyMatch struct {
ID string // ID is UUID or Name of object
Scope []string `json:",omitempty"` // IDs of parent objects

View File

@@ -82,3 +82,10 @@ type SentinelPolicyListStub struct {
CreateIndex uint64
ModifyIndex uint64
}
// Possible Sentinel scopes
const (
SentinelScopeSubmitJob = "submit-job"
SentinelScopeSubmitHostVolume = "submit-host-volume"
SentinelScopeSubmitCSIVolume = "submit-csi-volume"
)

View File

@@ -212,6 +212,7 @@ type ServiceCheck struct {
Interval time.Duration `hcl:"interval,optional"`
Timeout time.Duration `hcl:"timeout,optional"`
InitialStatus string `mapstructure:"initial_status" hcl:"initial_status,optional"`
Notes string `hcl:"notes,optional"`
TLSServerName string `mapstructure:"tls_server_name" hcl:"tls_server_name,optional"`
TLSSkipVerify bool `mapstructure:"tls_skip_verify" hcl:"tls_skip_verify,optional"`
Header map[string][]string `hcl:"header,block"`
@@ -222,6 +223,7 @@ type ServiceCheck struct {
TaskName string `mapstructure:"task" hcl:"task,optional"`
SuccessBeforePassing int `mapstructure:"success_before_passing" hcl:"success_before_passing,optional"`
FailuresBeforeCritical int `mapstructure:"failures_before_critical" hcl:"failures_before_critical,optional"`
FailuresBeforeWarning int `mapstructure:"failures_before_warning" hcl:"failures_before_warning,optional"`
Body string `hcl:"body,optional"`
OnUpdate string `mapstructure:"on_update" hcl:"on_update,optional"`
}
@@ -244,13 +246,17 @@ type Service struct {
TaskName string `mapstructure:"task" hcl:"task,optional"`
OnUpdate string `mapstructure:"on_update" hcl:"on_update,optional"`
Identity *WorkloadIdentity `hcl:"identity,block"`
Weights *ServiceWeights `mapstructure:"weights" hcl:"weights,block"`
// Provider defines which backend system provides the service registration,
// either "consul" (default) or "nomad".
Provider string `hcl:"provider,optional"`
// Cluster is valid only for Nomad Enterprise with provider: consul
Cluster string `hcl:"cluster,optional`
Cluster string `hcl:"cluster,optional"`
// Kind defines the consul service kind, valid only when provider: consul
Kind string `hcl:"kind,optional"`
}
const (
@@ -305,6 +311,7 @@ func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) {
}
s.Connect.Canonicalize()
s.Weights.Canonicalize()
// Canonicalize CheckRestart on Checks and merge Service.CheckRestart
// into each check.
@@ -320,9 +327,33 @@ func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) {
s.Checks[i].FailuresBeforeCritical = 0
}
if s.Checks[i].FailuresBeforeWarning < 0 {
s.Checks[i].FailuresBeforeWarning = 0
}
// Inherit Service
if s.Checks[i].OnUpdate == "" {
s.Checks[i].OnUpdate = s.OnUpdate
}
}
}
// ServiceWeights is the jobspec block which configures how a service instance
// is weighted in a DNS SRV request based on the service's health status.
type ServiceWeights struct {
Passing int `hcl:"passing,optional"`
Warning int `hcl:"warning,optional"`
}
func (weights *ServiceWeights) Canonicalize() {
if weights == nil {
return
}
if weights.Passing <= 0 {
weights.Passing = 1
}
if weights.Warning <= 0 {
weights.Warning = 1
}
}
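A minimal sketch of a service using the new Weights block plus the Notes and FailuresBeforeWarning check fields; the remaining Service/ServiceCheck fields come from the wider API and all names and thresholds are illustrative.

package nomadexamples

import (
	"time"

	"github.com/hashicorp/nomad/api"
)

// A Consul service weighted heavily toward passing instances, with a check
// that turns warning after two failures and critical after five.
func weightedService() *api.Service {
	return &api.Service{
		Name:     "web",
		Provider: "consul",
		Weights:  &api.ServiceWeights{Passing: 10, Warning: 1},
		Checks: []api.ServiceCheck{{
			Name:                   "web-http",
			Type:                   "http",
			Path:                   "/healthz",
			Interval:               10 * time.Second,
			Timeout:                2 * time.Second,
			Notes:                  "serves the public UI",
			FailuresBeforeWarning:  2,
			FailuresBeforeCritical: 5,
		}},
	}
}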

14
vendor/github.com/hashicorp/nomad/api/task_sched.go generated vendored Normal file
View File

@@ -0,0 +1,14 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package api
type TaskSchedule struct {
Cron *TaskScheduleCron `hcl:"cron,block"`
}
type TaskScheduleCron struct {
Start string `hcl:"start,optional"`
End string `hcl:"end,optional"`
Timezone string `hcl:"timezone,optional"`
}
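A minimal sketch wiring the new schedule block into a task via the Task.Schedule field added in tasks.go; the start/end/timezone strings are placeholders, since the accepted formats are not defined in this file.

package nomadexamples

import "github.com/hashicorp/nomad/api"

// Attach a cron schedule to a task; the field wiring is what matters here.
func scheduledTask() *api.Task {
	t := api.NewTask("report", "docker")
	t.Schedule = &api.TaskSchedule{
		Cron: &api.TaskScheduleCron{
			Start:    "0 8 * * *",        // placeholder value
			End:      "0 17 * * *",       // placeholder value
			Timezone: "America/New_York", // placeholder value
		},
	}
	return t
}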

View File

@@ -11,6 +11,8 @@ import (
"time"
)
type ReconcileOption = string
const (
// RestartPolicyModeDelay causes an artificial delay till the next interval is
// reached when the specified attempts have been reached in the interval.
@@ -19,6 +21,14 @@ const (
// RestartPolicyModeFail causes a job to fail if the specified number of
// attempts are reached within an interval.
RestartPolicyModeFail = "fail"
// ReconcileOption is used to specify the behavior of the reconciliation process
// between the original allocations and the replacements when a previously
// disconnected client comes back online.
ReconcileOptionKeepOriginal = "keep_original"
ReconcileOptionKeepReplacement = "keep_replacement"
ReconcileOptionBestScore = "best_score"
ReconcileOptionLongestRunning = "longest_running"
)
// MemoryStats holds memory usage related stats
@@ -113,6 +123,37 @@ func (r *RestartPolicy) Merge(rp *RestartPolicy) {
}
}
// Disconnect strategy defines how both clients and server should behave in case of
// disconnection between them.
type DisconnectStrategy struct {
// Defines for how long the server will consider the unresponsive node as
// disconnected but alive instead of lost.
LostAfter *time.Duration `mapstructure:"lost_after" hcl:"lost_after,optional"`
// Defines for how long a disconnected client will keep its allocations running.
StopOnClientAfter *time.Duration `mapstructure:"stop_on_client_after" hcl:"stop_on_client_after,optional"`
// A boolean field used to define if the allocations should be replaced while
// it's considered disconnected.
Replace *bool `mapstructure:"replace" hcl:"replace,optional"`
// Once the disconnected node starts reporting again, it will define which
// instances to keep: the original allocations, the replacement, the one
// running on the node with the best score as it is currently implemented,
// or the allocation that has been running continuously the longest.
Reconcile *ReconcileOption `mapstructure:"reconcile" hcl:"reconcile,optional"`
}
func (ds *DisconnectStrategy) Canonicalize() {
if ds.Replace == nil {
ds.Replace = pointerOf(true)
}
if ds.Reconcile == nil {
ds.Reconcile = pointerOf(ReconcileOptionBestScore)
}
}
// Reschedule configures how Tasks are rescheduled when they crash or fail.
type ReschedulePolicy struct {
// Attempts limits the number of rescheduling attempts that can occur in an interval.
@@ -195,7 +236,7 @@ func NewAffinity(lTarget string, operand string, rTarget string, weight int8) *A
LTarget: lTarget,
RTarget: rTarget,
Operand: operand,
Weight: pointerOf(int8(weight)),
Weight: pointerOf(weight),
}
}
@@ -205,6 +246,14 @@ func (a *Affinity) Canonicalize() {
}
}
func NewDefaultDisconnectStrategy() *DisconnectStrategy {
return &DisconnectStrategy{
LostAfter: pointerOf(0 * time.Minute),
Replace: pointerOf(true),
Reconcile: pointerOf(ReconcileOptionBestScore),
}
}
func NewDefaultReschedulePolicy(jobType string) *ReschedulePolicy {
var dp *ReschedulePolicy
switch jobType {
@@ -268,14 +317,14 @@ func (r *ReschedulePolicy) Copy() *ReschedulePolicy {
return nrp
}
func (p *ReschedulePolicy) String() string {
if p == nil {
func (r *ReschedulePolicy) String() string {
if r == nil {
return ""
}
if *p.Unlimited {
return fmt.Sprintf("unlimited with %v delay, max_delay = %v", *p.DelayFunction, *p.MaxDelay)
if *r.Unlimited {
return fmt.Sprintf("unlimited with %v delay, max_delay = %v", *r.DelayFunction, *r.MaxDelay)
}
return fmt.Sprintf("%v in %v with %v delay, max_delay = %v", *p.Attempts, *p.Interval, *p.DelayFunction, *p.MaxDelay)
return fmt.Sprintf("%v in %v with %v delay, max_delay = %v", *r.Attempts, *r.Interval, *r.DelayFunction, *r.MaxDelay)
}
// Spread is used to serialize task group allocation spread preferences
@@ -301,7 +350,7 @@ func NewSpreadTarget(value string, percent uint8) *SpreadTarget {
func NewSpread(attribute string, weight int8, spreadTargets []*SpreadTarget) *Spread {
return &Spread{
Attribute: attribute,
Weight: pointerOf(int8(weight)),
Weight: pointerOf(weight),
SpreadTarget: spreadTargets,
}
}
@@ -406,6 +455,7 @@ type VolumeRequest struct {
Type string `hcl:"type,optional"`
Source string `hcl:"source,optional"`
ReadOnly bool `hcl:"read_only,optional"`
Sticky bool `hcl:"sticky,optional"`
AccessMode string `hcl:"access_mode,optional"`
AttachmentMode string `hcl:"attachment_mode,optional"`
MountOptions *CSIMountOptions `hcl:"mount_options,block"`
@@ -426,40 +476,50 @@ type VolumeMount struct {
Destination *string `hcl:"destination,optional"`
ReadOnly *bool `mapstructure:"read_only" hcl:"read_only,optional"`
PropagationMode *string `mapstructure:"propagation_mode" hcl:"propagation_mode,optional"`
SELinuxLabel *string `mapstructure:"selinux_label" hcl:"selinux_label,optional"`
}
func (vm *VolumeMount) Canonicalize() {
if vm.PropagationMode == nil {
vm.PropagationMode = pointerOf(VolumeMountPropagationPrivate)
}
if vm.ReadOnly == nil {
vm.ReadOnly = pointerOf(false)
}
if vm.SELinuxLabel == nil {
vm.SELinuxLabel = pointerOf("")
}
}
// TaskGroup is the unit of scheduling.
type TaskGroup struct {
Name *string `hcl:"name,label"`
Count *int `hcl:"count,optional"`
Constraints []*Constraint `hcl:"constraint,block"`
Affinities []*Affinity `hcl:"affinity,block"`
Tasks []*Task `hcl:"task,block"`
Spreads []*Spread `hcl:"spread,block"`
Volumes map[string]*VolumeRequest `hcl:"volume,block"`
RestartPolicy *RestartPolicy `hcl:"restart,block"`
ReschedulePolicy *ReschedulePolicy `hcl:"reschedule,block"`
EphemeralDisk *EphemeralDisk `hcl:"ephemeral_disk,block"`
Update *UpdateStrategy `hcl:"update,block"`
Migrate *MigrateStrategy `hcl:"migrate,block"`
Networks []*NetworkResource `hcl:"network,block"`
Meta map[string]string `hcl:"meta,block"`
Services []*Service `hcl:"service,block"`
ShutdownDelay *time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"`
StopAfterClientDisconnect *time.Duration `mapstructure:"stop_after_client_disconnect" hcl:"stop_after_client_disconnect,optional"`
MaxClientDisconnect *time.Duration `mapstructure:"max_client_disconnect" hcl:"max_client_disconnect,optional"`
Scaling *ScalingPolicy `hcl:"scaling,block"`
Consul *Consul `hcl:"consul,block"`
PreventRescheduleOnLost *bool `hcl:"prevent_reschedule_on_lost,optional"`
Name *string `hcl:"name,label"`
Count *int `hcl:"count,optional"`
Constraints []*Constraint `hcl:"constraint,block"`
Affinities []*Affinity `hcl:"affinity,block"`
Tasks []*Task `hcl:"task,block"`
Spreads []*Spread `hcl:"spread,block"`
Volumes map[string]*VolumeRequest `hcl:"volume,block"`
RestartPolicy *RestartPolicy `hcl:"restart,block"`
Disconnect *DisconnectStrategy `hcl:"disconnect,block"`
ReschedulePolicy *ReschedulePolicy `hcl:"reschedule,block"`
EphemeralDisk *EphemeralDisk `hcl:"ephemeral_disk,block"`
Update *UpdateStrategy `hcl:"update,block"`
Migrate *MigrateStrategy `hcl:"migrate,block"`
Networks []*NetworkResource `hcl:"network,block"`
Meta map[string]string `hcl:"meta,block"`
Services []*Service `hcl:"service,block"`
ShutdownDelay *time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"`
// Deprecated: StopAfterClientDisconnect is deprecated in Nomad 1.8 and ignored in Nomad 1.10. Use Disconnect.StopOnClientAfter.
StopAfterClientDisconnect *time.Duration `mapstructure:"stop_after_client_disconnect" hcl:"stop_after_client_disconnect,optional"`
// Deprecated: MaxClientDisconnect is deprecated in Nomad 1.8.0 and ignored in Nomad 1.10. Use Disconnect.LostAfter.
MaxClientDisconnect *time.Duration `mapstructure:"max_client_disconnect" hcl:"max_client_disconnect,optional"`
Scaling *ScalingPolicy `hcl:"scaling,block"`
Consul *Consul `hcl:"consul,block"`
// Deprecated: PreventRescheduleOnLost is deprecated in Nomad 1.8.0 and ignored in Nomad 1.10. Use Disconnect.Replace.
PreventRescheduleOnLost *bool `hcl:"prevent_reschedule_on_lost,optional"`
}
// NewTaskGroup creates a new TaskGroup.
@@ -493,11 +553,10 @@ func (g *TaskGroup) Canonicalize(job *Job) {
}
// Merge job.consul onto group.consul
if g.Consul == nil {
g.Consul = new(Consul)
if g.Consul != nil {
g.Consul.MergeNamespace(job.ConsulNamespace)
g.Consul.Canonicalize()
}
g.Consul.MergeNamespace(job.ConsulNamespace)
g.Consul.Canonicalize()
// Merge the update policy from the job
if ju, tu := job.Update != nil, g.Update != nil; ju && tu {
@@ -531,6 +590,7 @@ func (g *TaskGroup) Canonicalize(job *Job) {
if g.ReschedulePolicy != nil {
g.ReschedulePolicy.Canonicalize(*job.Type)
}
// Merge the migrate strategy from the job
if jm, tm := job.Migrate != nil, g.Migrate != nil; jm && tm {
jobMigrate := job.Migrate.Copy()
@@ -578,8 +638,9 @@ func (g *TaskGroup) Canonicalize(job *Job) {
for _, s := range g.Services {
s.Canonicalize(nil, g, job)
}
if g.PreventRescheduleOnLost == nil {
g.PreventRescheduleOnLost = pointerOf(false)
if g.Disconnect != nil {
g.Disconnect.Canonicalize()
}
}
@@ -613,7 +674,7 @@ func (g *TaskGroup) Constrain(c *Constraint) *TaskGroup {
return g
}
// AddMeta is used to add a meta k/v pair to a task group
// SetMeta is used to add a meta k/v pair to a task group
func (g *TaskGroup) SetMeta(key, val string) *TaskGroup {
if g.Meta == nil {
g.Meta = make(map[string]string)
@@ -646,6 +707,12 @@ func (g *TaskGroup) AddSpread(s *Spread) *TaskGroup {
return g
}
// ScalingPolicy is used to add a new scaling policy to a task group.
func (g *TaskGroup) ScalingPolicy(sp *ScalingPolicy) *TaskGroup {
g.Scaling = sp
return g
}
// LogConfig provides configuration for log rotation
type LogConfig struct {
MaxFiles *int `mapstructure:"max_files" hcl:"max_files,optional"`
@@ -690,13 +757,13 @@ const (
)
type TaskLifecycle struct {
Hook string `mapstructure:"hook" hcl:"hook,optional"`
Hook string `mapstructure:"hook" hcl:"hook"`
Sidecar bool `mapstructure:"sidecar" hcl:"sidecar,optional"`
}
// Determine if lifecycle has user-input values
// Empty determines if lifecycle has user-input values
func (l *TaskLifecycle) Empty() bool {
return l == nil || (l.Hook == "")
return l == nil
}
// Task is a single process in a task group.
@@ -736,6 +803,8 @@ type Task struct {
Identities []*WorkloadIdentity `hcl:"identity,block"`
Actions []*Action `hcl:"action,block"`
Schedule *TaskSchedule `hcl:"schedule,block"`
}
func (t *Task) Canonicalize(tg *TaskGroup, job *Job) {
@@ -791,17 +860,22 @@ func (t *Task) Canonicalize(tg *TaskGroup, job *Job) {
// TaskArtifact is used to download artifacts before running a task.
type TaskArtifact struct {
GetterSource *string `mapstructure:"source" hcl:"source,optional"`
GetterOptions map[string]string `mapstructure:"options" hcl:"options,block"`
GetterHeaders map[string]string `mapstructure:"headers" hcl:"headers,block"`
GetterMode *string `mapstructure:"mode" hcl:"mode,optional"`
RelativeDest *string `mapstructure:"destination" hcl:"destination,optional"`
GetterSource *string `mapstructure:"source" hcl:"source,optional"`
GetterOptions map[string]string `mapstructure:"options" hcl:"options,block"`
GetterHeaders map[string]string `mapstructure:"headers" hcl:"headers,block"`
GetterMode *string `mapstructure:"mode" hcl:"mode,optional"`
GetterInsecure *bool `mapstructure:"insecure" hcl:"insecure,optional"`
RelativeDest *string `mapstructure:"destination" hcl:"destination,optional"`
Chown bool `mapstructure:"chown" hcl:"chown,optional"`
}
func (a *TaskArtifact) Canonicalize() {
if a.GetterMode == nil {
a.GetterMode = pointerOf("any")
}
if a.GetterInsecure == nil {
a.GetterInsecure = pointerOf(false)
}
if a.GetterSource == nil {
// Shouldn't be possible, but we don't want to panic
a.GetterSource = pointerOf("")
@@ -874,6 +948,7 @@ type Template struct {
ChangeMode *string `mapstructure:"change_mode" hcl:"change_mode,optional"`
ChangeScript *ChangeScript `mapstructure:"change_script" hcl:"change_script,block"`
ChangeSignal *string `mapstructure:"change_signal" hcl:"change_signal,optional"`
Once *bool `mapstructure:"once" hcl:"once,optional"`
Splay *time.Duration `mapstructure:"splay" hcl:"splay,optional"`
Perms *string `mapstructure:"perms" hcl:"perms,optional"`
Uid *int `mapstructure:"uid" hcl:"uid,optional"`
@@ -912,6 +987,9 @@ func (tmpl *Template) Canonicalize() {
if tmpl.ChangeScript != nil {
tmpl.ChangeScript.Canonicalize()
}
if tmpl.Once == nil {
tmpl.Once = pointerOf(false)
}
if tmpl.Splay == nil {
tmpl.Splay = pointerOf(5 * time.Second)
}
@@ -937,14 +1015,15 @@ func (tmpl *Template) Canonicalize() {
}
type Vault struct {
Policies []string `hcl:"policies,optional"`
Role string `hcl:"role,optional"`
Namespace *string `mapstructure:"namespace" hcl:"namespace,optional"`
Cluster string `hcl:"cluster,optional"`
Env *bool `hcl:"env,optional"`
DisableFile *bool `mapstructure:"disable_file" hcl:"disable_file,optional"`
ChangeMode *string `mapstructure:"change_mode" hcl:"change_mode,optional"`
ChangeSignal *string `mapstructure:"change_signal" hcl:"change_signal,optional"`
Policies []string `hcl:"policies,optional"`
Role string `hcl:"role,optional"`
Namespace *string `mapstructure:"namespace" hcl:"namespace,optional"`
Cluster string `hcl:"cluster,optional"`
Env *bool `hcl:"env,optional"`
DisableFile *bool `mapstructure:"disable_file" hcl:"disable_file,optional"`
ChangeMode *string `mapstructure:"change_mode" hcl:"change_mode,optional"`
ChangeSignal *string `mapstructure:"change_signal" hcl:"change_signal,optional"`
AllowTokenExpiration *bool `mapstructure:"allow_token_expiration" hcl:"allow_token_expiration,optional"`
}
func (v *Vault) Canonicalize() {
@@ -966,6 +1045,9 @@ func (v *Vault) Canonicalize() {
if v.ChangeSignal == nil {
v.ChangeSignal = pointerOf("SIGHUP")
}
if v.AllowTokenExpiration == nil {
v.AllowTokenExpiration = pointerOf(false)
}
}
// NewTask creates and initializes a new Task.
@@ -976,7 +1058,7 @@ func NewTask(name, driver string) *Task {
}
}
// Configure is used to configure a single k/v pair on
// SetConfig is used to configure a single k/v pair on
// the task.
func (t *Task) SetConfig(key string, val interface{}) *Task {
if t.Config == nil {
@@ -1001,7 +1083,7 @@ func (t *Task) Require(r *Resources) *Task {
return t
}
// Constraint adds a new constraints to a single task.
// Constrain adds a new constraint to a single task.
func (t *Task) Constrain(c *Constraint) *Task {
t.Constraints = append(t.Constraints, c)
return t
@@ -1035,17 +1117,6 @@ type TaskState struct {
StartedAt time.Time
FinishedAt time.Time
Events []*TaskEvent
// Experimental - TaskHandle is based on drivers.TaskHandle and used
// by remote task drivers to migrate task handles between allocations.
TaskHandle *TaskHandle
}
// Experimental - TaskHandle is based on drivers.TaskHandle and used by remote
// task drivers to migrate task handles between allocations.
type TaskHandle struct {
Version int
DriverState []byte
}
const (
@@ -1171,6 +1242,7 @@ type WorkloadIdentity struct {
ChangeSignal string `mapstructure:"change_signal" hcl:"change_signal,optional"`
Env bool `hcl:"env,optional"`
File bool `hcl:"file,optional"`
Filepath string `hcl:"filepath,optional"`
ServiceName string `hcl:"service_name,optional"`
TTL time.Duration `mapstructure:"ttl" hcl:"ttl,optional"`
}
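A minimal sketch of the disconnect block that supersedes the deprecated max_client_disconnect, stop_after_client_disconnect and prevent_reschedule_on_lost fields; the generic ptr helper is local to the sketch and the durations are illustrative.

package nomadexamples

import (
	"time"

	"github.com/hashicorp/nomad/api"
)

// ptr is a local helper standing in for the package's unexported pointerOf.
func ptr[T any](v T) *T { return &v }

// A group that tolerates a 30-minute disconnect, keeps replacing lost
// allocations, and keeps the longest-running copy on reconnect.
func groupWithDisconnect() *api.TaskGroup {
	tg := api.NewTaskGroup("web", 3)
	tg.Disconnect = &api.DisconnectStrategy{
		LostAfter: ptr(30 * time.Minute),
		Replace:   ptr(true),
		Reconcile: ptr(api.ReconcileOptionLongestRunning),
	}
	return tg
}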

View File

@@ -33,3 +33,12 @@ func formatFloat(f float64, maxPrec int) string {
func pointerOf[A any](a A) *A {
return &a
}
// pointerCopy returns a new pointer to a.
func pointerCopy[A any](a *A) *A {
if a == nil {
return nil
}
na := *a
return &na
}

View File

@@ -235,8 +235,8 @@ func (vars *Variables) readInternal(endpoint string, out **Variable, q *QueryOpt
}
r.setQueryOptions(q)
checkFn := requireStatusIn(http.StatusOK, http.StatusNotFound, http.StatusForbidden)
rtt, resp, err := checkFn(vars.client.doRequest(r))
checkFn := requireStatusIn(http.StatusOK, http.StatusNotFound, http.StatusForbidden) //nolint:bodyclose
rtt, resp, err := checkFn(vars.client.doRequest(r)) //nolint:bodyclose
if err != nil {
return nil, err
}
@@ -284,12 +284,12 @@ func (vars *Variables) deleteInternal(path string, q *WriteOptions) (*WriteMeta,
}
r.setWriteOptions(q)
checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent)
rtt, resp, err := checkFn(vars.client.doRequest(r))
checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent) //nolint:bodyclose
rtt, resp, err := checkFn(vars.client.doRequest(r)) //nolint:bodyclose
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
_ = parseWriteMeta(resp, wm)
@@ -305,11 +305,12 @@ func (vars *Variables) deleteChecked(path string, checkIndex uint64, q *WriteOpt
return nil, err
}
r.setWriteOptions(q)
checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent, http.StatusConflict)
rtt, resp, err := checkFn(vars.client.doRequest(r))
checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent, http.StatusConflict) //nolint:bodyclose
rtt, resp, err := checkFn(vars.client.doRequest(r)) //nolint:bodyclose
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
_ = parseWriteMeta(resp, wm)
@@ -341,8 +342,8 @@ func (vars *Variables) writeChecked(endpoint string, in *Variable, out *Variable
r.setWriteOptions(q)
r.obj = in
checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent, http.StatusConflict)
rtt, resp, err := checkFn(vars.client.doRequest(r))
checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent, http.StatusConflict) //nolint:bodyclose
rtt, resp, err := checkFn(vars.client.doRequest(r)) //nolint:bodyclose
if err != nil {
return nil, err

27
vendor/golang.org/x/exp/LICENSE generated vendored
View File

@@ -1,27 +0,0 @@
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/exp/PATENTS generated vendored
View File

@@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

86
vendor/golang.org/x/exp/maps/maps.go generated vendored
View File

@@ -1,86 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package maps defines various functions useful with maps of any type.
package maps
import "maps"
// Keys returns the keys of the map m.
// The keys will be in an indeterminate order.
//
// The simplest true equivalent using the standard library is:
//
// slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m))
func Keys[M ~map[K]V, K comparable, V any](m M) []K {
r := make([]K, 0, len(m))
for k := range m {
r = append(r, k)
}
return r
}
// Values returns the values of the map m.
// The values will be in an indeterminate order.
//
// The simplest true equivalent using the standard library is:
//
// slices.AppendSeq(make([]V, 0, len(m)), maps.Values(m))
func Values[M ~map[K]V, K comparable, V any](m M) []V {
r := make([]V, 0, len(m))
for _, v := range m {
r = append(r, v)
}
return r
}
// Equal reports whether two maps contain the same key/value pairs.
// Values are compared using ==.
//
//go:fix inline
func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool {
return maps.Equal(m1, m2)
}
// EqualFunc is like Equal, but compares values using eq.
// Keys are still compared with ==.
//
//go:fix inline
func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool {
return maps.EqualFunc(m1, m2, eq)
}
// Clear removes all entries from m, leaving it empty.
//
//go:fix inline
func Clear[M ~map[K]V, K comparable, V any](m M) {
clear(m)
}
// Clone returns a copy of m. This is a shallow clone:
// the new keys and values are set using ordinary assignment.
//
//go:fix inline
func Clone[M ~map[K]V, K comparable, V any](m M) M {
return maps.Clone(m)
}
// Copy copies all key/value pairs in src adding them to dst.
// When a key in src is already present in dst,
// the value in dst will be overwritten by the value associated
// with the key in src.
//
//go:fix inline
func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) {
maps.Copy(dst, src)
}
// DeleteFunc deletes any key/value pairs from m for which del returns true.
//
//go:fix inline
func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) {
maps.DeleteFunc(m, del)
}

5
vendor/modules.txt vendored
View File

@@ -293,7 +293,7 @@ github.com/hashicorp/go-multierror
# github.com/hashicorp/go-rootcerts v1.0.2
## explicit; go 1.12
github.com/hashicorp/go-rootcerts
# github.com/hashicorp/nomad/api v0.0.0-20231213195942-64e3dca9274b
# github.com/hashicorp/nomad/api v0.0.0-20250812204832-62b195aaa535
## explicit; go 1.20
github.com/hashicorp/nomad/api
github.com/hashicorp/nomad/api/contexts
@@ -509,9 +509,6 @@ go.opentelemetry.io/otel/trace/noop
golang.org/x/crypto/pbkdf2
golang.org/x/crypto/scrypt
golang.org/x/crypto/sha3
# golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0
## explicit; go 1.23.0
golang.org/x/exp/maps
# golang.org/x/mod v0.27.0
## explicit; go 1.23.0
golang.org/x/mod/semver