mirror of
https://github.com/crazy-max/diun.git
synced 2025-12-21 21:33:22 +01:00
chore(deps): bump k8s.io/client-go from 0.29.3 to 0.32.0
Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.29.3 to 0.32.0. - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.29.3...v0.32.0) --- updated-dependencies: - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
12
vendor/github.com/fxamacker/cbor/v2/.gitignore
generated
vendored
Normal file
12
vendor/github.com/fxamacker/cbor/v2/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, build with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
104
vendor/github.com/fxamacker/cbor/v2/.golangci.yml
generated
vendored
Normal file
104
vendor/github.com/fxamacker/cbor/v2/.golangci.yml
generated
vendored
Normal file
@@ -0,0 +1,104 @@
|
||||
# Do not delete linter settings. Linters like gocritic can be enabled on the command line.
|
||||
|
||||
linters-settings:
|
||||
depguard:
|
||||
rules:
|
||||
prevent_unmaintained_packages:
|
||||
list-mode: strict
|
||||
files:
|
||||
- $all
|
||||
- "!$test"
|
||||
allow:
|
||||
- $gostd
|
||||
- github.com/x448/float16
|
||||
deny:
|
||||
- pkg: io/ioutil
|
||||
desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil"
|
||||
dupl:
|
||||
threshold: 100
|
||||
funlen:
|
||||
lines: 100
|
||||
statements: 50
|
||||
goconst:
|
||||
ignore-tests: true
|
||||
min-len: 2
|
||||
min-occurrences: 3
|
||||
gocritic:
|
||||
enabled-tags:
|
||||
- diagnostic
|
||||
- experimental
|
||||
- opinionated
|
||||
- performance
|
||||
- style
|
||||
disabled-checks:
|
||||
- commentedOutCode
|
||||
- dupImport # https://github.com/go-critic/go-critic/issues/845
|
||||
- ifElseChain
|
||||
- octalLiteral
|
||||
- paramTypeCombine
|
||||
- whyNoLint
|
||||
gofmt:
|
||||
simplify: false
|
||||
goimports:
|
||||
local-prefixes: github.com/fxamacker/cbor
|
||||
golint:
|
||||
min-confidence: 0
|
||||
govet:
|
||||
check-shadowing: true
|
||||
lll:
|
||||
line-length: 140
|
||||
maligned:
|
||||
suggest-new: true
|
||||
misspell:
|
||||
locale: US
|
||||
staticcheck:
|
||||
checks: ["all"]
|
||||
|
||||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
- asciicheck
|
||||
- bidichk
|
||||
- depguard
|
||||
- errcheck
|
||||
- exportloopref
|
||||
- goconst
|
||||
- gocritic
|
||||
- gocyclo
|
||||
- gofmt
|
||||
- goimports
|
||||
- goprintffuncname
|
||||
- gosec
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- misspell
|
||||
- nilerr
|
||||
- revive
|
||||
- staticcheck
|
||||
- stylecheck
|
||||
- typecheck
|
||||
- unconvert
|
||||
- unused
|
||||
|
||||
issues:
|
||||
# max-issues-per-linter default is 50. Set to 0 to disable limit.
|
||||
max-issues-per-linter: 0
|
||||
# max-same-issues default is 3. Set to 0 to disable limit.
|
||||
max-same-issues: 0
|
||||
|
||||
exclude-rules:
|
||||
- path: decode.go
|
||||
text: "string ` overflows ` has (\\d+) occurrences, make it a constant"
|
||||
- path: decode.go
|
||||
text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant"
|
||||
- path: decode.go
|
||||
text: "string `, ` has (\\d+) occurrences, make it a constant"
|
||||
- path: decode.go
|
||||
text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant"
|
||||
- path: decode.go
|
||||
text: "string `\\]\\)` has (\\d+) occurrences, make it a constant"
|
||||
- path: valid.go
|
||||
text: "string ` for type ` has (\\d+) occurrences, make it a constant"
|
||||
- path: valid.go
|
||||
text: "string `cbor: ` has (\\d+) occurrences, make it a constant"
|
||||
133
vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md
generated
vendored
Normal file
133
vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md
generated
vendored
Normal file
@@ -0,0 +1,133 @@
|
||||
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
We as members, contributors, and leaders pledge to make participation in our
|
||||
community a harassment-free experience for everyone, regardless of age, body
|
||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||
identity and expression, level of experience, education, socio-economic status,
|
||||
nationality, personal appearance, race, caste, color, religion, or sexual
|
||||
identity and orientation.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||
diverse, inclusive, and healthy community.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community include:
|
||||
|
||||
* Demonstrating empathy and kindness toward other people
|
||||
* Being respectful of differing opinions, viewpoints, and experiences
|
||||
* Giving and gracefully accepting constructive feedback
|
||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
* Focusing on what is best not just for us as individuals, but for the overall
|
||||
community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
* The use of sexualized language or imagery, and sexual attention or advances of
|
||||
any kind
|
||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or email address,
|
||||
without their explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Community leaders are responsible for clarifying and enforcing our standards of
|
||||
acceptable behavior and will take appropriate and fair corrective action in
|
||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||
or harmful.
|
||||
|
||||
Community leaders have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||
decisions when appropriate.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all community spaces, and also applies when
|
||||
an individual is officially representing the community in public spaces.
|
||||
Examples of representing our community include using an official e-mail address,
|
||||
posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the community leaders responsible for enforcement at
|
||||
faye.github@gmail.com.
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the
|
||||
reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining
|
||||
the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||
unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing
|
||||
clarity around the nature of the violation and an explanation of why the
|
||||
behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series of
|
||||
actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No
|
||||
interaction with the people involved, including unsolicited interaction with
|
||||
those enforcing the Code of Conduct, for a specified period of time. This
|
||||
includes avoiding interactions in community spaces as well as external channels
|
||||
like social media. Violating these terms may lead to a temporary or permanent
|
||||
ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including
|
||||
sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any sort of interaction or public
|
||||
communication with the community for a specified period of time. No public or
|
||||
private interaction with the people involved, including unsolicited interaction
|
||||
with those enforcing the Code of Conduct, is allowed during this period.
|
||||
Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community
|
||||
standards, including sustained inappropriate behavior, harassment of an
|
||||
individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within the
|
||||
community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||
version 2.1, available at
|
||||
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
|
||||
|
||||
Community Impact Guidelines were inspired by
|
||||
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
|
||||
[https://www.contributor-covenant.org/translations][translations].
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
|
||||
[Mozilla CoC]: https://github.com/mozilla/diversity
|
||||
[FAQ]: https://www.contributor-covenant.org/faq
|
||||
[translations]: https://www.contributor-covenant.org/translations
|
||||
41
vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md
generated
vendored
Normal file
41
vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
# How to contribute
|
||||
|
||||
You can contribute by using the library, opening issues, or opening pull requests.
|
||||
|
||||
## Bug reports and security vulnerabilities
|
||||
|
||||
Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues).
|
||||
|
||||
To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy).
|
||||
|
||||
Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me.
|
||||
|
||||
## Pull requests
|
||||
|
||||
Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc.
|
||||
|
||||
Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts.
|
||||
|
||||
See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details.
|
||||
|
||||
Pull requests have a greater chance of being approved if:
|
||||
- it does not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature.
|
||||
- it has > 97% code coverage.
|
||||
|
||||
## Describe your issue
|
||||
|
||||
Clearly describe the issue:
|
||||
* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error.
|
||||
* If you propose a change or addition, try to give an example of how the improved code could look or how to use it.
|
||||
* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message.
|
||||
|
||||
## Please don't
|
||||
|
||||
Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me.
|
||||
|
||||
Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me.
|
||||
|
||||
## Credits
|
||||
|
||||
- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22.
|
||||
- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements.
|
||||
21
vendor/github.com/fxamacker/cbor/v2/LICENSE
generated
vendored
Normal file
21
vendor/github.com/fxamacker/cbor/v2/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2019-present Faye Amacker
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
691
vendor/github.com/fxamacker/cbor/v2/README.md
generated
vendored
Normal file
691
vendor/github.com/fxamacker/cbor/v2/README.md
generated
vendored
Normal file
@@ -0,0 +1,691 @@
|
||||
# CBOR Codec in Go
|
||||
|
||||
<!-- [](#cbor-library-in-go) -->
|
||||
|
||||
[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html).
|
||||
|
||||
CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc. CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades.
|
||||
|
||||
`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
|
||||
|
||||
See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accept a user-specified buffer.
|
||||
|
||||
## fxamacker/cbor
|
||||
|
||||
[](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci)
|
||||
[](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22)
|
||||
[](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml)
|
||||
[](#fuzzing-and-code-coverage)
|
||||
[](https://goreportcard.com/report/github.com/fxamacker/cbor)
|
||||
|
||||
`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
|
||||
|
||||
Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.
|
||||
|
||||
Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
|
||||
|
||||
<details><summary>Highlights</summary><p/>
|
||||
|
||||
__🚀 Speed__
|
||||
|
||||
Encoding and decoding is fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory efficient rejection of malformed CBOR data.
|
||||
|
||||
__🔒 Security__
|
||||
|
||||
Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
|
||||
|
||||
Codec passed multiple confidential security assessments in 2022. No vulnerabilities found in subset of codec in a [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation.
|
||||
|
||||
__🗜️ Data Size__
|
||||
|
||||
Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
|
||||
|
||||
__:jigsaw: Usability__
|
||||
|
||||
API is mostly same as `encoding/json` plus interfaces that simplify concurrency for CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines.
|
||||
|
||||
Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc.
|
||||
|
||||
__📆 Extensibility__
|
||||
|
||||
Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library.
|
||||
|
||||
<hr/>
|
||||
|
||||
</details>
|
||||
|
||||
### Secure Decoding with Configurable Settings
|
||||
|
||||
`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data.
|
||||
|
||||
By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
|
||||
|
||||
<details><summary>Example decoding with encoding/gob 💥 fatal error (out of memory)</summary><p/>
|
||||
|
||||
```Go
|
||||
// Example of encoding/gob having "fatal error: runtime: out of memory"
|
||||
// while decoding 181 bytes.
|
||||
package main
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Example data is from https://github.com/golang/go/issues/24446
|
||||
// (shortened to 181 bytes).
|
||||
const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
|
||||
"01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
|
||||
"860001013001ff860001013001ffb80000001eff850401010e3030303030" +
|
||||
"30303030303030303001ff3000010c0104000016ffb70201010830303030" +
|
||||
"3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
|
||||
"303030303030303030303030303030303030303030303030303030303030" +
|
||||
"30"
|
||||
|
||||
type X struct {
|
||||
J *X
|
||||
K map[string]int
|
||||
}
|
||||
|
||||
func main() {
|
||||
raw, _ := hex.DecodeString(data)
|
||||
decoder := gob.NewDecoder(bytes.NewReader(raw))
|
||||
|
||||
var x X
|
||||
decoder.Decode(&x) // fatal error: runtime: out of memory
|
||||
fmt.Println("Decoding finished.")
|
||||
}
|
||||
```
|
||||
|
||||
<hr/>
|
||||
|
||||
</details>
|
||||
|
||||
`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to
|
||||
decode 10 bytes of malicious CBOR data to `[]byte` (with default settings):
|
||||
|
||||
| Codec | Speed (ns/op) | Memory | Allocs |
|
||||
| :---- | ------------: | -----: | -----: |
|
||||
| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op |
|
||||
| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op |
|
||||
|
||||
<details><summary>Benchmark details</summary><p/>
|
||||
|
||||
Latest comparison used:
|
||||
- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
|
||||
- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933)
|
||||
- go test -bench=. -benchmem -count=20
|
||||
|
||||
#### Prior comparisons
|
||||
|
||||
| Codec | Speed (ns/op) | Memory | Allocs |
|
||||
| :---- | ------------: | -----: | -----: |
|
||||
| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
|
||||
| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
|
||||
| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
|
||||
| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
|
||||
|
||||
- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
|
||||
- go1.19.6, linux/amd64, i5-13600K (DDR4)
|
||||
- go test -bench=. -benchmem -count=20
|
||||
|
||||
<hr/>
|
||||
|
||||
</details>
|
||||
|
||||
### Smaller Encodings with Struct Tags
|
||||
|
||||
Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
|
||||
|
||||
<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
|
||||
|
||||
https://go.dev/play/p/YxwvfPdFQG2
|
||||
|
||||
```Go
|
||||
// Example encoding nested struct (with omitempty tag)
|
||||
// - encoding/json: 18 byte JSON
|
||||
// - fxamacker/cbor: 1 byte CBOR
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
)
|
||||
|
||||
type GrandChild struct {
|
||||
Quux int `json:",omitempty"`
|
||||
}
|
||||
|
||||
type Child struct {
|
||||
Baz int `json:",omitempty"`
|
||||
Qux GrandChild `json:",omitempty"`
|
||||
}
|
||||
|
||||
type Parent struct {
|
||||
Foo Child `json:",omitempty"`
|
||||
Bar int `json:",omitempty"`
|
||||
}
|
||||
|
||||
func cb() {
|
||||
results, _ := cbor.Marshal(Parent{})
|
||||
fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
|
||||
|
||||
text, _ := cbor.Diagnose(results) // Diagnostic Notation
|
||||
fmt.Println("DN: " + text)
|
||||
}
|
||||
|
||||
func js() {
|
||||
results, _ := json.Marshal(Parent{})
|
||||
fmt.Println("hex(JSON): " + hex.EncodeToString(results))
|
||||
|
||||
text := string(results) // JSON
|
||||
fmt.Println("JSON: " + text)
|
||||
}
|
||||
|
||||
func main() {
|
||||
cb()
|
||||
fmt.Println("-------------")
|
||||
js()
|
||||
}
|
||||
```
|
||||
|
||||
Output (DN is Diagnostic Notation):
|
||||
```
|
||||
hex(CBOR): a0
|
||||
DN: {}
|
||||
-------------
|
||||
hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
|
||||
JSON: {"Foo":{"Qux":{}}}
|
||||
```
|
||||
|
||||
<hr/>
|
||||
|
||||
</details>
|
||||
|
||||
Example using different struct tags together:
|
||||
|
||||

|
||||
|
||||
API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options.
|
||||
|
||||
## Quick Start
|
||||
|
||||
__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`.
|
||||
|
||||
### Key Points
|
||||
|
||||
This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742).
|
||||
|
||||
- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items.
|
||||
- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items.
|
||||
|
||||
Configurable limits and options can be used to balance trade-offs.
|
||||
|
||||
- Encoding and decoding modes are created from options (settings).
|
||||
- Modes can be created at startup and reused.
|
||||
- Modes are safe for concurrent use.
|
||||
|
||||
### Default Mode
|
||||
|
||||
Package level functions only use this library's default settings.
|
||||
They provide the "default mode" of encoding and decoding.
|
||||
|
||||
```go
|
||||
// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc.
|
||||
b, err = cbor.Marshal(v) // encode v to []byte b
|
||||
err = cbor.Unmarshal(b, &v) // decode []byte b to v
|
||||
decoder = cbor.NewDecoder(r) // create decoder with io.Reader r
|
||||
err = decoder.Decode(&v) // decode a CBOR data item to v
|
||||
|
||||
// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface.
|
||||
err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool.
|
||||
|
||||
// v2.5.0 added new functions that return remaining bytes.
|
||||
|
||||
// UnmarshalFirst decodes first CBOR data item and returns remaining bytes.
|
||||
rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
|
||||
|
||||
// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes.
|
||||
text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text
|
||||
|
||||
// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes,
|
||||
// but new funcs UnmarshalFirst and DiagnoseFirst do not.
|
||||
```
|
||||
|
||||
__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc.
|
||||
|
||||
- Different CBOR libraries may use different default settings.
|
||||
- CBOR-based formats or protocols usually require specific settings.
|
||||
|
||||
For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
|
||||
|
||||
### Presets
|
||||
|
||||
Presets can be used as-is or as a starting point for custom settings.
|
||||
|
||||
```go
|
||||
// EncOptions is a struct of encoder settings.
|
||||
func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding
|
||||
func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization
|
||||
func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR
|
||||
func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR
|
||||
```
|
||||
|
||||
Presets are used to create custom modes.
|
||||
|
||||
### Custom Modes
|
||||
|
||||
Modes are created from settings. Once created, modes have immutable settings.
|
||||
|
||||
💡 Create the mode at startup and reuse it. It is safe for concurrent use.
|
||||
|
||||
```Go
|
||||
// Create encoding mode.
|
||||
opts := cbor.CoreDetEncOptions() // use preset options as a starting point
|
||||
opts.Time = cbor.TimeUnix // change any settings if needed
|
||||
em, err := opts.EncMode() // create an immutable encoding mode
|
||||
|
||||
// Reuse the encoding mode. It is safe for concurrent use.
|
||||
|
||||
// API matches encoding/json.
|
||||
b, err := em.Marshal(v) // encode v to []byte b
|
||||
encoder := em.NewEncoder(w) // create encoder with io.Writer w
|
||||
err := encoder.Encode(v) // encode v to io.Writer w
|
||||
```
|
||||
|
||||
Default mode and custom modes automatically apply struct tags.
|
||||
|
||||
### User Specified Buffer for Encoding (v2.7.0)
|
||||
|
||||
`UserBufferEncMode` interface extends `EncMode` interface to add `MarshalToBuffer()`. It accepts a user-specified buffer instead of using built-in buffer pool.
|
||||
|
||||
```Go
|
||||
em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode
|
||||
|
||||
var buf bytes.Buffer
|
||||
err = em.MarshalToBuffer(v, &buf) // encode v to provided buf
|
||||
```
|
||||
|
||||
### Struct Tags
|
||||
|
||||
Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
|
||||
|
||||
<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
|
||||
|
||||
https://go.dev/play/p/YxwvfPdFQG2
|
||||
|
||||
```Go
|
||||
// Example encoding nested struct (with omitempty tag)
|
||||
// - encoding/json: 18 byte JSON
|
||||
// - fxamacker/cbor: 1 byte CBOR
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
)
|
||||
|
||||
type GrandChild struct {
|
||||
Quux int `json:",omitempty"`
|
||||
}
|
||||
|
||||
type Child struct {
|
||||
Baz int `json:",omitempty"`
|
||||
Qux GrandChild `json:",omitempty"`
|
||||
}
|
||||
|
||||
type Parent struct {
|
||||
Foo Child `json:",omitempty"`
|
||||
Bar int `json:",omitempty"`
|
||||
}
|
||||
|
||||
func cb() {
|
||||
results, _ := cbor.Marshal(Parent{})
|
||||
fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
|
||||
|
||||
text, _ := cbor.Diagnose(results) // Diagnostic Notation
|
||||
fmt.Println("DN: " + text)
|
||||
}
|
||||
|
||||
func js() {
|
||||
results, _ := json.Marshal(Parent{})
|
||||
fmt.Println("hex(JSON): " + hex.EncodeToString(results))
|
||||
|
||||
text := string(results) // JSON
|
||||
fmt.Println("JSON: " + text)
|
||||
}
|
||||
|
||||
func main() {
|
||||
cb()
|
||||
fmt.Println("-------------")
|
||||
js()
|
||||
}
|
||||
```
|
||||
|
||||
Output (DN is Diagnostic Notation):
|
||||
```
|
||||
hex(CBOR): a0
|
||||
DN: {}
|
||||
-------------
|
||||
hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
|
||||
JSON: {"Foo":{"Qux":{}}}
|
||||
```
|
||||
|
||||
<hr/>
|
||||
|
||||
</details>
|
||||
|
||||
<details><summary>Example using several struct tags</summary><p/>
|
||||
|
||||

|
||||
|
||||
</details>
|
||||
|
||||
Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
|
||||
|
||||
### CBOR Tags
|
||||
|
||||
CBOR tags are specified in a `TagSet`.
|
||||
|
||||
Custom modes can be created with a `TagSet` to handle CBOR tags.
|
||||
|
||||
```go
|
||||
em, err := opts.EncMode() // no CBOR tags
|
||||
em, err := opts.EncModeWithTags(ts) // immutable CBOR tags
|
||||
em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags
|
||||
```
|
||||
|
||||
`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`.
|
||||
|
||||
<details><summary>Example using TagSet and TagOptions</summary><p/>
|
||||
|
||||
```go
|
||||
// Use signedCWT struct defined in "Decoding CWT" example.
|
||||
|
||||
// Create TagSet (safe for concurrency).
|
||||
tags := cbor.NewTagSet()
|
||||
// Register tag COSE_Sign1 18 with signedCWT type.
|
||||
tags.Add(
|
||||
cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
|
||||
reflect.TypeOf(signedCWT{}),
|
||||
18)
|
||||
|
||||
// Create DecMode with immutable tags.
|
||||
dm, _ := cbor.DecOptions{}.DecModeWithTags(tags)
|
||||
|
||||
// Unmarshal to signedCWT with tag support.
|
||||
var v signedCWT
|
||||
if err := dm.Unmarshal(data, &v); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create EncMode with immutable tags.
|
||||
em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
|
||||
|
||||
// Marshal signedCWT with tag number.
|
||||
if data, err := cbor.Marshal(v); err != nil {
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Functions and Interfaces
|
||||
|
||||
<details><summary>Functions and interfaces at a glance</summary><p/>
|
||||
|
||||
Common functions with same API as `encoding/json`:
|
||||
- `Marshal`, `Unmarshal`
|
||||
- `NewEncoder`, `(*Encoder).Encode`
|
||||
- `NewDecoder`, `(*Decoder).Decode`
|
||||
|
||||
NOTE: `Unmarshal` will return `ExtraneousDataError` if there are remaining bytes
|
||||
because RFC 8949 treats CBOR data item with remaining bytes as malformed.
|
||||
- 💡 Use `UnmarshalFirst` to decode first CBOR data item and return any remaining bytes.
|
||||
|
||||
Other useful functions:
|
||||
- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data.
|
||||
- `UnmarshalFirst` decodes the first CBOR data item and returns any remaining bytes.
|
||||
- `Wellformed` returns true if the CBOR data item is well-formed.
|
||||
|
||||
Interfaces identical or comparable to Go `encoding` packages include:
|
||||
`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`.
|
||||
|
||||
The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding.
|
||||
|
||||
</details>
|
||||
|
||||
### Security Tips
|
||||
|
||||
🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data.
|
||||
|
||||
Default limits may need to be increased for systems handling very large data (e.g. blockchains).
|
||||
|
||||
`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`.
|
||||
|
||||
## Status
|
||||
|
||||
v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
|
||||
|
||||
For more details, see [release notes](https://github.com/fxamacker/cbor/releases).
|
||||
|
||||
### Prior Release
|
||||
|
||||
[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings.
|
||||
|
||||
v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
|
||||
|
||||
__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading.
|
||||
|
||||
See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes.
|
||||
|
||||
See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc.
|
||||
|
||||
<!--
|
||||
<details><summary>👉 Benchmark Comparison: v2.4.0 vs v2.5.0</summary><p/>
|
||||
|
||||
TODO: Update to v2.4.0 vs 2.5.0 (not beta2).
|
||||
|
||||
Comparison of v2.4.0 vs v2.5.0-beta2 provided by @x448 (edited to fit width).
|
||||
|
||||
PR [#382](https://github.com/fxamacker/cbor/pull/382) returns buffer to pool in `Encode()`. It adds a bit of overhead to `Encode()` but `NewEncoder().Encode()` is a lot faster and uses less memory as shown here:
|
||||
|
||||
```
|
||||
$ benchstat bench-v2.4.0.log bench-f9e6291.log
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
pkg: github.com/fxamacker/cbor/v2
|
||||
cpu: 12th Gen Intel(R) Core(TM) i7-12700H
|
||||
│ bench-v2.4.0.log │ bench-f9e6291.log │
|
||||
│ sec/op │ sec/op vs base │
|
||||
NewEncoderEncode/Go_bool_to_CBOR_bool-20 236.70n ± 2% 58.04n ± 1% -75.48% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_uint64_to_CBOR_positive_int-20 238.00n ± 2% 63.93n ± 1% -73.14% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_int64_to_CBOR_negative_int-20 238.65n ± 2% 64.88n ± 1% -72.81% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_float64_to_CBOR_float-20 242.00n ± 2% 63.00n ± 1% -73.97% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]uint8_to_CBOR_bytes-20 245.60n ± 1% 68.55n ± 1% -72.09% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_string_to_CBOR_text-20 243.20n ± 3% 68.39n ± 1% -71.88% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]int_to_CBOR_array-20 563.0n ± 2% 378.3n ± 0% -32.81% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_map[string]string_to_CBOR_map-20 2.043µ ± 2% 1.906µ ± 2% -6.75% (p=0.000 n=10)
|
||||
geomean 349.7n 122.7n -64.92%
|
||||
|
||||
│ bench-v2.4.0.log │ bench-f9e6291.log │
|
||||
│ B/op │ B/op vs base │
|
||||
NewEncoderEncode/Go_bool_to_CBOR_bool-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_uint64_to_CBOR_positive_int-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_int64_to_CBOR_negative_int-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_float64_to_CBOR_float-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]uint8_to_CBOR_bytes-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_string_to_CBOR_text-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]int_to_CBOR_array-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_map[string]string_to_CBOR_map-20 544.0 ± 0% 416.0 ± 0% -23.53% (p=0.000 n=10)
|
||||
geomean 153.4 ? ¹ ²
|
||||
¹ summaries must be >0 to compute geomean
|
||||
² ratios must be >0 to compute geomean
|
||||
|
||||
│ bench-v2.4.0.log │ bench-f9e6291.log │
|
||||
│ allocs/op │ allocs/op vs base │
|
||||
NewEncoderEncode/Go_bool_to_CBOR_bool-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_uint64_to_CBOR_positive_int-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_int64_to_CBOR_negative_int-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_float64_to_CBOR_float-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]uint8_to_CBOR_bytes-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_string_to_CBOR_text-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]int_to_CBOR_array-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_map[string]string_to_CBOR_map-20 28.00 ± 0% 26.00 ± 0% -7.14% (p=0.000 n=10)
|
||||
geomean 2.782 ? ¹ ²
|
||||
¹ summaries must be >0 to compute geomean
|
||||
² ratios must be >0 to compute geomean
|
||||
```
|
||||
|
||||
</details>
|
||||
-->
|
||||
|
||||
## Who uses fxamacker/cbor
|
||||
|
||||
`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others.
|
||||
|
||||
`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope.
|
||||
|
||||
## Standards
|
||||
|
||||
`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
|
||||
|
||||
Notable CBOR features include:
|
||||
|
||||
| CBOR Feature | Description |
|
||||
| :--- | :--- |
|
||||
| CBOR tags | API supports built-in and user-defined tags. |
|
||||
| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. |
|
||||
| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). |
|
||||
| Duplicate map keys | Always forbid for encoding and option to allow/forbid for decoding. |
|
||||
| Indefinite length data | Option to allow/forbid for encoding and decoding. |
|
||||
| Well-formedness | Always checked and enforced. |
|
||||
| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. |
|
||||
| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). |
|
||||
|
||||
Known limitations are noted in the [Limitations section](#limitations).
|
||||
|
||||
Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps.
|
||||
|
||||
Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data.
|
||||
|
||||
After well-formedness is verified, basic validity errors are handled as follows:
|
||||
|
||||
* Invalid UTF-8 string: Decoder has option to check and return invalid UTF-8 string error. This check is enabled by default.
|
||||
* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys.
|
||||
|
||||
When decoding well-formed CBOR arrays and maps, decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future.
|
||||
|
||||
By default, decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined.
|
||||
|
||||
__Click to expand topic:__
|
||||
|
||||
<details>
|
||||
<summary>Duplicate Map Keys</summary><p>
|
||||
|
||||
This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct.
|
||||
|
||||
`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type.
|
||||
|
||||
`DupMapKeyEnforcedAPF` enforces detection and rejection of duplicate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number.
|
||||
|
||||
APF suffix means "Allow Partial Fill" so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Tag Validity</summary><p>
|
||||
|
||||
This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799):
|
||||
|
||||
* Inadmissible type for tag content
|
||||
* Inadmissible value for tag content
|
||||
|
||||
Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways:
|
||||
|
||||
* When decoding into an empty interface, unknown tag data item will be decoded into `cbor.Tag` data type, which contains tag number and tag content. The tag content will be decoded into the default Go data type for the CBOR data type.
|
||||
* When decoding into other Go types, unknown tag data item is decoded into the specified Go type. If Go type is registered with a tag number, the tag number can optionally be verified.
|
||||
|
||||
Decoder also has an option to forbid tag data items (treat any tag data item as error) which is specified by protocols such as CTAP2 Canonical CBOR.
|
||||
|
||||
For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options).
|
||||
|
||||
</details>
|
||||
|
||||
## Limitations
|
||||
|
||||
If any of these limitations prevent you from using this library, please open an issue along with a link to your project.
|
||||
|
||||
* CBOR `Undefined` (0xf7) value decodes to Go's `nil` value. CBOR `Null` (0xf6) more closely matches Go's `nil`.
|
||||
* CBOR map keys with data types not supported by Go for map keys are ignored and an error is returned after continuing to decode remaining items.
|
||||
* When decoding registered CBOR tag data to interface type, decoder creates a pointer to registered Go type matching CBOR tag number. Requiring a pointer for this is a Go limitation.
|
||||
|
||||
## Fuzzing and Code Coverage
|
||||
|
||||
__Code coverage__ is always 95% or higher (with `go test -cover`) when tagging a release.
|
||||
|
||||
__Coverage-guided fuzzing__ must pass billions of execs before tagging a release. Fuzzing is done using nonpublic code which may eventually get merged into this project. Until then, reports like OpenSSF Scorecard can't detect fuzz tests being used by this project.
|
||||
|
||||
<hr>
|
||||
|
||||
## Versions and API Changes
|
||||
This project uses [Semantic Versioning](https://semver.org), so the API is always backwards compatible unless the major version number changes.
|
||||
|
||||
These functions have signatures identical to encoding/json and their API will continue to match `encoding/json` even after major new releases:
|
||||
`Marshal`, `Unmarshal`, `NewEncoder`, `NewDecoder`, `(*Encoder).Encode`, and `(*Decoder).Decode`.
|
||||
|
||||
Exclusions from SemVer:
|
||||
- Newly added API documented as "subject to change".
|
||||
- Newly added API in the master branch that has never been tagged in non-beta release.
|
||||
- If function parameters are unchanged, bug fixes that change behavior (e.g. return error for edge case was missed in prior version). We try to highlight these in the release notes and add extended beta period. E.g. [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
|
||||
|
||||
This project avoids breaking changes to behavior of encoding and decoding functions unless required to improve conformance with supported RFCs (e.g. RFC 8949, RFC 8742, etc.) Visible changes that don't improve conformance to standards are typically made available as new opt-in settings or new functions.
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
This project has adopted the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). Contact [faye.github@gmail.com](mailto:faye.github@gmail.com) with any questions or comments.
|
||||
|
||||
## Contributing
|
||||
|
||||
Please open an issue before beginning work on a PR. The improvement may have already been considered, etc.
|
||||
|
||||
For more info, see [How to Contribute](CONTRIBUTING.md).
|
||||
|
||||
## Security Policy
|
||||
|
||||
Security fixes are provided for the latest released version of fxamacker/cbor.
|
||||
|
||||
For the full text of the Security Policy, see [SECURITY.md](SECURITY.md).
|
||||
|
||||
## Acknowledgements
|
||||
|
||||
Many thanks to all the contributors on this project!
|
||||
|
||||
I'm especially grateful to Bastian Müller and Dieter Shirley for suggesting and collaborating on CBOR stream mode, and much more.
|
||||
|
||||
I'm very grateful to Stefan Tatschner, Yawning Angel, Jernej Kos, x448, ZenGround0, and Jakob Borg for their contributions or support in the very early days.
|
||||
|
||||
Big thanks to Ben Luddy for his contributions in v2.6.0 and v2.7.0.
|
||||
|
||||
This library clearly wouldn't be possible without Carsten Bormann authoring CBOR RFCs.
|
||||
|
||||
Special thanks to Laurence Lundblade and Jeffrey Yasskin for their help on IETF mailing list or at [7049bis](https://github.com/cbor-wg/CBORbis).
|
||||
|
||||
Huge thanks to The Go Authors for creating a fun and practical programming language with batteries included!
|
||||
|
||||
This library uses `x448/float16` which used to be included. As a standalone package, `x448/float16` is useful to other projects as well.
|
||||
|
||||
## License
|
||||
|
||||
Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker).
|
||||
|
||||
fxamacker/cbor is licensed under the MIT License. See [LICENSE](LICENSE) for the full license text.
|
||||
|
||||
<hr>
|
||||
7
vendor/github.com/fxamacker/cbor/v2/SECURITY.md
generated
vendored
Normal file
7
vendor/github.com/fxamacker/cbor/v2/SECURITY.md
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
# Security Policy
|
||||
|
||||
Security fixes are provided for the latest released version of fxamacker/cbor.
|
||||
|
||||
If the security vulnerability is already known to the public, then you can open an issue as a bug report.
|
||||
|
||||
To report security vulnerabilities not yet known to the public, please email faye.github@gmail.com and allow time for the problem to be resolved before reporting it to the public.
|
||||
63
vendor/github.com/fxamacker/cbor/v2/bytestring.go
generated
vendored
Normal file
63
vendor/github.com/fxamacker/cbor/v2/bytestring.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// ByteString represents a CBOR byte string (major type 2). ByteString can be
// used when a Go []byte is not possible or convenient. For example, Go doesn't
// allow []byte as a map key, so ByteString can be used to support data formats
// having CBOR maps with byte string keys. ByteString can also be used to
// encode an invalid UTF-8 string as a CBOR byte string.
// See DecOption.MapKeyByteStringMode for more details.
type ByteString string
|
||||
|
||||
// Bytes returns bytes representing ByteString.
|
||||
func (bs ByteString) Bytes() []byte {
|
||||
return []byte(bs)
|
||||
}
|
||||
|
||||
// MarshalCBOR encodes ByteString as CBOR byte string (major type 2).
|
||||
func (bs ByteString) MarshalCBOR() ([]byte, error) {
|
||||
e := getEncodeBuffer()
|
||||
defer putEncodeBuffer(e)
|
||||
|
||||
// Encode length
|
||||
encodeHead(e, byte(cborTypeByteString), uint64(len(bs)))
|
||||
|
||||
// Encode data
|
||||
buf := make([]byte, e.Len()+len(bs))
|
||||
n := copy(buf, e.Bytes())
|
||||
copy(buf[n:], bs)
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
|
||||
// Decoding CBOR null and CBOR undefined sets ByteString to be empty.
|
||||
func (bs *ByteString) UnmarshalCBOR(data []byte) error {
|
||||
if bs == nil {
|
||||
return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
|
||||
}
|
||||
|
||||
// Decoding CBOR null and CBOR undefined to ByteString resets data.
|
||||
// This behavior is similar to decoding CBOR null and CBOR undefined to []byte.
|
||||
if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
|
||||
*bs = ""
|
||||
return nil
|
||||
}
|
||||
|
||||
d := decoder{data: data, dm: defaultDecMode}
|
||||
|
||||
// Check if CBOR data type is byte string
|
||||
if typ := d.nextCBORType(); typ != cborTypeByteString {
|
||||
return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeByteString.String()}
|
||||
}
|
||||
|
||||
b, _ := d.parseByteString()
|
||||
*bs = ByteString(b)
|
||||
return nil
|
||||
}
|
||||
363
vendor/github.com/fxamacker/cbor/v2/cache.go
generated
vendored
Normal file
363
vendor/github.com/fxamacker/cbor/v2/cache.go
generated
vendored
Normal file
@@ -0,0 +1,363 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// encodeFuncs pairs the two per-type functions cached by the encoder:
// the value encoder itself and the emptiness check used for omitempty.
type encodeFuncs struct {
	ef  encodeFunc  // encodes a value of the associated type
	ief isEmptyFunc // reports whether a value is considered empty
}
|
||||
|
||||
// Package-level caches keyed by reflect.Type. All entries are populated
// lazily on first use and are safe for concurrent use via sync.Map.
var (
	decodingStructTypeCache sync.Map // map[reflect.Type]*decodingStructType
	encodingStructTypeCache sync.Map // map[reflect.Type]*encodingStructType
	encodeFuncCache         sync.Map // map[reflect.Type]encodeFuncs
	typeInfoCache           sync.Map // map[reflect.Type]*typeInfo
)
|
||||
|
||||
// specialType classifies Go types that need dedicated handling in the codec.
// Classification is performed by newTypeInfo.
type specialType int

const (
	specialTypeNone             specialType = iota // no special handling
	specialTypeUnmarshalerIface                    // pointer to the type implements typeUnmarshaler
	specialTypeEmptyIface                          // interface with no methods
	specialTypeIface                               // non-empty interface
	specialTypeTag                                 // matches typeTag
	specialTypeTime                                // matches typeTime
)
|
||||
|
||||
// typeInfo caches reflection-derived facts about a Go type so they are
// computed once per type rather than on every encode/decode.
type typeInfo struct {
	elemTypeInfo *typeInfo    // element type info for arrays, slices, and maps
	keyTypeInfo  *typeInfo    // key type info for maps
	typ          reflect.Type // the type as given (possibly a pointer)
	kind         reflect.Kind // kind of typ
	nonPtrType   reflect.Type // typ with all pointer indirections removed
	nonPtrKind   reflect.Kind // kind of nonPtrType
	spclType     specialType  // special-handling classification
}
|
||||
|
||||
func newTypeInfo(t reflect.Type) *typeInfo {
|
||||
tInfo := typeInfo{typ: t, kind: t.Kind()}
|
||||
|
||||
for t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
|
||||
k := t.Kind()
|
||||
|
||||
tInfo.nonPtrType = t
|
||||
tInfo.nonPtrKind = k
|
||||
|
||||
if k == reflect.Interface {
|
||||
if t.NumMethod() == 0 {
|
||||
tInfo.spclType = specialTypeEmptyIface
|
||||
} else {
|
||||
tInfo.spclType = specialTypeIface
|
||||
}
|
||||
} else if t == typeTag {
|
||||
tInfo.spclType = specialTypeTag
|
||||
} else if t == typeTime {
|
||||
tInfo.spclType = specialTypeTime
|
||||
} else if reflect.PtrTo(t).Implements(typeUnmarshaler) {
|
||||
tInfo.spclType = specialTypeUnmarshalerIface
|
||||
}
|
||||
|
||||
switch k {
|
||||
case reflect.Array, reflect.Slice:
|
||||
tInfo.elemTypeInfo = getTypeInfo(t.Elem())
|
||||
case reflect.Map:
|
||||
tInfo.keyTypeInfo = getTypeInfo(t.Key())
|
||||
tInfo.elemTypeInfo = getTypeInfo(t.Elem())
|
||||
}
|
||||
|
||||
return &tInfo
|
||||
}
|
||||
|
||||
// decodingStructType holds per-struct-type metadata used while decoding.
type decodingStructType struct {
	fields             fields         // fields eligible for decoding (from getFields)
	fieldIndicesByName map[string]int // index into fields keyed by field name
	err                error          // error discovered while building this metadata
	toArray            bool           // struct uses the toarray option
}
|
||||
|
||||
// The stdlib errors.Join was introduced in Go 1.20, and this module still
// supports Go 1.17, so multierror provides a minimal aggregated error.
type multierror []error

// Error returns the messages of all contained errors joined by ", ".
func (m multierror) Error() string {
	msgs := make([]string, len(m))
	for i, err := range m {
		msgs[i] = err.Error()
	}
	return strings.Join(msgs, ", ")
}
|
||||
|
||||
// getDecodingStructType returns cached decoding metadata for struct type t,
// building and caching it on first use. Any problems found (unparsable
// keyasint names, duplicate field names) are aggregated into the err field.
func getDecodingStructType(t reflect.Type) *decodingStructType {
	if v, _ := decodingStructTypeCache.Load(t); v != nil {
		return v.(*decodingStructType)
	}

	flds, structOptions := getFields(t)

	toArray := hasToArrayOption(structOptions)

	var errs []error
	for i := 0; i < len(flds); i++ {
		if flds[i].keyAsInt {
			// keyasint: the field name must parse as an integer map key.
			// NOTE: break also leaves typInfo unset for the remaining
			// fields; the error is recorded in the cached entry's err field.
			nameAsInt, numErr := strconv.Atoi(flds[i].name)
			if numErr != nil {
				errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")"))
				break
			}
			flds[i].nameAsInt = int64(nameAsInt)
		}

		flds[i].typInfo = getTypeInfo(flds[i].typ)
	}

	// Build the name -> index lookup, recording duplicates as errors.
	fieldIndicesByName := make(map[string]int, len(flds))
	for i, fld := range flds {
		if _, ok := fieldIndicesByName[fld.name]; ok {
			errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name))
			continue
		}
		fieldIndicesByName[fld.name] = i
	}

	// Collapse collected errors: none -> nil, one -> that error,
	// several -> a multierror.
	var err error
	{
		var multi multierror
		for _, each := range errs {
			if each != nil {
				multi = append(multi, each)
			}
		}
		if len(multi) == 1 {
			err = multi[0]
		} else if len(multi) > 1 {
			err = multi
		}
	}

	structType := &decodingStructType{
		fields:             flds,
		fieldIndicesByName: fieldIndicesByName,
		err:                err,
		toArray:            toArray,
	}
	decodingStructTypeCache.Store(t, structType)
	return structType
}
|
||||
|
||||
// encodingStructType holds per-struct-type metadata used while encoding,
// including pre-sorted field orders for the deterministic sort modes.
type encodingStructType struct {
	fields             fields // fields in getFields order (used by SortNone/SortFastShuffle)
	bytewiseFields     fields // fields sorted bytewise-lexicographically by encoded name
	lengthFirstFields  fields // fields sorted length-first, then bytewise
	omitEmptyFieldsIdx []int  // indices into fields that carry omitempty
	err                error  // error discovered while building this metadata
	toArray            bool   // struct uses the toarray option
}
|
||||
|
||||
func (st *encodingStructType) getFields(em *encMode) fields {
|
||||
switch em.sort {
|
||||
case SortNone, SortFastShuffle:
|
||||
return st.fields
|
||||
case SortLengthFirst:
|
||||
return st.lengthFirstFields
|
||||
default:
|
||||
return st.bytewiseFields
|
||||
}
|
||||
}
|
||||
|
||||
type bytewiseFieldSorter struct {
|
||||
fields fields
|
||||
}
|
||||
|
||||
func (x *bytewiseFieldSorter) Len() int {
|
||||
return len(x.fields)
|
||||
}
|
||||
|
||||
func (x *bytewiseFieldSorter) Swap(i, j int) {
|
||||
x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
|
||||
}
|
||||
|
||||
func (x *bytewiseFieldSorter) Less(i, j int) bool {
|
||||
return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0
|
||||
}
|
||||
|
||||
type lengthFirstFieldSorter struct {
|
||||
fields fields
|
||||
}
|
||||
|
||||
func (x *lengthFirstFieldSorter) Len() int {
|
||||
return len(x.fields)
|
||||
}
|
||||
|
||||
func (x *lengthFirstFieldSorter) Swap(i, j int) {
|
||||
x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
|
||||
}
|
||||
|
||||
func (x *lengthFirstFieldSorter) Less(i, j int) bool {
|
||||
if len(x.fields[i].cborName) != len(x.fields[j].cborName) {
|
||||
return len(x.fields[i].cborName) < len(x.fields[j].cborName)
|
||||
}
|
||||
return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0
|
||||
}
|
||||
|
||||
// getEncodingStructType returns cached encoding metadata for struct type t,
// building and caching it on first use. The returned error (also stored in
// the cached metadata) reports fields that cannot be encoded.
func getEncodingStructType(t reflect.Type) (*encodingStructType, error) {
	if v, _ := encodingStructTypeCache.Load(t); v != nil {
		structType := v.(*encodingStructType)
		return structType, structType.err
	}

	flds, structOptions := getFields(t)

	// Structs with the toarray option encode as CBOR arrays and are handled
	// separately.
	if hasToArrayOption(structOptions) {
		return getEncodingStructToArrayType(t, flds)
	}

	var err error
	var hasKeyAsInt bool
	var hasKeyAsStr bool
	var omitEmptyIdx []int
	e := getEncodeBuffer()
	for i := 0; i < len(flds); i++ {
		// Get field's encodeFunc
		flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
		if flds[i].ef == nil {
			err = &UnsupportedTypeError{t}
			break
		}

		// Pre-encode the field's CBOR map key so encoding a value only
		// copies bytes.
		if flds[i].keyAsInt {
			// keyasint: the field name must parse as an integer map key.
			nameAsInt, numErr := strconv.Atoi(flds[i].name)
			if numErr != nil {
				err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")")
				break
			}
			flds[i].nameAsInt = int64(nameAsInt)
			if nameAsInt >= 0 {
				encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt))
			} else {
				// Major type 1 carries the argument -1-n, so negate and
				// subtract 1.
				n := nameAsInt*(-1) - 1
				encodeHead(e, byte(cborTypeNegativeInt), uint64(n))
			}
			flds[i].cborName = make([]byte, e.Len())
			copy(flds[i].cborName, e.Bytes())
			e.Reset()

			hasKeyAsInt = true
		} else {
			// Text-string key: head followed by the name bytes.
			encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name)))
			flds[i].cborName = make([]byte, e.Len()+len(flds[i].name))
			n := copy(flds[i].cborName, e.Bytes())
			copy(flds[i].cborName[n:], flds[i].name)
			e.Reset()

			// If cborName contains a text string, then cborNameByteString contains a
			// string that has the byte string major type but is otherwise identical to
			// cborName.
			flds[i].cborNameByteString = make([]byte, len(flds[i].cborName))
			copy(flds[i].cborNameByteString, flds[i].cborName)
			// Reset encoded CBOR type to byte string, preserving the "additional
			// information" bits:
			flds[i].cborNameByteString[0] = byte(cborTypeByteString) |
				getAdditionalInformation(flds[i].cborNameByteString[0])

			hasKeyAsStr = true
		}

		// Check if field can be omitted when empty
		if flds[i].omitEmpty {
			omitEmptyIdx = append(omitEmptyIdx, i)
		}
	}
	putEncodeBuffer(e)

	if err != nil {
		// Cache the failure so subsequent calls return it cheaply.
		structType := &encodingStructType{err: err}
		encodingStructTypeCache.Store(t, structType)
		return structType, structType.err
	}

	// Sort fields by canonical order
	bytewiseFields := make(fields, len(flds))
	copy(bytewiseFields, flds)
	sort.Sort(&bytewiseFieldSorter{bytewiseFields})

	// The bytewise slice is reused for length-first order unless integer and
	// string keys are mixed, in which case a separate sort is performed.
	lengthFirstFields := bytewiseFields
	if hasKeyAsInt && hasKeyAsStr {
		lengthFirstFields = make(fields, len(flds))
		copy(lengthFirstFields, flds)
		sort.Sort(&lengthFirstFieldSorter{lengthFirstFields})
	}

	structType := &encodingStructType{
		fields:             flds,
		bytewiseFields:     bytewiseFields,
		lengthFirstFields:  lengthFirstFields,
		omitEmptyFieldsIdx: omitEmptyIdx,
	}

	encodingStructTypeCache.Store(t, structType)
	return structType, structType.err
}
|
||||
|
||||
func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) {
|
||||
for i := 0; i < len(flds); i++ {
|
||||
// Get field's encodeFunc
|
||||
flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
|
||||
if flds[i].ef == nil {
|
||||
structType := &encodingStructType{err: &UnsupportedTypeError{t}}
|
||||
encodingStructTypeCache.Store(t, structType)
|
||||
return structType, structType.err
|
||||
}
|
||||
}
|
||||
|
||||
structType := &encodingStructType{
|
||||
fields: flds,
|
||||
toArray: true,
|
||||
}
|
||||
encodingStructTypeCache.Store(t, structType)
|
||||
return structType, structType.err
|
||||
}
|
||||
|
||||
func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) {
|
||||
if v, _ := encodeFuncCache.Load(t); v != nil {
|
||||
fs := v.(encodeFuncs)
|
||||
return fs.ef, fs.ief
|
||||
}
|
||||
ef, ief := getEncodeFuncInternal(t)
|
||||
encodeFuncCache.Store(t, encodeFuncs{ef, ief})
|
||||
return ef, ief
|
||||
}
|
||||
|
||||
func getTypeInfo(t reflect.Type) *typeInfo {
|
||||
if v, _ := typeInfoCache.Load(t); v != nil {
|
||||
return v.(*typeInfo)
|
||||
}
|
||||
tInfo := newTypeInfo(t)
|
||||
typeInfoCache.Store(t, tInfo)
|
||||
return tInfo
|
||||
}
|
||||
|
||||
// hasToArrayOption reports whether the struct tag options contain the
// ",toarray" option, i.e. "toarray" appears as a complete comma-separated
// option rather than a prefix of a longer word.
func hasToArrayOption(tag string) bool {
	const opt = ",toarray"
	i := strings.Index(tag, opt)
	if i < 0 {
		return false
	}
	rest := tag[i+len(opt):]
	return rest == "" || rest[0] == ','
}
|
||||
182
vendor/github.com/fxamacker/cbor/v2/common.go
generated
vendored
Normal file
182
vendor/github.com/fxamacker/cbor/v2/common.go
generated
vendored
Normal file
@@ -0,0 +1,182 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// cborType is a CBOR major type, stored pre-shifted into the high-order
// 3 bits of an initial byte (RFC 8949 Section 3.1).
type cborType uint8

const (
	cborTypePositiveInt cborType = 0x00 // major type 0
	cborTypeNegativeInt cborType = 0x20 // major type 1
	cborTypeByteString  cborType = 0x40 // major type 2
	cborTypeTextString  cborType = 0x60 // major type 3
	cborTypeArray       cborType = 0x80 // major type 4
	cborTypeMap         cborType = 0xa0 // major type 5
	cborTypeTag         cborType = 0xc0 // major type 6
	cborTypePrimitives  cborType = 0xe0 // major type 7
)
|
||||
|
||||
func (t cborType) String() string {
|
||||
switch t {
|
||||
case cborTypePositiveInt:
|
||||
return "positive integer"
|
||||
case cborTypeNegativeInt:
|
||||
return "negative integer"
|
||||
case cborTypeByteString:
|
||||
return "byte string"
|
||||
case cborTypeTextString:
|
||||
return "UTF-8 text string"
|
||||
case cborTypeArray:
|
||||
return "array"
|
||||
case cborTypeMap:
|
||||
return "map"
|
||||
case cborTypeTag:
|
||||
return "tag"
|
||||
case cborTypePrimitives:
|
||||
return "primitives"
|
||||
default:
|
||||
return "Invalid type " + strconv.Itoa(int(t))
|
||||
}
|
||||
}
|
||||
|
||||
type additionalInformation uint8
|
||||
|
||||
const (
|
||||
maxAdditionalInformationWithoutArgument = 23
|
||||
additionalInformationWith1ByteArgument = 24
|
||||
additionalInformationWith2ByteArgument = 25
|
||||
additionalInformationWith4ByteArgument = 26
|
||||
additionalInformationWith8ByteArgument = 27
|
||||
|
||||
// For major type 7.
|
||||
additionalInformationAsFalse = 20
|
||||
additionalInformationAsTrue = 21
|
||||
additionalInformationAsNull = 22
|
||||
additionalInformationAsUndefined = 23
|
||||
additionalInformationAsFloat16 = 25
|
||||
additionalInformationAsFloat32 = 26
|
||||
additionalInformationAsFloat64 = 27
|
||||
|
||||
// For major type 2, 3, 4, 5.
|
||||
additionalInformationAsIndefiniteLengthFlag = 31
|
||||
)
|
||||
|
||||
const (
|
||||
maxSimpleValueInAdditionalInformation = 23
|
||||
minSimpleValueIn1ByteArgument = 32
|
||||
)
|
||||
|
||||
func (ai additionalInformation) isIndefiniteLength() bool {
|
||||
return ai == additionalInformationAsIndefiniteLengthFlag
|
||||
}
|
||||
|
||||
const (
|
||||
// From RFC 8949 Section 3:
|
||||
// "The initial byte of each encoded data item contains both information about the major type
|
||||
// (the high-order 3 bits, described in Section 3.1) and additional information
|
||||
// (the low-order 5 bits)."
|
||||
|
||||
// typeMask is used to extract major type in initial byte of encoded data item.
|
||||
typeMask = 0xe0
|
||||
|
||||
// additionalInformationMask is used to extract additional information in initial byte of encoded data item.
|
||||
additionalInformationMask = 0x1f
|
||||
)
|
||||
|
||||
func getType(raw byte) cborType {
|
||||
return cborType(raw & typeMask)
|
||||
}
|
||||
|
||||
func getAdditionalInformation(raw byte) byte {
|
||||
return raw & additionalInformationMask
|
||||
}
|
||||
|
||||
func isBreakFlag(raw byte) bool {
|
||||
return raw == cborBreakFlag
|
||||
}
|
||||
|
||||
func parseInitialByte(b byte) (t cborType, ai byte) {
|
||||
return getType(b), getAdditionalInformation(b)
|
||||
}
|
||||
|
||||
const (
|
||||
tagNumRFC3339Time = 0
|
||||
tagNumEpochTime = 1
|
||||
tagNumUnsignedBignum = 2
|
||||
tagNumNegativeBignum = 3
|
||||
tagNumExpectedLaterEncodingBase64URL = 21
|
||||
tagNumExpectedLaterEncodingBase64 = 22
|
||||
tagNumExpectedLaterEncodingBase16 = 23
|
||||
tagNumSelfDescribedCBOR = 55799
|
||||
)
|
||||
|
||||
const (
|
||||
cborBreakFlag = byte(0xff)
|
||||
cborByteStringWithIndefiniteLengthHead = byte(0x5f)
|
||||
cborTextStringWithIndefiniteLengthHead = byte(0x7f)
|
||||
cborArrayWithIndefiniteLengthHead = byte(0x9f)
|
||||
cborMapWithIndefiniteLengthHead = byte(0xbf)
|
||||
)
|
||||
|
||||
var (
|
||||
cborFalse = []byte{0xf4}
|
||||
cborTrue = []byte{0xf5}
|
||||
cborNil = []byte{0xf6}
|
||||
cborNaN = []byte{0xf9, 0x7e, 0x00}
|
||||
cborPositiveInfinity = []byte{0xf9, 0x7c, 0x00}
|
||||
cborNegativeInfinity = []byte{0xf9, 0xfc, 0x00}
|
||||
)
|
||||
|
||||
// validBuiltinTag checks that supported built-in tag numbers are followed by expected content types.
|
||||
func validBuiltinTag(tagNum uint64, contentHead byte) error {
|
||||
t := getType(contentHead)
|
||||
switch tagNum {
|
||||
case tagNumRFC3339Time:
|
||||
// Tag content (date/time text string in RFC 3339 format) must be string type.
|
||||
if t != cborTypeTextString {
|
||||
return newInadmissibleTagContentTypeError(
|
||||
tagNumRFC3339Time,
|
||||
"text string",
|
||||
t.String())
|
||||
}
|
||||
return nil
|
||||
|
||||
case tagNumEpochTime:
|
||||
// Tag content (epoch date/time) must be uint, int, or float type.
|
||||
if t != cborTypePositiveInt && t != cborTypeNegativeInt && (contentHead < 0xf9 || contentHead > 0xfb) {
|
||||
return newInadmissibleTagContentTypeError(
|
||||
tagNumEpochTime,
|
||||
"integer or floating-point number",
|
||||
t.String())
|
||||
}
|
||||
return nil
|
||||
|
||||
case tagNumUnsignedBignum, tagNumNegativeBignum:
|
||||
// Tag content (bignum) must be byte type.
|
||||
if t != cborTypeByteString {
|
||||
return newInadmissibleTagContentTypeErrorf(
|
||||
fmt.Sprintf(
|
||||
"tag number %d or %d must be followed by byte string, got %s",
|
||||
tagNumUnsignedBignum,
|
||||
tagNumNegativeBignum,
|
||||
t.String(),
|
||||
))
|
||||
}
|
||||
return nil
|
||||
|
||||
case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16:
|
||||
// From RFC 8949 3.4.5.2:
|
||||
// The data item tagged can be a byte string or any other data item. In the latter
|
||||
// case, the tag applies to all of the byte string data items contained in the data
|
||||
// item, except for those contained in a nested data item tagged with an expected
|
||||
// conversion.
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
3187
vendor/github.com/fxamacker/cbor/v2/decode.go
generated
vendored
Normal file
3187
vendor/github.com/fxamacker/cbor/v2/decode.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
724
vendor/github.com/fxamacker/cbor/v2/diagnose.go
generated
vendored
Normal file
724
vendor/github.com/fxamacker/cbor/v2/diagnose.go
generated
vendored
Normal file
@@ -0,0 +1,724 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base32"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"unicode/utf16"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/x448/float16"
|
||||
)
|
||||
|
||||
// DiagMode is the main interface for CBOR diagnostic notation.
|
||||
type DiagMode interface {
|
||||
// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using this DiagMode.
|
||||
Diagnose([]byte) (string, error)
|
||||
|
||||
// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
|
||||
DiagnoseFirst([]byte) (string, []byte, error)
|
||||
|
||||
// DiagOptions returns user specified options used to create this DiagMode.
|
||||
DiagOptions() DiagOptions
|
||||
}
|
||||
|
||||
// ByteStringEncoding specifies the base encoding that byte strings are notated.
|
||||
type ByteStringEncoding uint8
|
||||
|
||||
const (
|
||||
// ByteStringBase16Encoding encodes byte strings in base16, without padding.
|
||||
ByteStringBase16Encoding ByteStringEncoding = iota
|
||||
|
||||
// ByteStringBase32Encoding encodes byte strings in base32, without padding.
|
||||
ByteStringBase32Encoding
|
||||
|
||||
// ByteStringBase32HexEncoding encodes byte strings in base32hex, without padding.
|
||||
ByteStringBase32HexEncoding
|
||||
|
||||
// ByteStringBase64Encoding encodes byte strings in base64url, without padding.
|
||||
ByteStringBase64Encoding
|
||||
|
||||
maxByteStringEncoding
|
||||
)
|
||||
|
||||
func (bse ByteStringEncoding) valid() error {
|
||||
if bse >= maxByteStringEncoding {
|
||||
return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse)))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DiagOptions specifies Diag options.
|
||||
type DiagOptions struct {
|
||||
// ByteStringEncoding specifies the base encoding that byte strings are notated.
|
||||
// Default is ByteStringBase16Encoding.
|
||||
ByteStringEncoding ByteStringEncoding
|
||||
|
||||
// ByteStringHexWhitespace specifies notating with whitespace in byte string
|
||||
// when ByteStringEncoding is ByteStringBase16Encoding.
|
||||
ByteStringHexWhitespace bool
|
||||
|
||||
// ByteStringText specifies notating with text in byte string
|
||||
// if it is a valid UTF-8 text.
|
||||
ByteStringText bool
|
||||
|
||||
// ByteStringEmbeddedCBOR specifies notating embedded CBOR in byte string
|
||||
// if it is a valid CBOR bytes.
|
||||
ByteStringEmbeddedCBOR bool
|
||||
|
||||
// CBORSequence specifies notating CBOR sequences.
|
||||
// otherwise, it returns an error if there are more bytes after the first CBOR.
|
||||
CBORSequence bool
|
||||
|
||||
// FloatPrecisionIndicator specifies appending a suffix to indicate float precision.
|
||||
// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-encoding-indicators.
|
||||
FloatPrecisionIndicator bool
|
||||
|
||||
// MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags.
|
||||
// Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can
|
||||
// require larger amounts of stack to deserialize. Don't increase this higher than you require.
|
||||
MaxNestedLevels int
|
||||
|
||||
// MaxArrayElements specifies the max number of elements for CBOR arrays.
|
||||
// Default is 128*1024=131072 and it can be set to [16, 2147483647]
|
||||
MaxArrayElements int
|
||||
|
||||
// MaxMapPairs specifies the max number of key-value pairs for CBOR maps.
|
||||
// Default is 128*1024=131072 and it can be set to [16, 2147483647]
|
||||
MaxMapPairs int
|
||||
}
|
||||
|
||||
// DiagMode returns a DiagMode with immutable options.
|
||||
func (opts DiagOptions) DiagMode() (DiagMode, error) {
|
||||
return opts.diagMode()
|
||||
}
|
||||
|
||||
func (opts DiagOptions) diagMode() (*diagMode, error) {
|
||||
if err := opts.ByteStringEncoding.valid(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
decMode, err := DecOptions{
|
||||
MaxNestedLevels: opts.MaxNestedLevels,
|
||||
MaxArrayElements: opts.MaxArrayElements,
|
||||
MaxMapPairs: opts.MaxMapPairs,
|
||||
}.decMode()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &diagMode{
|
||||
byteStringEncoding: opts.ByteStringEncoding,
|
||||
byteStringHexWhitespace: opts.ByteStringHexWhitespace,
|
||||
byteStringText: opts.ByteStringText,
|
||||
byteStringEmbeddedCBOR: opts.ByteStringEmbeddedCBOR,
|
||||
cborSequence: opts.CBORSequence,
|
||||
floatPrecisionIndicator: opts.FloatPrecisionIndicator,
|
||||
decMode: decMode,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type diagMode struct {
|
||||
byteStringEncoding ByteStringEncoding
|
||||
byteStringHexWhitespace bool
|
||||
byteStringText bool
|
||||
byteStringEmbeddedCBOR bool
|
||||
cborSequence bool
|
||||
floatPrecisionIndicator bool
|
||||
decMode *decMode
|
||||
}
|
||||
|
||||
// DiagOptions returns user specified options used to create this DiagMode.
|
||||
func (dm *diagMode) DiagOptions() DiagOptions {
|
||||
return DiagOptions{
|
||||
ByteStringEncoding: dm.byteStringEncoding,
|
||||
ByteStringHexWhitespace: dm.byteStringHexWhitespace,
|
||||
ByteStringText: dm.byteStringText,
|
||||
ByteStringEmbeddedCBOR: dm.byteStringEmbeddedCBOR,
|
||||
CBORSequence: dm.cborSequence,
|
||||
FloatPrecisionIndicator: dm.floatPrecisionIndicator,
|
||||
MaxNestedLevels: dm.decMode.maxNestedLevels,
|
||||
MaxArrayElements: dm.decMode.maxArrayElements,
|
||||
MaxMapPairs: dm.decMode.maxMapPairs,
|
||||
}
|
||||
}
|
||||
|
||||
// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using the DiagMode.
|
||||
func (dm *diagMode) Diagnose(data []byte) (string, error) {
|
||||
return newDiagnose(data, dm.decMode, dm).diag(dm.cborSequence)
|
||||
}
|
||||
|
||||
// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
|
||||
func (dm *diagMode) DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
|
||||
return newDiagnose(data, dm.decMode, dm).diagFirst()
|
||||
}
|
||||
|
||||
var defaultDiagMode, _ = DiagOptions{}.diagMode()
|
||||
|
||||
// Diagnose returns extended diagnostic notation (EDN) of CBOR data items
|
||||
// using the default diagnostic mode.
|
||||
//
|
||||
// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation.
|
||||
func Diagnose(data []byte) (string, error) {
|
||||
return defaultDiagMode.Diagnose(data)
|
||||
}
|
||||
|
||||
// Diagnose returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
|
||||
func DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
|
||||
return defaultDiagMode.DiagnoseFirst(data)
|
||||
}
|
||||
|
||||
type diagnose struct {
|
||||
dm *diagMode
|
||||
d *decoder
|
||||
w *bytes.Buffer
|
||||
}
|
||||
|
||||
func newDiagnose(data []byte, decm *decMode, diagm *diagMode) *diagnose {
|
||||
return &diagnose{
|
||||
dm: diagm,
|
||||
d: &decoder{data: data, dm: decm},
|
||||
w: &bytes.Buffer{},
|
||||
}
|
||||
}
|
||||
|
||||
func (di *diagnose) diag(cborSequence bool) (string, error) {
|
||||
// CBOR Sequence
|
||||
firstItem := true
|
||||
for {
|
||||
switch err := di.wellformed(cborSequence); err {
|
||||
case nil:
|
||||
if !firstItem {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
firstItem = false
|
||||
if itemErr := di.item(); itemErr != nil {
|
||||
return di.w.String(), itemErr
|
||||
}
|
||||
|
||||
case io.EOF:
|
||||
if firstItem {
|
||||
return di.w.String(), err
|
||||
}
|
||||
return di.w.String(), nil
|
||||
|
||||
default:
|
||||
return di.w.String(), err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (di *diagnose) diagFirst() (diagNotation string, rest []byte, err error) {
|
||||
err = di.wellformed(true)
|
||||
if err == nil {
|
||||
err = di.item()
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
// Return EDN and the rest of the data slice (which might be len 0)
|
||||
return di.w.String(), di.d.data[di.d.off:], nil
|
||||
}
|
||||
|
||||
return di.w.String(), nil, err
|
||||
}
|
||||
|
||||
func (di *diagnose) wellformed(allowExtraData bool) error {
|
||||
off := di.d.off
|
||||
err := di.d.wellformed(allowExtraData, false)
|
||||
di.d.off = off
|
||||
return err
|
||||
}
|
||||
|
||||
func (di *diagnose) item() error { //nolint:gocyclo
|
||||
initialByte := di.d.data[di.d.off]
|
||||
switch initialByte {
|
||||
case cborByteStringWithIndefiniteLengthHead,
|
||||
cborTextStringWithIndefiniteLengthHead: // indefinite-length byte/text string
|
||||
di.d.off++
|
||||
if isBreakFlag(di.d.data[di.d.off]) {
|
||||
di.d.off++
|
||||
switch initialByte {
|
||||
case cborByteStringWithIndefiniteLengthHead:
|
||||
// indefinite-length bytes with no chunks.
|
||||
di.w.WriteString(`''_`)
|
||||
return nil
|
||||
case cborTextStringWithIndefiniteLengthHead:
|
||||
// indefinite-length text with no chunks.
|
||||
di.w.WriteString(`""_`)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
di.w.WriteString("(_ ")
|
||||
|
||||
i := 0
|
||||
for !di.d.foundBreak() {
|
||||
if i > 0 {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
|
||||
i++
|
||||
// wellformedIndefiniteString() already checked that the next item is a byte/text string.
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
di.w.WriteByte(')')
|
||||
return nil
|
||||
|
||||
case cborArrayWithIndefiniteLengthHead: // indefinite-length array
|
||||
di.d.off++
|
||||
di.w.WriteString("[_ ")
|
||||
|
||||
i := 0
|
||||
for !di.d.foundBreak() {
|
||||
if i > 0 {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
|
||||
i++
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
di.w.WriteByte(']')
|
||||
return nil
|
||||
|
||||
case cborMapWithIndefiniteLengthHead: // indefinite-length map
|
||||
di.d.off++
|
||||
di.w.WriteString("{_ ")
|
||||
|
||||
i := 0
|
||||
for !di.d.foundBreak() {
|
||||
if i > 0 {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
|
||||
i++
|
||||
// key
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
di.w.WriteString(": ")
|
||||
|
||||
// value
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
di.w.WriteByte('}')
|
||||
return nil
|
||||
}
|
||||
|
||||
t := di.d.nextCBORType()
|
||||
switch t {
|
||||
case cborTypePositiveInt:
|
||||
_, _, val := di.d.getHead()
|
||||
di.w.WriteString(strconv.FormatUint(val, 10))
|
||||
return nil
|
||||
|
||||
case cborTypeNegativeInt:
|
||||
_, _, val := di.d.getHead()
|
||||
if val > math.MaxInt64 {
|
||||
// CBOR negative integer overflows int64, use big.Int to store value.
|
||||
bi := new(big.Int)
|
||||
bi.SetUint64(val)
|
||||
bi.Add(bi, big.NewInt(1))
|
||||
bi.Neg(bi)
|
||||
di.w.WriteString(bi.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
nValue := int64(-1) ^ int64(val)
|
||||
di.w.WriteString(strconv.FormatInt(nValue, 10))
|
||||
return nil
|
||||
|
||||
case cborTypeByteString:
|
||||
b, _ := di.d.parseByteString()
|
||||
return di.encodeByteString(b)
|
||||
|
||||
case cborTypeTextString:
|
||||
b, err := di.d.parseTextString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return di.encodeTextString(string(b), '"')
|
||||
|
||||
case cborTypeArray:
|
||||
_, _, val := di.d.getHead()
|
||||
count := int(val)
|
||||
di.w.WriteByte('[')
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
if i > 0 {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
di.w.WriteByte(']')
|
||||
return nil
|
||||
|
||||
case cborTypeMap:
|
||||
_, _, val := di.d.getHead()
|
||||
count := int(val)
|
||||
di.w.WriteByte('{')
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
if i > 0 {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
// key
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
di.w.WriteString(": ")
|
||||
// value
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
di.w.WriteByte('}')
|
||||
return nil
|
||||
|
||||
case cborTypeTag:
|
||||
_, _, tagNum := di.d.getHead()
|
||||
switch tagNum {
|
||||
case tagNumUnsignedBignum:
|
||||
if nt := di.d.nextCBORType(); nt != cborTypeByteString {
|
||||
return newInadmissibleTagContentTypeError(
|
||||
tagNumUnsignedBignum,
|
||||
"byte string",
|
||||
nt.String())
|
||||
}
|
||||
|
||||
b, _ := di.d.parseByteString()
|
||||
bi := new(big.Int).SetBytes(b)
|
||||
di.w.WriteString(bi.String())
|
||||
return nil
|
||||
|
||||
case tagNumNegativeBignum:
|
||||
if nt := di.d.nextCBORType(); nt != cborTypeByteString {
|
||||
return newInadmissibleTagContentTypeError(
|
||||
tagNumNegativeBignum,
|
||||
"byte string",
|
||||
nt.String(),
|
||||
)
|
||||
}
|
||||
|
||||
b, _ := di.d.parseByteString()
|
||||
bi := new(big.Int).SetBytes(b)
|
||||
bi.Add(bi, big.NewInt(1))
|
||||
bi.Neg(bi)
|
||||
di.w.WriteString(bi.String())
|
||||
return nil
|
||||
|
||||
default:
|
||||
di.w.WriteString(strconv.FormatUint(tagNum, 10))
|
||||
di.w.WriteByte('(')
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
di.w.WriteByte(')')
|
||||
return nil
|
||||
}
|
||||
|
||||
case cborTypePrimitives:
|
||||
_, ai, val := di.d.getHead()
|
||||
switch ai {
|
||||
case additionalInformationAsFalse:
|
||||
di.w.WriteString("false")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsTrue:
|
||||
di.w.WriteString("true")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsNull:
|
||||
di.w.WriteString("null")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsUndefined:
|
||||
di.w.WriteString("undefined")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsFloat16,
|
||||
additionalInformationAsFloat32,
|
||||
additionalInformationAsFloat64:
|
||||
return di.encodeFloat(ai, val)
|
||||
|
||||
default:
|
||||
di.w.WriteString("simple(")
|
||||
di.w.WriteString(strconv.FormatUint(val, 10))
|
||||
di.w.WriteByte(')')
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeU16 format a rune as "\uxxxx"
|
||||
func (di *diagnose) writeU16(val rune) {
|
||||
di.w.WriteString("\\u")
|
||||
var in [2]byte
|
||||
in[0] = byte(val >> 8)
|
||||
in[1] = byte(val)
|
||||
sz := hex.EncodedLen(len(in))
|
||||
di.w.Grow(sz)
|
||||
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||
hex.Encode(dst, in[:])
|
||||
di.w.Write(dst)
|
||||
}
|
||||
|
||||
var rawBase32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding)
|
||||
var rawBase32HexEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
|
||||
|
||||
func (di *diagnose) encodeByteString(val []byte) error {
|
||||
if len(val) > 0 {
|
||||
if di.dm.byteStringText && utf8.Valid(val) {
|
||||
return di.encodeTextString(string(val), '\'')
|
||||
}
|
||||
|
||||
if di.dm.byteStringEmbeddedCBOR {
|
||||
di2 := newDiagnose(val, di.dm.decMode, di.dm)
|
||||
// should always notating embedded CBOR sequence.
|
||||
if str, err := di2.diag(true); err == nil {
|
||||
di.w.WriteString("<<")
|
||||
di.w.WriteString(str)
|
||||
di.w.WriteString(">>")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch di.dm.byteStringEncoding {
|
||||
case ByteStringBase16Encoding:
|
||||
di.w.WriteString("h'")
|
||||
if di.dm.byteStringHexWhitespace {
|
||||
sz := hex.EncodedLen(len(val))
|
||||
if len(val) > 0 {
|
||||
sz += len(val) - 1
|
||||
}
|
||||
di.w.Grow(sz)
|
||||
|
||||
dst := di.w.Bytes()[di.w.Len():]
|
||||
for i := range val {
|
||||
if i > 0 {
|
||||
dst = append(dst, ' ')
|
||||
}
|
||||
hex.Encode(dst[len(dst):len(dst)+2], val[i:i+1])
|
||||
dst = dst[:len(dst)+2]
|
||||
}
|
||||
di.w.Write(dst)
|
||||
} else {
|
||||
sz := hex.EncodedLen(len(val))
|
||||
di.w.Grow(sz)
|
||||
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||
hex.Encode(dst, val)
|
||||
di.w.Write(dst)
|
||||
}
|
||||
di.w.WriteByte('\'')
|
||||
return nil
|
||||
|
||||
case ByteStringBase32Encoding:
|
||||
di.w.WriteString("b32'")
|
||||
sz := rawBase32Encoding.EncodedLen(len(val))
|
||||
di.w.Grow(sz)
|
||||
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||
rawBase32Encoding.Encode(dst, val)
|
||||
di.w.Write(dst)
|
||||
di.w.WriteByte('\'')
|
||||
return nil
|
||||
|
||||
case ByteStringBase32HexEncoding:
|
||||
di.w.WriteString("h32'")
|
||||
sz := rawBase32HexEncoding.EncodedLen(len(val))
|
||||
di.w.Grow(sz)
|
||||
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||
rawBase32HexEncoding.Encode(dst, val)
|
||||
di.w.Write(dst)
|
||||
di.w.WriteByte('\'')
|
||||
return nil
|
||||
|
||||
case ByteStringBase64Encoding:
|
||||
di.w.WriteString("b64'")
|
||||
sz := base64.RawURLEncoding.EncodedLen(len(val))
|
||||
di.w.Grow(sz)
|
||||
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||
base64.RawURLEncoding.Encode(dst, val)
|
||||
di.w.Write(dst)
|
||||
di.w.WriteByte('\'')
|
||||
return nil
|
||||
|
||||
default:
|
||||
// It should not be possible for users to construct a *diagMode with an invalid byte
|
||||
// string encoding.
|
||||
panic(fmt.Sprintf("diagmode has invalid ByteStringEncoding %v", di.dm.byteStringEncoding))
|
||||
}
|
||||
}
|
||||
|
||||
const utf16SurrSelf = rune(0x10000)
|
||||
|
||||
// quote should be either `'` or `"`
|
||||
func (di *diagnose) encodeTextString(val string, quote byte) error {
|
||||
di.w.WriteByte(quote)
|
||||
|
||||
for i := 0; i < len(val); {
|
||||
if b := val[i]; b < utf8.RuneSelf {
|
||||
switch {
|
||||
case b == '\t', b == '\n', b == '\r', b == '\\', b == quote:
|
||||
di.w.WriteByte('\\')
|
||||
|
||||
switch b {
|
||||
case '\t':
|
||||
b = 't'
|
||||
case '\n':
|
||||
b = 'n'
|
||||
case '\r':
|
||||
b = 'r'
|
||||
}
|
||||
di.w.WriteByte(b)
|
||||
|
||||
case b >= ' ' && b <= '~':
|
||||
di.w.WriteByte(b)
|
||||
|
||||
default:
|
||||
di.writeU16(rune(b))
|
||||
}
|
||||
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
c, size := utf8.DecodeRuneInString(val[i:])
|
||||
switch {
|
||||
case c == utf8.RuneError:
|
||||
return &SemanticError{"cbor: invalid UTF-8 string"}
|
||||
|
||||
case c < utf16SurrSelf:
|
||||
di.writeU16(c)
|
||||
|
||||
default:
|
||||
c1, c2 := utf16.EncodeRune(c)
|
||||
di.writeU16(c1)
|
||||
di.writeU16(c2)
|
||||
}
|
||||
|
||||
i += size
|
||||
}
|
||||
|
||||
di.w.WriteByte(quote)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (di *diagnose) encodeFloat(ai byte, val uint64) error {
|
||||
f64 := float64(0)
|
||||
switch ai {
|
||||
case additionalInformationAsFloat16:
|
||||
f16 := float16.Frombits(uint16(val))
|
||||
switch {
|
||||
case f16.IsNaN():
|
||||
di.w.WriteString("NaN")
|
||||
return nil
|
||||
case f16.IsInf(1):
|
||||
di.w.WriteString("Infinity")
|
||||
return nil
|
||||
case f16.IsInf(-1):
|
||||
di.w.WriteString("-Infinity")
|
||||
return nil
|
||||
default:
|
||||
f64 = float64(f16.Float32())
|
||||
}
|
||||
|
||||
case additionalInformationAsFloat32:
|
||||
f32 := math.Float32frombits(uint32(val))
|
||||
switch {
|
||||
case f32 != f32:
|
||||
di.w.WriteString("NaN")
|
||||
return nil
|
||||
case f32 > math.MaxFloat32:
|
||||
di.w.WriteString("Infinity")
|
||||
return nil
|
||||
case f32 < -math.MaxFloat32:
|
||||
di.w.WriteString("-Infinity")
|
||||
return nil
|
||||
default:
|
||||
f64 = float64(f32)
|
||||
}
|
||||
|
||||
case additionalInformationAsFloat64:
|
||||
f64 = math.Float64frombits(val)
|
||||
switch {
|
||||
case f64 != f64:
|
||||
di.w.WriteString("NaN")
|
||||
return nil
|
||||
case f64 > math.MaxFloat64:
|
||||
di.w.WriteString("Infinity")
|
||||
return nil
|
||||
case f64 < -math.MaxFloat64:
|
||||
di.w.WriteString("-Infinity")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// Use ES6 number to string conversion which should match most JSON generators.
|
||||
// Inspired by https://github.com/golang/go/blob/4df10fba1687a6d4f51d7238a403f8f2298f6a16/src/encoding/json/encode.go#L585
|
||||
const bitSize = 64
|
||||
b := make([]byte, 0, 32)
|
||||
if abs := math.Abs(f64); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
|
||||
b = strconv.AppendFloat(b, f64, 'e', -1, bitSize)
|
||||
// clean up e-09 to e-9
|
||||
n := len(b)
|
||||
if n >= 4 && string(b[n-4:n-1]) == "e-0" {
|
||||
b = append(b[:n-2], b[n-1])
|
||||
}
|
||||
} else {
|
||||
b = strconv.AppendFloat(b, f64, 'f', -1, bitSize)
|
||||
}
|
||||
|
||||
// add decimal point and trailing zero if needed
|
||||
if bytes.IndexByte(b, '.') < 0 {
|
||||
if i := bytes.IndexByte(b, 'e'); i < 0 {
|
||||
b = append(b, '.', '0')
|
||||
} else {
|
||||
b = append(b[:i+2], b[i:]...)
|
||||
b[i] = '.'
|
||||
b[i+1] = '0'
|
||||
}
|
||||
}
|
||||
|
||||
di.w.WriteString(string(b))
|
||||
|
||||
if di.dm.floatPrecisionIndicator {
|
||||
switch ai {
|
||||
case additionalInformationAsFloat16:
|
||||
di.w.WriteString("_1")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsFloat32:
|
||||
di.w.WriteString("_2")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsFloat64:
|
||||
di.w.WriteString("_3")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
129
vendor/github.com/fxamacker/cbor/v2/doc.go
generated
vendored
Normal file
129
vendor/github.com/fxamacker/cbor/v2/doc.go
generated
vendored
Normal file
@@ -0,0 +1,129 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
/*
|
||||
Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags,
|
||||
Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding,
|
||||
CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection.
|
||||
|
||||
Encoding options allow "preferred serialization" by encoding integers and floats
|
||||
to their smallest forms (e.g. float16) when values fit.
|
||||
|
||||
Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller
|
||||
and easier to use with structs.
|
||||
|
||||
For example, "toarray" tag makes struct fields encode to CBOR array elements. And
|
||||
"keyasint" makes a field encode to an element of CBOR map with specified int key.
|
||||
|
||||
Latest docs can be viewed at https://github.com/fxamacker/cbor#cbor-library-in-go
|
||||
|
||||
# Basics
|
||||
|
||||
The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start
|
||||
|
||||
Function signatures identical to encoding/json include:
|
||||
|
||||
Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode.
|
||||
|
||||
Standard interfaces include:
|
||||
|
||||
BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler.
|
||||
|
||||
Custom encoding and decoding is possible by implementing standard interfaces for
|
||||
user-defined Go types.
|
||||
|
||||
Codec functions are available at package-level (using defaults options) or by
|
||||
creating modes from options at runtime.
|
||||
|
||||
"Mode" in this API means definite way of encoding (EncMode) or decoding (DecMode).
|
||||
|
||||
EncMode and DecMode interfaces are created from EncOptions or DecOptions structs.
|
||||
|
||||
em, err := cbor.EncOptions{...}.EncMode()
|
||||
em, err := cbor.CanonicalEncOptions().EncMode()
|
||||
em, err := cbor.CTAP2EncOptions().EncMode()
|
||||
|
||||
Modes use immutable options to avoid side-effects and simplify concurrency. Behavior of
|
||||
modes won't accidentally change at runtime after they're created.
|
||||
|
||||
Modes are intended to be reused and are safe for concurrent use.
|
||||
|
||||
EncMode and DecMode Interfaces
|
||||
|
||||
// EncMode interface uses immutable options and is safe for concurrent use.
|
||||
type EncMode interface {
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
NewEncoder(w io.Writer) *Encoder
|
||||
EncOptions() EncOptions // returns copy of options
|
||||
}
|
||||
|
||||
// DecMode interface uses immutable options and is safe for concurrent use.
|
||||
type DecMode interface {
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
NewDecoder(r io.Reader) *Decoder
|
||||
DecOptions() DecOptions // returns copy of options
|
||||
}
|
||||
|
||||
Using Default Encoding Mode
|
||||
|
||||
b, err := cbor.Marshal(v)
|
||||
|
||||
encoder := cbor.NewEncoder(w)
|
||||
err = encoder.Encode(v)
|
||||
|
||||
Using Default Decoding Mode
|
||||
|
||||
err := cbor.Unmarshal(b, &v)
|
||||
|
||||
decoder := cbor.NewDecoder(r)
|
||||
err = decoder.Decode(&v)
|
||||
|
||||
Creating and Using Encoding Modes
|
||||
|
||||
// Create EncOptions using either struct literal or a function.
|
||||
opts := cbor.CanonicalEncOptions()
|
||||
|
||||
// If needed, modify encoding options
|
||||
opts.Time = cbor.TimeUnix
|
||||
|
||||
// Create reusable EncMode interface with immutable options, safe for concurrent use.
|
||||
em, err := opts.EncMode()
|
||||
|
||||
// Use EncMode like encoding/json, with same function signatures.
|
||||
b, err := em.Marshal(v)
|
||||
// or
|
||||
encoder := em.NewEncoder(w)
|
||||
err := encoder.Encode(v)
|
||||
|
||||
// NOTE: Both em.Marshal(v) and encoder.Encode(v) use encoding options
|
||||
// specified during creation of em (encoding mode).
|
||||
|
||||
# CBOR Options
|
||||
|
||||
Predefined Encoding Options: https://github.com/fxamacker/cbor#predefined-encoding-options
|
||||
|
||||
Encoding Options: https://github.com/fxamacker/cbor#encoding-options
|
||||
|
||||
Decoding Options: https://github.com/fxamacker/cbor#decoding-options
|
||||
|
||||
# Struct Tags
|
||||
|
||||
Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected.
|
||||
If both struct tags are specified then `cbor` is used.
|
||||
|
||||
Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use
|
||||
very compact formats like COSE and CWT (CBOR Web Tokens) with structs.
|
||||
|
||||
For example, "toarray" makes struct fields encode to array elements. And "keyasint"
|
||||
makes struct fields encode to elements of CBOR map with int keys.
|
||||
|
||||
https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png
|
||||
|
||||
Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1
|
||||
|
||||
# Tests and Fuzzing
|
||||
|
||||
Over 375 tests are included in this package. Cover-guided fuzzing is handled by
|
||||
a private fuzzer that replaced fxamacker/cbor-fuzz years ago.
|
||||
*/
|
||||
package cbor
|
||||
1989
vendor/github.com/fxamacker/cbor/v2/encode.go
generated
vendored
Normal file
1989
vendor/github.com/fxamacker/cbor/v2/encode.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
94
vendor/github.com/fxamacker/cbor/v2/encode_map.go
generated
vendored
Normal file
94
vendor/github.com/fxamacker/cbor/v2/encode_map.go
generated
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.

//go:build go1.20

package cbor

import (
	"bytes"
	"reflect"
	"sync"
)

// mapKeyValueEncodeFunc bundles the encode functions for a map's key and
// element types with pools of reusable *reflect.Value scratch variables.
// On go1.20+ the scratch values let map iteration use SetIterKey/SetIterValue,
// which reuse storage instead of allocating a new value per map entry.
type mapKeyValueEncodeFunc struct {
	kf, ef       encodeFunc // encoders for the map's key type and element type
	kpool, vpool sync.Pool  // pools of *reflect.Value scratch space for keys and values
}

// encodeKeyValues encodes every key/value pair of map v into e.
// If kvs is non-nil it is filled with one keyValue per entry, recording the
// byte offsets (relative to the start of this call's output) of each encoded
// key, value, and the end of the pair, so the caller can reorder entries
// afterwards (e.g. for canonical/sorted map encoding).
func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
	iterk := me.kpool.Get().(*reflect.Value)
	defer func() {
		// Zero the scratch value so the pooled reflect.Value no longer
		// references the last-seen key before returning it to the pool.
		iterk.SetZero()
		me.kpool.Put(iterk)
	}()
	iterv := me.vpool.Get().(*reflect.Value)
	defer func() {
		iterv.SetZero()
		me.vpool.Put(iterv)
	}()

	if kvs == nil {
		// Fast path: no offsets requested; encode pairs in iteration order.
		for i, iter := 0, v.MapRange(); iter.Next(); i++ {
			iterk.SetIterKey(iter)
			iterv.SetIterValue(iter)

			if err := me.kf(e, em, *iterk); err != nil {
				return err
			}
			if err := me.ef(e, em, *iterv); err != nil {
				return err
			}
		}
		return nil
	}

	// Offsets in kvs are made relative to the buffer length at entry, since
	// e may already contain previously encoded data.
	initial := e.Len()
	for i, iter := 0, v.MapRange(); iter.Next(); i++ {
		iterk.SetIterKey(iter)
		iterv.SetIterValue(iter)

		offset := e.Len()
		if err := me.kf(e, em, *iterk); err != nil {
			return err
		}
		valueOffset := e.Len()
		if err := me.ef(e, em, *iterv); err != nil {
			return err
		}
		kvs[i] = keyValue{
			offset:      offset - initial,
			valueOffset: valueOffset - initial,
			nextOffset:  e.Len() - initial,
		}
	}

	return nil
}

// getEncodeMapFunc returns an encodeFunc for map type t, or nil if either the
// key type or the element type has no encode function.
func getEncodeMapFunc(t reflect.Type) encodeFunc {
	kf, _ := getEncodeFunc(t.Key())
	ef, _ := getEncodeFunc(t.Elem())
	if kf == nil || ef == nil {
		return nil
	}
	mkv := &mapKeyValueEncodeFunc{
		kf: kf,
		ef: ef,
		kpool: sync.Pool{
			New: func() interface{} {
				// Addressable zero value of the key type; pooled by pointer.
				rk := reflect.New(t.Key()).Elem()
				return &rk
			},
		},
		vpool: sync.Pool{
			New: func() interface{} {
				rv := reflect.New(t.Elem()).Elem()
				return &rv
			},
		},
	}
	return mapEncodeFunc{
		e: mkv.encodeKeyValues,
	}.encode
}
|
||||
60
vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go
generated
vendored
Normal file
60
vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go
generated
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.

//go:build !go1.20

package cbor

import (
	"bytes"
	"reflect"
)

// mapKeyValueEncodeFunc bundles the encode functions for a map's key and
// element types. Pre-go1.20 variant: map iteration uses iter.Key()/iter.Value()
// directly (SetIterKey/SetIterValue are unavailable), so no scratch pools.
type mapKeyValueEncodeFunc struct {
	kf, ef encodeFunc // encoders for the map's key type and element type
}

// encodeKeyValues encodes every key/value pair of map v into e.
// If kvs is non-nil it is filled with one keyValue per entry, recording the
// byte offsets (relative to the start of this call's output) of each encoded
// key, value, and the end of the pair, so the caller can reorder entries
// afterwards (e.g. for canonical/sorted map encoding).
func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
	if kvs == nil {
		// Fast path: no offsets requested; encode pairs in iteration order.
		for i, iter := 0, v.MapRange(); iter.Next(); i++ {
			if err := me.kf(e, em, iter.Key()); err != nil {
				return err
			}
			if err := me.ef(e, em, iter.Value()); err != nil {
				return err
			}
		}
		return nil
	}

	// Offsets in kvs are made relative to the buffer length at entry, since
	// e may already contain previously encoded data.
	initial := e.Len()
	for i, iter := 0, v.MapRange(); iter.Next(); i++ {
		offset := e.Len()
		if err := me.kf(e, em, iter.Key()); err != nil {
			return err
		}
		valueOffset := e.Len()
		if err := me.ef(e, em, iter.Value()); err != nil {
			return err
		}
		kvs[i] = keyValue{
			offset:      offset - initial,
			valueOffset: valueOffset - initial,
			nextOffset:  e.Len() - initial,
		}
	}

	return nil
}

// getEncodeMapFunc returns an encodeFunc for map type t, or nil if either the
// key type or the element type has no encode function.
func getEncodeMapFunc(t reflect.Type) encodeFunc {
	kf, _ := getEncodeFunc(t.Key())
	ef, _ := getEncodeFunc(t.Elem())
	if kf == nil || ef == nil {
		return nil
	}
	mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef}
	return mapEncodeFunc{
		e: mkv.encodeKeyValues,
	}.encode
}
|
||||
69
vendor/github.com/fxamacker/cbor/v2/simplevalue.go
generated
vendored
Normal file
69
vendor/github.com/fxamacker/cbor/v2/simplevalue.go
generated
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
package cbor

import (
	"errors"
	"fmt"
	"reflect"
)

// SimpleValue represents CBOR simple value.
// CBOR simple value is:
//   - an extension point like CBOR tag.
//   - a subset of CBOR major type 7 that isn't floating-point.
//   - "identified by a number between 0 and 255, but distinct from that number itself".
//     For example, "a simple value 2 is not equivalent to an integer 2" as a CBOR map key.
//
// CBOR simple values identified by 20..23 are: "false", "true" , "null", and "undefined".
// Other CBOR simple values are currently unassigned/reserved by IANA.
type SimpleValue uint8

var (
	// typeSimpleValue caches the reflect.Type of SimpleValue for use by the codec.
	typeSimpleValue = reflect.TypeOf(SimpleValue(0))
)

// MarshalCBOR encodes SimpleValue as CBOR simple value (major type 7).
// Values 24..31 are rejected: RFC 8949 reserves them so that the one-byte and
// two-byte encodings never overlap.
func (sv SimpleValue) MarshalCBOR() ([]byte, error) {
	// RFC 8949 3.3. Floating-Point Numbers and Values with No Content says:
	// "An encoder MUST NOT issue two-byte sequences that start with 0xf8
	// (major type 7, additional information 24) and continue with a byte
	// less than 0x20 (32 decimal). Such sequences are not well-formed.
	// (This implies that an encoder cannot encode false, true, null, or
	// undefined in two-byte sequences and that only the one-byte variants
	// of these are well-formed; more generally speaking, each simple value
	// only has a single representation variant)."

	switch {
	case sv <= maxSimpleValueInAdditionalInformation:
		// One-byte form: the simple value fits in the head's additional info.
		return []byte{byte(cborTypePrimitives) | byte(sv)}, nil

	case sv >= minSimpleValueIn1ByteArgument:
		// Two-byte form: 0xf8 head followed by the value byte.
		return []byte{byte(cborTypePrimitives) | additionalInformationWith1ByteArgument, byte(sv)}, nil

	default:
		// Reserved range (see RFC 8949 quote above): not encodable.
		return nil, &UnsupportedValueError{msg: fmt.Sprintf("SimpleValue(%d)", sv)}
	}
}

// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
// data is assumed to be a single well-formed CBOR data item.
func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {
	if sv == nil {
		return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
	}

	d := decoder{data: data, dm: defaultDecMode}

	typ, ai, val := d.getHead()

	if typ != cborTypePrimitives {
		return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue"}
	}
	if ai > additionalInformationWith1ByteArgument {
		// ai > 24 means float16/32/64 or break — not a simple value.
		return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue", errorMsg: "not simple values"}
	}

	// It is safe to cast val to uint8 here because
	// - data is already verified to be well-formed CBOR simple value and
	// - val is <= math.MaxUint8.
	*sv = SimpleValue(val)
	return nil
}
|
||||
277
vendor/github.com/fxamacker/cbor/v2/stream.go
generated
vendored
Normal file
277
vendor/github.com/fxamacker/cbor/v2/stream.go
generated
vendored
Normal file
@@ -0,0 +1,277 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.

package cbor

import (
	"bytes"
	"errors"
	"io"
	"reflect"
)

// Decoder reads and decodes CBOR values from io.Reader.
type Decoder struct {
	r         io.Reader
	d         decoder // reusable low-level decoder, reset per data item
	buf       []byte  // buffered data read from r but not yet consumed
	off       int     // next read offset in buf
	bytesRead int     // total bytes consumed by Decode/Skip so far
}

// NewDecoder returns a new decoder that reads and decodes from r using
// the default decoding options.
func NewDecoder(r io.Reader) *Decoder {
	return defaultDecMode.NewDecoder(r)
}

// Decode reads CBOR value and decodes it into the value pointed to by v.
func (dec *Decoder) Decode(v interface{}) error {
	_, err := dec.readNext()
	if err != nil {
		// Return validation error or read error.
		return err
	}

	dec.d.reset(dec.buf[dec.off:])
	err = dec.d.value(v)

	// Increment dec.off even if decoding err is not nil because
	// dec.d.off points to the next CBOR data item if current
	// CBOR data item is valid but failed to be decoded into v.
	// This allows next CBOR data item to be decoded in next
	// call to this function.
	dec.off += dec.d.off
	dec.bytesRead += dec.d.off

	return err
}

// Skip skips to the next CBOR data item (if there is any),
// otherwise it returns error such as io.EOF, io.UnexpectedEOF, etc.
func (dec *Decoder) Skip() error {
	n, err := dec.readNext()
	if err != nil {
		// Return validation error or read error.
		return err
	}

	dec.off += n
	dec.bytesRead += n
	return nil
}

// NumBytesRead returns the number of bytes read.
func (dec *Decoder) NumBytesRead() int {
	return dec.bytesRead
}

// Buffered returns a reader for data remaining in Decoder's buffer.
// Returned reader is valid until the next call to Decode or Skip.
func (dec *Decoder) Buffered() io.Reader {
	return bytes.NewReader(dec.buf[dec.off:])
}

// readNext() reads next CBOR data item from Reader to buffer.
// It returns the size of next CBOR data item.
// It also returns validation error or read error if any.
// It loops: validate buffered data; if incomplete, read more and retry.
func (dec *Decoder) readNext() (int, error) {
	var readErr error
	var validErr error

	for {
		// Process any unread data in dec.buf.
		if dec.off < len(dec.buf) {
			dec.d.reset(dec.buf[dec.off:])
			off := dec.off // Save offset before data validation
			validErr = dec.d.wellformed(true, false)
			dec.off = off // Restore offset

			if validErr == nil {
				// Buffered data holds one complete, well-formed item.
				return dec.d.off, nil
			}

			if validErr != io.ErrUnexpectedEOF {
				// Malformed data: report it without consuming.
				return 0, validErr
			}

			// Process last read error on io.ErrUnexpectedEOF.
			if readErr != nil {
				if readErr == io.EOF {
					// current CBOR data item is incomplete.
					return 0, io.ErrUnexpectedEOF
				}
				return 0, readErr
			}
		}

		// More data is needed and there was no read error.
		var n int
		for n == 0 {
			n, readErr = dec.read()
			if n == 0 && readErr != nil {
				// No more data can be read and read error is encountered.
				// At this point, validErr is either nil or io.ErrUnexpectedEOF.
				if readErr == io.EOF {
					if validErr == io.ErrUnexpectedEOF {
						// current CBOR data item is incomplete.
						return 0, io.ErrUnexpectedEOF
					}
				}
				return 0, readErr
			}
		}

		// At this point, dec.buf contains new data from last read (n > 0).
	}
}

// read() reads data from Reader to buffer.
// It returns number of bytes read and any read error encountered.
// Postconditions:
//   - dec.buf contains previously unread data and new data.
//   - dec.off is 0.
func (dec *Decoder) read() (int, error) {
	// Grow buf if needed.
	const minRead = 512
	if cap(dec.buf)-len(dec.buf)+dec.off < minRead {
		// Not enough free space even after compacting: allocate a larger
		// buffer and copy the unread tail into it.
		oldUnreadBuf := dec.buf[dec.off:]
		dec.buf = make([]byte, len(dec.buf)-dec.off, 2*cap(dec.buf)+minRead)
		dec.overwriteBuf(oldUnreadBuf)
	}

	// Copy unread data over read data and reset off to 0.
	if dec.off > 0 {
		dec.overwriteBuf(dec.buf[dec.off:])
	}

	// Read from reader and reslice buf.
	n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
	dec.buf = dec.buf[0 : len(dec.buf)+n]
	return n, err
}

// overwriteBuf copies newBuf to the front of dec.buf, truncates dec.buf to the
// copied length, and resets the read offset to 0 (compacts the buffer).
func (dec *Decoder) overwriteBuf(newBuf []byte) {
	n := copy(dec.buf, newBuf)
	dec.buf = dec.buf[:n]
	dec.off = 0
}

// Encoder writes CBOR values to io.Writer.
type Encoder struct {
	w          io.Writer
	em         *encMode
	indefTypes []cborType // stack of currently open indefinite-length containers
}

// NewEncoder returns a new encoder that writes to w using the default encoding options.
func NewEncoder(w io.Writer) *Encoder {
	return defaultEncMode.NewEncoder(w)
}

// Encode writes the CBOR encoding of v.
func (enc *Encoder) Encode(v interface{}) error {
	if len(enc.indefTypes) > 0 && v != nil {
		// Inside an open indefinite-length string, chunks must match the
		// container's type (RFC 8949 requires string chunks of the same type).
		indefType := enc.indefTypes[len(enc.indefTypes)-1]
		if indefType == cborTypeTextString {
			k := reflect.TypeOf(v).Kind()
			if k != reflect.String {
				return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string")
			}
		} else if indefType == cborTypeByteString {
			t := reflect.TypeOf(v)
			k := t.Kind()
			if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 {
				return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length byte string")
			}
		}
	}

	buf := getEncodeBuffer()

	err := encode(buf, enc.em, reflect.ValueOf(v))
	if err == nil {
		_, err = enc.w.Write(buf.Bytes())
	}

	putEncodeBuffer(buf)
	return err
}

// StartIndefiniteByteString starts byte string encoding of indefinite length.
// Subsequent calls of (*Encoder).Encode() encodes definite length byte strings
// ("chunks") as one contiguous string until EndIndefinite is called.
func (enc *Encoder) StartIndefiniteByteString() error {
	return enc.startIndefinite(cborTypeByteString)
}

// StartIndefiniteTextString starts text string encoding of indefinite length.
// Subsequent calls of (*Encoder).Encode() encodes definite length text strings
// ("chunks") as one contiguous string until EndIndefinite is called.
func (enc *Encoder) StartIndefiniteTextString() error {
	return enc.startIndefinite(cborTypeTextString)
}

// StartIndefiniteArray starts array encoding of indefinite length.
// Subsequent calls of (*Encoder).Encode() encodes elements of the array
// until EndIndefinite is called.
func (enc *Encoder) StartIndefiniteArray() error {
	return enc.startIndefinite(cborTypeArray)
}

// StartIndefiniteMap starts map encoding of indefinite length.
// Subsequent calls of (*Encoder).Encode() encodes elements of the map
// until EndIndefinite is called.
func (enc *Encoder) StartIndefiniteMap() error {
	return enc.startIndefinite(cborTypeMap)
}

// EndIndefinite closes last opened indefinite length value.
func (enc *Encoder) EndIndefinite() error {
	if len(enc.indefTypes) == 0 {
		return errors.New("cbor: cannot encode \"break\" code outside indefinite length values")
	}
	_, err := enc.w.Write([]byte{cborBreakFlag})
	if err == nil {
		// Pop the container only after the "break" byte was written.
		enc.indefTypes = enc.indefTypes[:len(enc.indefTypes)-1]
	}
	return err
}

// cborIndefHeader maps each indefinite-length-capable major type to its
// one-byte indefinite-length head.
var cborIndefHeader = map[cborType][]byte{
	cborTypeByteString: {cborByteStringWithIndefiniteLengthHead},
	cborTypeTextString: {cborTextStringWithIndefiniteLengthHead},
	cborTypeArray:      {cborArrayWithIndefiniteLengthHead},
	cborTypeMap:        {cborMapWithIndefiniteLengthHead},
}

// startIndefinite writes the indefinite-length head for typ and pushes typ
// onto the open-container stack, honoring the mode's indefinite-length option.
func (enc *Encoder) startIndefinite(typ cborType) error {
	if enc.em.indefLength == IndefLengthForbidden {
		return &IndefiniteLengthError{typ}
	}
	_, err := enc.w.Write(cborIndefHeader[typ])
	if err == nil {
		// Push the container only after the head byte was written.
		enc.indefTypes = append(enc.indefTypes, typ)
	}
	return err
}

// RawMessage is a raw encoded CBOR value.
type RawMessage []byte

// MarshalCBOR returns m or CBOR nil if m is nil.
func (m RawMessage) MarshalCBOR() ([]byte, error) {
	if len(m) == 0 {
		return cborNil, nil
	}
	return m, nil
}

// UnmarshalCBOR creates a copy of data and saves to *m.
func (m *RawMessage) UnmarshalCBOR(data []byte) error {
	if m == nil {
		return errors.New("cbor.RawMessage: UnmarshalCBOR on nil pointer")
	}
	*m = append((*m)[0:0], data...)
	return nil
}
|
||||
260
vendor/github.com/fxamacker/cbor/v2/structfields.go
generated
vendored
Normal file
260
vendor/github.com/fxamacker/cbor/v2/structfields.go
generated
vendored
Normal file
@@ -0,0 +1,260 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.

package cbor

import (
	"reflect"
	"sort"
	"strings"
)

// field describes one encodable/decodable struct field, including its
// resolved CBOR name, traversal index path, and tag-derived options.
type field struct {
	name               string
	nameAsInt          int64 // used by decoder to match field name with CBOR int
	cborName           []byte
	cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3
	idx                []int  // index path from the root struct to this field
	typ                reflect.Type
	ef                 encodeFunc
	ief                isEmptyFunc
	typInfo            *typeInfo // used by decoder to reuse type info
	tagged             bool      // used to choose dominant field (at the same level tagged fields dominate untagged fields)
	omitEmpty          bool      // used to skip empty field
	keyAsInt           bool      // used to encode/decode field name as int
}

type fields []*field

// indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth.
type indexFieldSorter struct {
	fields fields
}

func (x *indexFieldSorter) Len() int {
	return len(x.fields)
}

func (x *indexFieldSorter) Swap(i, j int) {
	x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
}

func (x *indexFieldSorter) Less(i, j int) bool {
	// Compare index paths element-wise; a shorter path that is a prefix of a
	// longer one sorts first (shallower field wins).
	iIdx, jIdx := x.fields[i].idx, x.fields[j].idx
	for k := 0; k < len(iIdx) && k < len(jIdx); k++ {
		if iIdx[k] != jIdx[k] {
			return iIdx[k] < jIdx[k]
		}
	}
	return len(iIdx) <= len(jIdx)
}

// nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag.
type nameLevelAndTagFieldSorter struct {
	fields fields
}

func (x *nameLevelAndTagFieldSorter) Len() int {
	return len(x.fields)
}

func (x *nameLevelAndTagFieldSorter) Swap(i, j int) {
	x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
}

func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool {
	fi, fj := x.fields[i], x.fields[j]
	if fi.name != fj.name {
		return fi.name < fj.name
	}
	if len(fi.idx) != len(fj.idx) {
		// Shallower (less nested) field sorts first.
		return len(fi.idx) < len(fj.idx)
	}
	if fi.tagged != fj.tagged {
		// Tagged field dominates untagged at the same name and depth.
		return fi.tagged
	}
	return i < j // Field i and j have the same name, depth, and tagged status. Nothing else matters.
}

// getFields returns visible fields of struct type t following visibility rules for JSON encoding.
// It also returns the tag options of the special "_" field (struct-level options),
// collected via breadth-first traversal of anonymous (embedded) struct fields.
func getFields(t reflect.Type) (flds fields, structOptions string) {
	// Get special field "_" tag options
	if f, ok := t.FieldByName("_"); ok {
		tag := f.Tag.Get("cbor")
		if tag != "-" {
			structOptions = tag
		}
	}

	// nTypes contains next level anonymous fields' types and indexes
	// (there can be multiple fields of the same type at the same level)
	flds, nTypes := appendFields(t, nil, nil, nil)

	if len(nTypes) > 0 {

		var cTypes map[reflect.Type][][]int // current level anonymous fields' types and indexes
		vTypes := map[reflect.Type]bool{t: true} // visited field types at less nested levels

		for len(nTypes) > 0 {
			cTypes, nTypes = nTypes, nil

			for t, idx := range cTypes {
				// If there are multiple anonymous fields of the same struct type at the same level, all are ignored.
				if len(idx) > 1 {
					continue
				}

				// Anonymous field of the same type at deeper nested level is ignored.
				if vTypes[t] {
					continue
				}
				vTypes[t] = true

				flds, nTypes = appendFields(t, idx[0], flds, nTypes)
			}
		}
	}

	sort.Sort(&nameLevelAndTagFieldSorter{flds})

	// Keep visible fields.
	// After sorting, fields with the same name are adjacent; keep the first of
	// each group only when it strictly dominates the next (unique name,
	// shallower depth, or tagged vs untagged); otherwise the whole group is
	// ambiguous and dropped — matching encoding/json's visibility rules.
	j := 0 // index of next unique field
	for i := 0; i < len(flds); {
		name := flds[i].name
		if i == len(flds)-1 || // last field
			name != flds[i+1].name || // field i has unique field name
			len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1
			(flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not
			flds[j] = flds[i]
			j++
		}

		// Skip fields with the same field name.
		for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive
		}
	}
	if j != len(flds) {
		flds = flds[:j]
	}

	// Sort fields by field index
	sort.Sort(&indexFieldSorter{flds})

	return flds, structOptions
}

// appendFields appends type t's exportable fields to flds and anonymous struct fields to nTypes .
// idx is the index path prefix leading to t from the root struct.
func appendFields(
	t reflect.Type,
	idx []int,
	flds fields,
	nTypes map[reflect.Type][][]int,
) (
	_flds fields,
	_nTypes map[reflect.Type][][]int,
) {
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)

		// Dereference pointer types so embedded *Struct is treated like Struct.
		ft := f.Type
		for ft.Kind() == reflect.Ptr {
			ft = ft.Elem()
		}

		if !isFieldExportable(f, ft.Kind()) {
			continue
		}

		// "cbor" tag takes precedence over "json" tag.
		tag := f.Tag.Get("cbor")
		if tag == "" {
			tag = f.Tag.Get("json")
		}
		if tag == "-" {
			continue
		}

		tagged := tag != ""

		// Parse field tag options
		var tagFieldName string
		var omitempty, keyasint bool
		for j := 0; tag != ""; j++ {
			var token string
			idx := strings.IndexByte(tag, ',')
			if idx == -1 {
				token, tag = tag, ""
			} else {
				token, tag = tag[:idx], tag[idx+1:]
			}
			if j == 0 {
				// First comma-separated token is the field name.
				tagFieldName = token
			} else {
				switch token {
				case "omitempty":
					omitempty = true
				case "keyasint":
					keyasint = true
				}
			}
		}

		fieldName := tagFieldName
		if tagFieldName == "" {
			fieldName = f.Name
		}

		fIdx := make([]int, len(idx)+1)
		copy(fIdx, idx)
		fIdx[len(fIdx)-1] = i

		if !f.Anonymous || ft.Kind() != reflect.Struct || tagFieldName != "" {
			// Regular field, or embedded struct with an explicit tag name
			// (which makes it behave like a named field).
			flds = append(flds, &field{
				name:      fieldName,
				idx:       fIdx,
				typ:       f.Type,
				omitEmpty: omitempty,
				keyAsInt:  keyasint,
				tagged:    tagged})
		} else {
			// Untagged anonymous struct field: queue its type for the next
			// level of the breadth-first traversal in getFields.
			if nTypes == nil {
				nTypes = make(map[reflect.Type][][]int)
			}
			nTypes[ft] = append(nTypes[ft], fIdx)
		}
	}

	return flds, nTypes
}

// isFieldExportable returns true if f is an exportable (regular or anonymous) field or
// a nonexportable anonymous field of struct type.
// Nonexportable anonymous field of struct type can contain exportable fields.
func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam
	exportable := f.PkgPath == ""
	return exportable || (f.Anonymous && fk == reflect.Struct)
}

type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error)

// getFieldValue returns field value of struct v by index. When encountering null pointer
// to anonymous (embedded) struct field, f is called with the last traversed field value.
func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv reflect.Value, err error) {
	fv = v
	for i, n := range idx {
		fv = fv.Field(n)

		if i < len(idx)-1 {
			if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct {
				if fv.IsNil() {
					// Null pointer to embedded struct field
					fv, err = f(fv)
					if err != nil || !fv.IsValid() {
						return fv, err
					}
				}
				fv = fv.Elem()
			}
		}
	}
	return fv, nil
}
|
||||
299
vendor/github.com/fxamacker/cbor/v2/tag.go
generated
vendored
Normal file
299
vendor/github.com/fxamacker/cbor/v2/tag.go
generated
vendored
Normal file
@@ -0,0 +1,299 @@
|
||||
package cbor

import (
	"errors"
	"fmt"
	"reflect"
	"sync"
)

// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and
// unmarshaling of tag content is subject to any encode and decode options that would apply to
// enclosed data item if it were to appear outside of a tag.
type Tag struct {
	Number  uint64
	Content interface{}
}

// RawTag represents CBOR tag data, including tag number and raw tag content.
// RawTag implements Unmarshaler and Marshaler interfaces.
type RawTag struct {
	Number  uint64
	Content RawMessage
}

// UnmarshalCBOR sets *t with tag number and raw tag content copied from data.
func (t *RawTag) UnmarshalCBOR(data []byte) error {
	if t == nil {
		return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
	}

	// Decoding CBOR null and undefined to cbor.RawTag is no-op.
	if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
		return nil
	}

	d := decoder{data: data, dm: defaultDecMode}

	// Unmarshal tag number.
	typ, _, num := d.getHead()
	if typ != cborTypeTag {
		return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeRawTag.String()}
	}
	t.Number = num

	// Unmarshal tag content.
	// Copy the remaining bytes so t.Content does not alias the caller's data.
	c := d.data[d.off:]
	t.Content = make([]byte, len(c))
	copy(t.Content, c)
	return nil
}

// MarshalCBOR returns CBOR encoding of t.
func (t RawTag) MarshalCBOR() ([]byte, error) {
	if t.Number == 0 && len(t.Content) == 0 {
		// Marshal uninitialized cbor.RawTag
		b := make([]byte, len(cborNil))
		copy(b, cborNil)
		return b, nil
	}

	e := getEncodeBuffer()

	encodeHead(e, byte(cborTypeTag), t.Number)

	// Empty content is encoded as CBOR nil to keep the output well-formed.
	content := t.Content
	if len(content) == 0 {
		content = cborNil
	}

	buf := make([]byte, len(e.Bytes())+len(content))
	n := copy(buf, e.Bytes())
	copy(buf[n:], content)

	putEncodeBuffer(e)
	return buf, nil
}

// DecTagMode specifies how decoder handles tag number.
type DecTagMode int

const (
	// DecTagIgnored makes decoder ignore tag number (skips if present).
	DecTagIgnored DecTagMode = iota

	// DecTagOptional makes decoder verify tag number if it's present.
	DecTagOptional

	// DecTagRequired makes decoder verify tag number and tag number must be present.
	DecTagRequired

	maxDecTagMode
)

// valid reports whether dtm is one of the defined DecTagMode values.
func (dtm DecTagMode) valid() bool {
	return dtm >= 0 && dtm < maxDecTagMode
}

// EncTagMode specifies how encoder handles tag number.
type EncTagMode int

const (
	// EncTagNone makes encoder not encode tag number.
	EncTagNone EncTagMode = iota

	// EncTagRequired makes encoder encode tag number.
	EncTagRequired

	maxEncTagMode
)

// valid reports whether etm is one of the defined EncTagMode values.
func (etm EncTagMode) valid() bool {
	return etm >= 0 && etm < maxEncTagMode
}

// TagOptions specifies how encoder and decoder handle tag number.
type TagOptions struct {
	DecTag DecTagMode
	EncTag EncTagMode
}

// TagSet is an interface to add and remove tag info. It is used by EncMode and DecMode
// to provide CBOR tag support.
type TagSet interface {
	// Add adds given tag number(s), content type, and tag options to TagSet.
	Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error

	// Remove removes given tag content type from TagSet.
	Remove(contentType reflect.Type)

	tagProvider
}

// tagProvider is the read-side lookup interface used internally by the codec.
type tagProvider interface {
	getTagItemFromType(t reflect.Type) *tagItem
	getTypeFromTagNum(num []uint64) reflect.Type
}

// tagItem stores one registered tag: its (possibly nested) tag numbers, their
// pre-encoded CBOR form, the associated Go content type, and the tag options.
type tagItem struct {
	num         []uint64
	cborTagNum  []byte
	contentType reflect.Type
	opts        TagOptions
}

// equalTagNum reports whether t's tag number sequence equals num.
func (t *tagItem) equalTagNum(num []uint64) bool {
	// Fast path to compare 1 tag number
	if len(t.num) == 1 && len(num) == 1 && t.num[0] == num[0] {
		return true
	}

	if len(t.num) != len(num) {
		return false
	}

	for i := 0; i < len(t.num); i++ {
		if t.num[i] != num[i] {
			return false
		}
	}

	return true
}

type (
	// tagSet maps a registered content type to its tag info (not safe for
	// concurrent use on its own; see syncTagSet).
	tagSet map[reflect.Type]*tagItem

	// syncTagSet wraps tagSet with an RWMutex for concurrent use.
	syncTagSet struct {
		sync.RWMutex
		t tagSet
	}
)

func (t tagSet) getTagItemFromType(typ reflect.Type) *tagItem {
	return t[typ]
}

func (t tagSet) getTypeFromTagNum(num []uint64) reflect.Type {
	// Linear scan: tag sets are expected to be small.
	for typ, tag := range t {
		if tag.equalTagNum(num) {
			return typ
		}
	}
	return nil
}

// NewTagSet returns TagSet (safe for concurrency).
func NewTagSet() TagSet {
	return &syncTagSet{t: make(map[reflect.Type]*tagItem)}
}

// Add adds given tag number(s), content type, and tag options to TagSet.
// It rejects duplicate content types and duplicate tag number sequences.
func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error {
	if contentType == nil {
		return errors.New("cbor: cannot add nil content type to TagSet")
	}
	for contentType.Kind() == reflect.Ptr {
		contentType = contentType.Elem()
	}
	tag, err := newTagItem(opts, contentType, num, nestedNum...)
	if err != nil {
		return err
	}
	t.Lock()
	defer t.Unlock()
	for typ, ti := range t.t {
		if typ == contentType {
			return errors.New("cbor: content type " + contentType.String() + " already exists in TagSet")
		}
		if ti.equalTagNum(tag.num) {
			return fmt.Errorf("cbor: tag number %v already exists in TagSet", tag.num)
		}
	}
	t.t[contentType] = tag
	return nil
}

// Remove removes given tag content type from TagSet.
func (t *syncTagSet) Remove(contentType reflect.Type) {
	for contentType.Kind() == reflect.Ptr {
		contentType = contentType.Elem()
	}
	t.Lock()
	delete(t.t, contentType)
	t.Unlock()
}

func (t *syncTagSet) getTagItemFromType(typ reflect.Type) *tagItem {
	t.RLock()
	ti := t.t[typ]
	t.RUnlock()
	return ti
}

func (t *syncTagSet) getTypeFromTagNum(num []uint64) reflect.Type {
	t.RLock()
	rt := t.t.getTypeFromTagNum(num)
	t.RUnlock()
	return rt
}
|
||||
|
||||
func newTagItem(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) (*tagItem, error) {
|
||||
if opts.DecTag == DecTagIgnored && opts.EncTag == EncTagNone {
|
||||
return nil, errors.New("cbor: cannot add tag with DecTagIgnored and EncTagNone options to TagSet")
|
||||
}
|
||||
if contentType.PkgPath() == "" || contentType.Kind() == reflect.Interface {
|
||||
return nil, errors.New("cbor: can only add named types to TagSet, got " + contentType.String())
|
||||
}
|
||||
if contentType == typeTime {
|
||||
return nil, errors.New("cbor: cannot add time.Time to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead")
|
||||
}
|
||||
if contentType == typeBigInt {
|
||||
return nil, errors.New("cbor: cannot add big.Int to TagSet, it's built-in and supported automatically")
|
||||
}
|
||||
if contentType == typeTag {
|
||||
return nil, errors.New("cbor: cannot add cbor.Tag to TagSet")
|
||||
}
|
||||
if contentType == typeRawTag {
|
||||
return nil, errors.New("cbor: cannot add cbor.RawTag to TagSet")
|
||||
}
|
||||
if num == 0 || num == 1 {
|
||||
return nil, errors.New("cbor: cannot add tag number 0 or 1 to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead")
|
||||
}
|
||||
if num == 2 || num == 3 {
|
||||
return nil, errors.New("cbor: cannot add tag number 2 or 3 to TagSet, it's built-in and supported automatically")
|
||||
}
|
||||
if num == tagNumSelfDescribedCBOR {
|
||||
return nil, errors.New("cbor: cannot add tag number 55799 to TagSet, it's built-in and ignored automatically")
|
||||
}
|
||||
|
||||
te := tagItem{num: []uint64{num}, opts: opts, contentType: contentType}
|
||||
te.num = append(te.num, nestedNum...)
|
||||
|
||||
// Cache encoded tag numbers
|
||||
e := getEncodeBuffer()
|
||||
for _, n := range te.num {
|
||||
encodeHead(e, byte(cborTypeTag), n)
|
||||
}
|
||||
te.cborTagNum = make([]byte, e.Len())
|
||||
copy(te.cborTagNum, e.Bytes())
|
||||
putEncodeBuffer(e)
|
||||
|
||||
return &te, nil
|
||||
}
|
||||
|
||||
var (
|
||||
typeTag = reflect.TypeOf(Tag{})
|
||||
typeRawTag = reflect.TypeOf(RawTag{})
|
||||
)
|
||||
|
||||
// WrongTagError describes mismatch between CBOR tag and registered tag.
|
||||
type WrongTagError struct {
|
||||
RegisteredType reflect.Type
|
||||
RegisteredTagNum []uint64
|
||||
TagNum []uint64
|
||||
}
|
||||
|
||||
func (e *WrongTagError) Error() string {
|
||||
return fmt.Sprintf("cbor: wrong tag number for %s, got %v, expected %v", e.RegisteredType.String(), e.TagNum, e.RegisteredTagNum)
|
||||
}
|
||||
394
vendor/github.com/fxamacker/cbor/v2/valid.go
generated
vendored
Normal file
394
vendor/github.com/fxamacker/cbor/v2/valid.go
generated
vendored
Normal file
@@ -0,0 +1,394 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"math"
|
||||
"strconv"
|
||||
|
||||
"github.com/x448/float16"
|
||||
)
|
||||
|
||||
// SyntaxError is a description of a CBOR syntax error.
|
||||
type SyntaxError struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e *SyntaxError) Error() string { return e.msg }
|
||||
|
||||
// SemanticError is a description of a CBOR semantic error.
|
||||
type SemanticError struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e *SemanticError) Error() string { return e.msg }
|
||||
|
||||
// MaxNestedLevelError indicates exceeded max nested level of any combination of CBOR arrays/maps/tags.
|
||||
type MaxNestedLevelError struct {
|
||||
maxNestedLevels int
|
||||
}
|
||||
|
||||
func (e *MaxNestedLevelError) Error() string {
|
||||
return "cbor: exceeded max nested level " + strconv.Itoa(e.maxNestedLevels)
|
||||
}
|
||||
|
||||
// MaxArrayElementsError indicates exceeded max number of elements for CBOR arrays.
|
||||
type MaxArrayElementsError struct {
|
||||
maxArrayElements int
|
||||
}
|
||||
|
||||
func (e *MaxArrayElementsError) Error() string {
|
||||
return "cbor: exceeded max number of elements " + strconv.Itoa(e.maxArrayElements) + " for CBOR array"
|
||||
}
|
||||
|
||||
// MaxMapPairsError indicates exceeded max number of key-value pairs for CBOR maps.
|
||||
type MaxMapPairsError struct {
|
||||
maxMapPairs int
|
||||
}
|
||||
|
||||
func (e *MaxMapPairsError) Error() string {
|
||||
return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map"
|
||||
}
|
||||
|
||||
// IndefiniteLengthError indicates found disallowed indefinite length items.
|
||||
type IndefiniteLengthError struct {
|
||||
t cborType
|
||||
}
|
||||
|
||||
func (e *IndefiniteLengthError) Error() string {
|
||||
return "cbor: indefinite-length " + e.t.String() + " isn't allowed"
|
||||
}
|
||||
|
||||
// TagsMdError indicates found disallowed CBOR tags.
|
||||
type TagsMdError struct {
|
||||
}
|
||||
|
||||
func (e *TagsMdError) Error() string {
|
||||
return "cbor: CBOR tag isn't allowed"
|
||||
}
|
||||
|
||||
// ExtraneousDataError indicates found extraneous data following well-formed CBOR data item.
|
||||
type ExtraneousDataError struct {
|
||||
numOfBytes int // number of bytes of extraneous data
|
||||
index int // location of extraneous data
|
||||
}
|
||||
|
||||
func (e *ExtraneousDataError) Error() string {
|
||||
return "cbor: " + strconv.Itoa(e.numOfBytes) + " bytes of extraneous data starting at index " + strconv.Itoa(e.index)
|
||||
}
|
||||
|
||||
// wellformed checks whether the CBOR data item is well-formed.
|
||||
// allowExtraData indicates if extraneous data is allowed after the CBOR data item.
|
||||
// - use allowExtraData = true when using Decoder.Decode()
|
||||
// - use allowExtraData = false when using Unmarshal()
|
||||
func (d *decoder) wellformed(allowExtraData bool, checkBuiltinTags bool) error {
|
||||
if len(d.data) == d.off {
|
||||
return io.EOF
|
||||
}
|
||||
_, err := d.wellformedInternal(0, checkBuiltinTags)
|
||||
if err == nil {
|
||||
if !allowExtraData && d.off != len(d.data) {
|
||||
err = &ExtraneousDataError{len(d.data) - d.off, d.off}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// wellformedInternal checks data's well-formedness and returns max depth and error.
|
||||
func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, error) { //nolint:gocyclo
|
||||
t, _, val, indefiniteLength, err := d.wellformedHeadWithIndefiniteLengthFlag()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
switch t {
|
||||
case cborTypeByteString, cborTypeTextString:
|
||||
if indefiniteLength {
|
||||
if d.dm.indefLength == IndefLengthForbidden {
|
||||
return 0, &IndefiniteLengthError{t}
|
||||
}
|
||||
return d.wellformedIndefiniteString(t, depth, checkBuiltinTags)
|
||||
}
|
||||
valInt := int(val)
|
||||
if valInt < 0 {
|
||||
// Detect integer overflow
|
||||
return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow")
|
||||
}
|
||||
if len(d.data)-d.off < valInt { // valInt+off may overflow integer
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
d.off += valInt
|
||||
|
||||
case cborTypeArray, cborTypeMap:
|
||||
depth++
|
||||
if depth > d.dm.maxNestedLevels {
|
||||
return 0, &MaxNestedLevelError{d.dm.maxNestedLevels}
|
||||
}
|
||||
|
||||
if indefiniteLength {
|
||||
if d.dm.indefLength == IndefLengthForbidden {
|
||||
return 0, &IndefiniteLengthError{t}
|
||||
}
|
||||
return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags)
|
||||
}
|
||||
|
||||
valInt := int(val)
|
||||
if valInt < 0 {
|
||||
// Detect integer overflow
|
||||
return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow")
|
||||
}
|
||||
|
||||
if t == cborTypeArray {
|
||||
if valInt > d.dm.maxArrayElements {
|
||||
return 0, &MaxArrayElementsError{d.dm.maxArrayElements}
|
||||
}
|
||||
} else {
|
||||
if valInt > d.dm.maxMapPairs {
|
||||
return 0, &MaxMapPairsError{d.dm.maxMapPairs}
|
||||
}
|
||||
}
|
||||
|
||||
count := 1
|
||||
if t == cborTypeMap {
|
||||
count = 2
|
||||
}
|
||||
maxDepth := depth
|
||||
for j := 0; j < count; j++ {
|
||||
for i := 0; i < valInt; i++ {
|
||||
var dpt int
|
||||
if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if dpt > maxDepth {
|
||||
maxDepth = dpt // Save max depth
|
||||
}
|
||||
}
|
||||
}
|
||||
depth = maxDepth
|
||||
|
||||
case cborTypeTag:
|
||||
if d.dm.tagsMd == TagsForbidden {
|
||||
return 0, &TagsMdError{}
|
||||
}
|
||||
|
||||
tagNum := val
|
||||
|
||||
// Scan nested tag numbers to avoid recursion.
|
||||
for {
|
||||
if len(d.data) == d.off { // Tag number must be followed by tag content.
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
if checkBuiltinTags {
|
||||
err = validBuiltinTag(tagNum, d.data[d.off])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
if d.dm.bignumTag == BignumTagForbidden && (tagNum == 2 || tagNum == 3) {
|
||||
return 0, &UnacceptableDataItemError{
|
||||
CBORType: cborTypeTag.String(),
|
||||
Message: "bignum",
|
||||
}
|
||||
}
|
||||
if getType(d.data[d.off]) != cborTypeTag {
|
||||
break
|
||||
}
|
||||
if _, _, tagNum, err = d.wellformedHead(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
depth++
|
||||
if depth > d.dm.maxNestedLevels {
|
||||
return 0, &MaxNestedLevelError{d.dm.maxNestedLevels}
|
||||
}
|
||||
}
|
||||
// Check tag content.
|
||||
return d.wellformedInternal(depth, checkBuiltinTags)
|
||||
}
|
||||
|
||||
return depth, nil
|
||||
}
|
||||
|
||||
// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error.
|
||||
func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) {
|
||||
var err error
|
||||
for {
|
||||
if len(d.data) == d.off {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
if isBreakFlag(d.data[d.off]) {
|
||||
d.off++
|
||||
break
|
||||
}
|
||||
// Peek ahead to get next type and indefinite length status.
|
||||
nt, ai := parseInitialByte(d.data[d.off])
|
||||
if t != nt {
|
||||
return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()}
|
||||
}
|
||||
if additionalInformation(ai).isIndefiniteLength() {
|
||||
return 0, &SyntaxError{"cbor: indefinite-length " + t.String() + " chunk is not definite-length"}
|
||||
}
|
||||
if depth, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return depth, nil
|
||||
}
|
||||
|
||||
// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error.
|
||||
func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) {
|
||||
var err error
|
||||
maxDepth := depth
|
||||
i := 0
|
||||
for {
|
||||
if len(d.data) == d.off {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
if isBreakFlag(d.data[d.off]) {
|
||||
d.off++
|
||||
break
|
||||
}
|
||||
var dpt int
|
||||
if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if dpt > maxDepth {
|
||||
maxDepth = dpt
|
||||
}
|
||||
i++
|
||||
if t == cborTypeArray {
|
||||
if i > d.dm.maxArrayElements {
|
||||
return 0, &MaxArrayElementsError{d.dm.maxArrayElements}
|
||||
}
|
||||
} else {
|
||||
if i%2 == 0 && i/2 > d.dm.maxMapPairs {
|
||||
return 0, &MaxMapPairsError{d.dm.maxMapPairs}
|
||||
}
|
||||
}
|
||||
}
|
||||
if t == cborTypeMap && i%2 == 1 {
|
||||
return 0, &SyntaxError{"cbor: unexpected \"break\" code"}
|
||||
}
|
||||
return maxDepth, nil
|
||||
}
|
||||
|
||||
func (d *decoder) wellformedHeadWithIndefiniteLengthFlag() (
|
||||
t cborType,
|
||||
ai byte,
|
||||
val uint64,
|
||||
indefiniteLength bool,
|
||||
err error,
|
||||
) {
|
||||
t, ai, val, err = d.wellformedHead()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
indefiniteLength = additionalInformation(ai).isIndefiniteLength()
|
||||
return
|
||||
}
|
||||
|
||||
func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) {
|
||||
dataLen := len(d.data) - d.off
|
||||
if dataLen == 0 {
|
||||
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
t, ai = parseInitialByte(d.data[d.off])
|
||||
val = uint64(ai)
|
||||
d.off++
|
||||
dataLen--
|
||||
|
||||
if ai <= maxAdditionalInformationWithoutArgument {
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
if ai == additionalInformationWith1ByteArgument {
|
||||
const argumentSize = 1
|
||||
if dataLen < argumentSize {
|
||||
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
val = uint64(d.data[d.off])
|
||||
d.off++
|
||||
if t == cborTypePrimitives && val < 32 {
|
||||
return 0, 0, 0, &SyntaxError{"cbor: invalid simple value " + strconv.Itoa(int(val)) + " for type " + t.String()}
|
||||
}
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
if ai == additionalInformationWith2ByteArgument {
|
||||
const argumentSize = 2
|
||||
if dataLen < argumentSize {
|
||||
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize]))
|
||||
d.off += argumentSize
|
||||
if t == cborTypePrimitives {
|
||||
if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
}
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
if ai == additionalInformationWith4ByteArgument {
|
||||
const argumentSize = 4
|
||||
if dataLen < argumentSize {
|
||||
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize]))
|
||||
d.off += argumentSize
|
||||
if t == cborTypePrimitives {
|
||||
if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
}
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
if ai == additionalInformationWith8ByteArgument {
|
||||
const argumentSize = 8
|
||||
if dataLen < argumentSize {
|
||||
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize])
|
||||
d.off += argumentSize
|
||||
if t == cborTypePrimitives {
|
||||
if err := d.acceptableFloat(math.Float64frombits(val)); err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
}
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
if additionalInformation(ai).isIndefiniteLength() {
|
||||
switch t {
|
||||
case cborTypePositiveInt, cborTypeNegativeInt, cborTypeTag:
|
||||
return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()}
|
||||
case cborTypePrimitives: // 0xff (break code) should not be outside wellformedIndefinite().
|
||||
return 0, 0, 0, &SyntaxError{"cbor: unexpected \"break\" code"}
|
||||
}
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
// ai == 28, 29, 30
|
||||
return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()}
|
||||
}
|
||||
|
||||
func (d *decoder) acceptableFloat(f float64) error {
|
||||
switch {
|
||||
case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f):
|
||||
return &UnacceptableDataItemError{
|
||||
CBORType: cborTypePrimitives.String(),
|
||||
Message: "floating-point NaN",
|
||||
}
|
||||
case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0):
|
||||
return &UnacceptableDataItemError{
|
||||
CBORType: cborTypePrimitives.String(),
|
||||
Message: "floating-point infinity",
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
61
vendor/github.com/go-logr/logr/slogr/slogr.go
generated
vendored
61
vendor/github.com/go-logr/logr/slogr/slogr.go
generated
vendored
@@ -1,61 +0,0 @@
|
||||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2023 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package slogr enables usage of a slog.Handler with logr.Logger as front-end
|
||||
// API and of a logr.LogSink through the slog.Handler and thus slog.Logger
|
||||
// APIs.
|
||||
//
|
||||
// See the README in the top-level [./logr] package for a discussion of
|
||||
// interoperability.
|
||||
//
|
||||
// Deprecated: use the main logr package instead.
|
||||
package slogr
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
// NewLogr returns a logr.Logger which writes to the slog.Handler.
|
||||
//
|
||||
// Deprecated: use [logr.FromSlogHandler] instead.
|
||||
func NewLogr(handler slog.Handler) logr.Logger {
|
||||
return logr.FromSlogHandler(handler)
|
||||
}
|
||||
|
||||
// NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger.
|
||||
//
|
||||
// Deprecated: use [logr.ToSlogHandler] instead.
|
||||
func NewSlogHandler(logger logr.Logger) slog.Handler {
|
||||
return logr.ToSlogHandler(logger)
|
||||
}
|
||||
|
||||
// ToSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger.
|
||||
//
|
||||
// Deprecated: use [logr.ToSlogHandler] instead.
|
||||
func ToSlogHandler(logger logr.Logger) slog.Handler {
|
||||
return logr.ToSlogHandler(logger)
|
||||
}
|
||||
|
||||
// SlogSink is an optional interface that a LogSink can implement to support
|
||||
// logging through the slog.Logger or slog.Handler APIs better.
|
||||
//
|
||||
// Deprecated: use [logr.SlogSink] instead.
|
||||
type SlogSink = logr.SlogSink
|
||||
27
vendor/github.com/google/go-cmp/LICENSE
generated
vendored
Normal file
27
vendor/github.com/google/go-cmp/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2017 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
671
vendor/github.com/google/go-cmp/cmp/compare.go
generated
vendored
Normal file
671
vendor/github.com/google/go-cmp/cmp/compare.go
generated
vendored
Normal file
@@ -0,0 +1,671 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package cmp determines equality of values.
|
||||
//
|
||||
// This package is intended to be a more powerful and safer alternative to
|
||||
// [reflect.DeepEqual] for comparing whether two values are semantically equal.
|
||||
// It is intended to only be used in tests, as performance is not a goal and
|
||||
// it may panic if it cannot compare the values. Its propensity towards
|
||||
// panicking means that its unsuitable for production environments where a
|
||||
// spurious panic may be fatal.
|
||||
//
|
||||
// The primary features of cmp are:
|
||||
//
|
||||
// - When the default behavior of equality does not suit the test's needs,
|
||||
// custom equality functions can override the equality operation.
|
||||
// For example, an equality function may report floats as equal so long as
|
||||
// they are within some tolerance of each other.
|
||||
//
|
||||
// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method
|
||||
// to determine equality. This allows package authors to determine
|
||||
// the equality operation for the types that they define.
|
||||
//
|
||||
// - If no custom equality functions are used and no Equal method is defined,
|
||||
// equality is determined by recursively comparing the primitive kinds on
|
||||
// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual],
|
||||
// unexported fields are not compared by default; they result in panics
|
||||
// unless suppressed by using an [Ignore] option
|
||||
// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported])
|
||||
// or explicitly compared using the [Exporter] option.
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/diff"
|
||||
"github.com/google/go-cmp/cmp/internal/function"
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
// TODO(≥go1.18): Use any instead of interface{}.
|
||||
|
||||
// Equal reports whether x and y are equal by recursively applying the
|
||||
// following rules in the given order to x and y and all of their sub-values:
|
||||
//
|
||||
// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that
|
||||
// remain after applying all path filters, value filters, and type filters.
|
||||
// If at least one [Ignore] exists in S, then the comparison is ignored.
|
||||
// If the number of [Transformer] and [Comparer] options in S is non-zero,
|
||||
// then Equal panics because it is ambiguous which option to use.
|
||||
// If S contains a single [Transformer], then use that to transform
|
||||
// the current values and recursively call Equal on the output values.
|
||||
// If S contains a single [Comparer], then use that to compare the current values.
|
||||
// Otherwise, evaluation proceeds to the next rule.
|
||||
//
|
||||
// - If the values have an Equal method of the form "(T) Equal(T) bool" or
|
||||
// "(T) Equal(I) bool" where T is assignable to I, then use the result of
|
||||
// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
|
||||
// evaluation proceeds to the next rule.
|
||||
//
|
||||
// - Lastly, try to compare x and y based on their basic kinds.
|
||||
// Simple kinds like booleans, integers, floats, complex numbers, strings,
|
||||
// and channels are compared using the equivalent of the == operator in Go.
|
||||
// Functions are only equal if they are both nil, otherwise they are unequal.
|
||||
//
|
||||
// Structs are equal if recursively calling Equal on all fields report equal.
|
||||
// If a struct contains unexported fields, Equal panics unless an [Ignore] option
|
||||
// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field
|
||||
// or the [Exporter] option explicitly permits comparing the unexported field.
|
||||
//
|
||||
// Slices are equal if they are both nil or both non-nil, where recursively
|
||||
// calling Equal on all non-ignored slice or array elements report equal.
|
||||
// Empty non-nil slices and nil slices are not equal; to equate empty slices,
|
||||
// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
|
||||
//
|
||||
// Maps are equal if they are both nil or both non-nil, where recursively
|
||||
// calling Equal on all non-ignored map entries report equal.
|
||||
// Map keys are equal according to the == operator.
|
||||
// To use custom comparisons for map keys, consider using
|
||||
// [github.com/google/go-cmp/cmp/cmpopts.SortMaps].
|
||||
// Empty non-nil maps and nil maps are not equal; to equate empty maps,
|
||||
// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
|
||||
//
|
||||
// Pointers and interfaces are equal if they are both nil or both non-nil,
|
||||
// where they have the same underlying concrete type and recursively
|
||||
// calling Equal on the underlying values reports equal.
|
||||
//
|
||||
// Before recursing into a pointer, slice element, or map, the current path
|
||||
// is checked to detect whether the address has already been visited.
|
||||
// If there is a cycle, then the pointed at values are considered equal
|
||||
// only if both addresses were previously visited in the same path step.
|
||||
func Equal(x, y interface{}, opts ...Option) bool {
|
||||
s := newState(opts)
|
||||
s.compareAny(rootStep(x, y))
|
||||
return s.result.Equal()
|
||||
}
|
||||
|
||||
// Diff returns a human-readable report of the differences between two values:
|
||||
// y - x. It returns an empty string if and only if Equal returns true for the
|
||||
// same input values and options.
|
||||
//
|
||||
// The output is displayed as a literal in pseudo-Go syntax.
|
||||
// At the start of each line, a "-" prefix indicates an element removed from x,
|
||||
// a "+" prefix to indicates an element added from y, and the lack of a prefix
|
||||
// indicates an element common to both x and y. If possible, the output
|
||||
// uses fmt.Stringer.String or error.Error methods to produce more humanly
|
||||
// readable outputs. In such cases, the string is prefixed with either an
|
||||
// 's' or 'e' character, respectively, to indicate that the method was called.
|
||||
//
|
||||
// Do not depend on this output being stable. If you need the ability to
|
||||
// programmatically interpret the difference, consider using a custom Reporter.
|
||||
func Diff(x, y interface{}, opts ...Option) string {
|
||||
s := newState(opts)
|
||||
|
||||
// Optimization: If there are no other reporters, we can optimize for the
|
||||
// common case where the result is equal (and thus no reported difference).
|
||||
// This avoids the expensive construction of a difference tree.
|
||||
if len(s.reporters) == 0 {
|
||||
s.compareAny(rootStep(x, y))
|
||||
if s.result.Equal() {
|
||||
return ""
|
||||
}
|
||||
s.result = diff.Result{} // Reset results
|
||||
}
|
||||
|
||||
r := new(defaultReporter)
|
||||
s.reporters = append(s.reporters, reporter{r})
|
||||
s.compareAny(rootStep(x, y))
|
||||
d := r.String()
|
||||
if (d == "") != s.result.Equal() {
|
||||
panic("inconsistent difference and equality results")
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// rootStep constructs the first path step. If x and y have differing types,
|
||||
// then they are stored within an empty interface type.
|
||||
func rootStep(x, y interface{}) PathStep {
|
||||
vx := reflect.ValueOf(x)
|
||||
vy := reflect.ValueOf(y)
|
||||
|
||||
// If the inputs are different types, auto-wrap them in an empty interface
|
||||
// so that they have the same parent type.
|
||||
var t reflect.Type
|
||||
if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
|
||||
t = anyType
|
||||
if vx.IsValid() {
|
||||
vvx := reflect.New(t).Elem()
|
||||
vvx.Set(vx)
|
||||
vx = vvx
|
||||
}
|
||||
if vy.IsValid() {
|
||||
vvy := reflect.New(t).Elem()
|
||||
vvy.Set(vy)
|
||||
vy = vvy
|
||||
}
|
||||
} else {
|
||||
t = vx.Type()
|
||||
}
|
||||
|
||||
return &pathStep{t, vx, vy}
|
||||
}
|
||||
|
||||
type state struct {
|
||||
// These fields represent the "comparison state".
|
||||
// Calling statelessCompare must not result in observable changes to these.
|
||||
result diff.Result // The current result of comparison
|
||||
curPath Path // The current path in the value tree
|
||||
curPtrs pointerPath // The current set of visited pointers
|
||||
reporters []reporter // Optional reporters
|
||||
|
||||
// recChecker checks for infinite cycles applying the same set of
|
||||
// transformers upon the output of itself.
|
||||
recChecker recChecker
|
||||
|
||||
// dynChecker triggers pseudo-random checks for option correctness.
|
||||
// It is safe for statelessCompare to mutate this value.
|
||||
dynChecker dynChecker
|
||||
|
||||
// These fields, once set by processOption, will not change.
|
||||
exporters []exporter // List of exporters for structs with unexported fields
|
||||
opts Options // List of all fundamental and filter options
|
||||
}
|
||||
|
||||
func newState(opts []Option) *state {
|
||||
// Always ensure a validator option exists to validate the inputs.
|
||||
s := &state{opts: Options{validator{}}}
|
||||
s.curPtrs.Init()
|
||||
s.processOption(Options(opts))
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *state) processOption(opt Option) {
|
||||
switch opt := opt.(type) {
|
||||
case nil:
|
||||
case Options:
|
||||
for _, o := range opt {
|
||||
s.processOption(o)
|
||||
}
|
||||
case coreOption:
|
||||
type filtered interface {
|
||||
isFiltered() bool
|
||||
}
|
||||
if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
|
||||
panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
|
||||
}
|
||||
s.opts = append(s.opts, opt)
|
||||
case exporter:
|
||||
s.exporters = append(s.exporters, opt)
|
||||
case reporter:
|
||||
s.reporters = append(s.reporters, opt)
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown option %T", opt))
|
||||
}
|
||||
}
|
||||
|
||||
// statelessCompare compares two values and returns the result.
|
||||
// This function is stateless in that it does not alter the current result,
|
||||
// or output to any registered reporters.
|
||||
func (s *state) statelessCompare(step PathStep) diff.Result {
|
||||
// We do not save and restore curPath and curPtrs because all of the
|
||||
// compareX methods should properly push and pop from them.
|
||||
// It is an implementation bug if the contents of the paths differ from
|
||||
// when calling this function to when returning from it.
|
||||
|
||||
oldResult, oldReporters := s.result, s.reporters
|
||||
s.result = diff.Result{} // Reset result
|
||||
s.reporters = nil // Remove reporters to avoid spurious printouts
|
||||
s.compareAny(step)
|
||||
res := s.result
|
||||
s.result, s.reporters = oldResult, oldReporters
|
||||
return res
|
||||
}
|
||||
|
||||
// compareAny compares the pair of values carried by step by applying,
// in order: per-element cycle detection for slices, user options
// (Rule 1), a type-provided Equal method (Rule 2), and finally a
// kind-based structural comparison (Rule 3).
func (s *state) compareAny(step PathStep) {
	// Update the path stack.
	s.curPath.push(step)
	defer s.curPath.pop()
	// NOTE: the deferred PopStep calls inside this loop intentionally run
	// at function return; each compareAny invocation handles one step.
	for _, r := range s.reporters {
		r.PushStep(step)
		defer r.PopStep()
	}
	s.recChecker.Check(s.curPath)

	// Cycle-detection for slice elements (see NOTE in compareSlice).
	t := step.Type()
	vx, vy := step.Values()
	if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() {
		px, py := vx.Addr(), vy.Addr()
		if eq, visited := s.curPtrs.Push(px, py); visited {
			s.report(eq, reportByCycle)
			return
		}
		defer s.curPtrs.Pop(px, py)
	}

	// Rule 1: Check whether an option applies on this node in the value tree.
	if s.tryOptions(t, vx, vy) {
		return
	}

	// Rule 2: Check whether the type has a valid Equal method.
	if s.tryMethod(t, vx, vy) {
		return
	}

	// Rule 3: Compare based on the underlying kind.
	switch t.Kind() {
	case reflect.Bool:
		s.report(vx.Bool() == vy.Bool(), 0)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		s.report(vx.Int() == vy.Int(), 0)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		s.report(vx.Uint() == vy.Uint(), 0)
	case reflect.Float32, reflect.Float64:
		s.report(vx.Float() == vy.Float(), 0)
	case reflect.Complex64, reflect.Complex128:
		s.report(vx.Complex() == vy.Complex(), 0)
	case reflect.String:
		s.report(vx.String() == vy.String(), 0)
	case reflect.Chan, reflect.UnsafePointer:
		// Channels and unsafe pointers compare by identity.
		s.report(vx.Pointer() == vy.Pointer(), 0)
	case reflect.Func:
		// Functions are only equal when both are nil.
		s.report(vx.IsNil() && vy.IsNil(), 0)
	case reflect.Struct:
		s.compareStruct(t, vx, vy)
	case reflect.Slice, reflect.Array:
		s.compareSlice(t, vx, vy)
	case reflect.Map:
		s.compareMap(t, vx, vy)
	case reflect.Ptr:
		s.comparePtr(t, vx, vy)
	case reflect.Interface:
		s.compareInterface(t, vx, vy)
	default:
		panic(fmt.Sprintf("%v kind not handled", t.Kind()))
	}
}
|
||||
|
||||
// tryOptions applies the first user option whose filters match the
// current node. It reports whether an option handled the comparison.
func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool {
	// Evaluate all filters and apply the remaining options.
	if opt := s.opts.filter(s, t, vx, vy); opt != nil {
		opt.apply(s, vx, vy)
		return true
	}
	return false
}
|
||||
|
||||
// tryMethod invokes t's Equal method on (vx, vy) when one exists with an
// assignable signature, reporting the outcome with reportByMethod.
// It reports whether such a method was found and used.
func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
	// Check if this type even has an Equal method.
	m, ok := t.MethodByName("Equal")
	if !ok || !function.IsType(m.Type, function.EqualAssignable) {
		return false
	}

	eq := s.callTTBFunc(m.Func, vx, vy)
	s.report(eq, reportByMethod)
	return true
}
|
||||
|
||||
// callTRFunc calls the transformer function f on v. On a sampled subset
// of calls (governed by dynChecker) it runs f twice — once in a separate
// goroutine — to let the race detector observe input mutations and to
// panic if f is non-deterministic.
func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
	if !s.dynChecker.Next() {
		return f.Call([]reflect.Value{v})[0]
	}

	// Run the function twice and ensure that we get the same results back.
	// We run in goroutines so that the race detector (if enabled) can detect
	// unsafe mutations to the input.
	c := make(chan reflect.Value)
	go detectRaces(c, f, v)
	got := <-c
	want := f.Call([]reflect.Value{v})[0]
	if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() {
		// To avoid false-positives with non-reflexive equality operations,
		// we sanity check whether a value is equal to itself.
		if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
			return want
		}
		panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
	}
	return want
}
|
||||
|
||||
// callTTBFunc calls the two-argument boolean function f (e.g. an Equal
// method or Comparer) on (x, y). On a sampled subset of calls it also
// evaluates f(y, x) in a separate goroutine and panics if the results
// disagree, catching non-symmetric or non-deterministic functions.
func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
	if !s.dynChecker.Next() {
		return f.Call([]reflect.Value{x, y})[0].Bool()
	}

	// Swapping the input arguments is sufficient to check that
	// f is symmetric and deterministic.
	// We run in goroutines so that the race detector (if enabled) can detect
	// unsafe mutations to the input.
	c := make(chan reflect.Value)
	go detectRaces(c, f, y, x)
	got := <-c
	want := f.Call([]reflect.Value{x, y})[0].Bool()
	// got may be invalid if the goroutine's call to f panicked.
	if !got.IsValid() || got.Bool() != want {
		panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
	}
	return want
}
|
||||
|
||||
func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
|
||||
var ret reflect.Value
|
||||
defer func() {
|
||||
recover() // Ignore panics, let the other call to f panic instead
|
||||
c <- ret
|
||||
}()
|
||||
ret = f.Call(vs)[0]
|
||||
}
|
||||
|
||||
// compareStruct compares vx and vy field by field. For unexported fields
// it lazily prepares addressable copies of the parent structs (required
// by retrieveUnexportedField) and consults the registered exporters to
// decide whether forced access is permitted.
func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
	var addr bool
	var vax, vay reflect.Value // Addressable versions of vx and vy

	// mayForce is computed once per struct type, on first unexported field.
	var mayForce, mayForceInit bool
	step := StructField{&structField{}}
	for i := 0; i < t.NumField(); i++ {
		step.typ = t.Field(i).Type
		step.vx = vx.Field(i)
		step.vy = vy.Field(i)
		step.name = t.Field(i).Name
		step.idx = i
		step.unexported = !isExported(step.name)
		if step.unexported {
			// Blank-named fields carry no comparable data.
			if step.name == "_" {
				continue
			}
			// Defer checking of unexported fields until later to give an
			// Ignore a chance to ignore the field.
			if !vax.IsValid() || !vay.IsValid() {
				// For retrieveUnexportedField to work, the parent struct must
				// be addressable. Create a new copy of the values if
				// necessary to make them addressable.
				addr = vx.CanAddr() || vy.CanAddr()
				vax = makeAddressable(vx)
				vay = makeAddressable(vy)
			}
			if !mayForceInit {
				for _, xf := range s.exporters {
					mayForce = mayForce || xf(t)
				}
				mayForceInit = true
			}
			step.mayForce = mayForce
			step.paddr = addr
			step.pvx = vax
			step.pvy = vay
			step.field = t.Field(i)
		}
		s.compareAny(step)
	}
}
|
||||
|
||||
// compareSlice compares two slices or arrays. It first filters out
// elements that would be ignored on their own, computes an edit-script
// over the remaining elements, and then replays the combined ignore and
// edit scripts through compareAny.
func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
	isSlice := t.Kind() == reflect.Slice
	if isSlice && (vx.IsNil() || vy.IsNil()) {
		s.report(vx.IsNil() && vy.IsNil(), 0)
		return
	}

	// NOTE: It is incorrect to call curPtrs.Push on the slice header pointer
	// since slices represents a list of pointers, rather than a single pointer.
	// The pointer checking logic must be handled on a per-element basis
	// in compareAny.
	//
	// A slice header (see reflect.SliceHeader) in Go is a tuple of a starting
	// pointer P, a length N, and a capacity C. Supposing each slice element has
	// a memory size of M, then the slice is equivalent to the list of pointers:
	//	[P+i*M for i in range(N)]
	//
	// For example, v[:0] and v[:1] are slices with the same starting pointer,
	// but they are clearly different values. Using the slice pointer alone
	// violates the assumption that equal pointers implies equal values.

	step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}}
	// withIndexes mutates the shared step in place; -1 marks a missing side.
	withIndexes := func(ix, iy int) SliceIndex {
		if ix >= 0 {
			step.vx, step.xkey = vx.Index(ix), ix
		} else {
			step.vx, step.xkey = reflect.Value{}, -1
		}
		if iy >= 0 {
			step.vy, step.ykey = vy.Index(iy), iy
		} else {
			step.vy, step.ykey = reflect.Value{}, -1
		}
		return step
	}

	// Ignore options are able to ignore missing elements in a slice.
	// However, detecting these reliably requires an optimal differencing
	// algorithm, for which diff.Difference is not.
	//
	// Instead, we first iterate through both slices to detect which elements
	// would be ignored if standing alone. The index of non-discarded elements
	// are stored in a separate slice, which diffing is then performed on.
	var indexesX, indexesY []int
	var ignoredX, ignoredY []bool
	for ix := 0; ix < vx.Len(); ix++ {
		ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
		if !ignored {
			indexesX = append(indexesX, ix)
		}
		ignoredX = append(ignoredX, ignored)
	}
	for iy := 0; iy < vy.Len(); iy++ {
		ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
		if !ignored {
			indexesY = append(indexesY, iy)
		}
		ignoredY = append(ignoredY, ignored)
	}

	// Compute an edit-script for slices vx and vy (excluding ignored elements).
	edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
		return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
	})

	// Replay the ignore-scripts and the edit-script.
	var ix, iy int
	for ix < vx.Len() || iy < vy.Len() {
		var e diff.EditType
		switch {
		case ix < len(ignoredX) && ignoredX[ix]:
			e = diff.UniqueX
		case iy < len(ignoredY) && ignoredY[iy]:
			e = diff.UniqueY
		default:
			e, edits = edits[0], edits[1:]
		}
		switch e {
		case diff.UniqueX:
			s.compareAny(withIndexes(ix, -1))
			ix++
		case diff.UniqueY:
			s.compareAny(withIndexes(-1, iy))
			iy++
		default:
			s.compareAny(withIndexes(ix, iy))
			ix++
			iy++
		}
	}
}
|
||||
|
||||
// compareMap compares two maps, pushing them onto the pointer stack for
// cycle detection and visiting the union of their keys in sorted order
// so output is deterministic.
func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
	if vx.IsNil() || vy.IsNil() {
		s.report(vx.IsNil() && vy.IsNil(), 0)
		return
	}

	// Cycle-detection for maps.
	if eq, visited := s.curPtrs.Push(vx, vy); visited {
		s.report(eq, reportByCycle)
		return
	}
	defer s.curPtrs.Pop(vx, vy)

	// We combine and sort the two map keys so that we can perform the
	// comparisons in a deterministic order.
	step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
	for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
		step.vx = vx.MapIndex(k)
		step.vy = vy.MapIndex(k)
		step.key = k
		if !step.vx.IsValid() && !step.vy.IsValid() {
			// It is possible for both vx and vy to be invalid if the
			// key contained a NaN value in it.
			//
			// Even with the ability to retrieve NaN keys in Go 1.12,
			// there still isn't a sensible way to compare the values since
			// a NaN key may map to multiple unordered values.
			// The most reasonable way to compare NaNs would be to compare the
			// set of values. However, this is impossible to do efficiently
			// since set equality is provably an O(n^2) operation given only
			// an Equal function. If we had a Less function or Hash function,
			// this could be done in O(n*log(n)) or O(n), respectively.
			//
			// Rather than adding complex logic to deal with NaNs, make it
			// the user's responsibility to compare such obscure maps.
			const help = "consider providing a Comparer to compare the map"
			panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
		}
		s.compareAny(step)
	}
}
|
||||
|
||||
// comparePtr compares two pointers: nil-ness first, then pushes the pair
// for cycle detection, and finally compares the pointed-to values.
func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
	if vx.IsNil() || vy.IsNil() {
		s.report(vx.IsNil() && vy.IsNil(), 0)
		return
	}

	// Cycle-detection for pointers.
	if eq, visited := s.curPtrs.Push(vx, vy); visited {
		s.report(eq, reportByCycle)
		return
	}
	defer s.curPtrs.Pop(vx, vy)

	vx, vy = vx.Elem(), vy.Elem()
	s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
}
|
||||
|
||||
// compareInterface compares two interface values: nil-ness first, then
// dynamic type identity, and finally the unwrapped concrete values via
// a TypeAssertion step.
func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
	if vx.IsNil() || vy.IsNil() {
		s.report(vx.IsNil() && vy.IsNil(), 0)
		return
	}
	vx, vy = vx.Elem(), vy.Elem()
	// Differing dynamic types are unequal by definition.
	if vx.Type() != vy.Type() {
		s.report(false, 0)
		return
	}
	s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
}
|
||||
|
||||
// report records a single comparison outcome: it bumps the running
// same/diff counters (unless the result came from an Ignore) and
// forwards the flagged result to every registered reporter.
func (s *state) report(eq bool, rf resultFlags) {
	if rf&reportByIgnore == 0 {
		if eq {
			s.result.NumSame++
			rf |= reportEqual
		} else {
			s.result.NumDiff++
			rf |= reportUnequal
		}
	}
	for _, r := range s.reporters {
		r.Report(Result{flags: rf})
	}
}
|
||||
|
||||
// recChecker tracks the state needed to periodically perform checks that
// user provided transformers are not stuck in an infinitely recursive cycle.
type recChecker struct{ next int } // next is the path length that triggers the next scan

// Check scans the Path for any recursive transformers and panics when any
// recursive transformers are detected. Note that the presence of a
// recursive Transformer does not necessarily imply an infinite cycle.
// As such, this check only activates after some minimal number of path steps.
func (rc *recChecker) Check(p Path) {
	const minLen = 1 << 16
	if rc.next == 0 {
		rc.next = minLen
	}
	if len(p) < rc.next {
		return
	}
	// Double the threshold so the scan cost stays amortized.
	rc.next <<= 1

	// Check whether the same transformer has appeared at least twice.
	var ss []string
	m := map[Option]int{}
	for _, ps := range p {
		if t, ok := ps.(Transform); ok {
			t := t.Option()
			if m[t] == 1 { // Transformer was used exactly once before
				tf := t.(*transformer).fnc.Type()
				ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
			}
			m[t]++
		}
	}
	if len(ss) > 0 {
		const warning = "recursive set of Transformers detected"
		const help = "consider using cmpopts.AcyclicTransformer"
		set := strings.Join(ss, "\n\t")
		panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
	}
}
|
||||
|
||||
// dynChecker tracks the state needed to periodically perform checks that
// user provided functions are symmetric and deterministic.
// The zero value is safe for immediate use.
type dynChecker struct{ curr, next int }

// Next increments the state and reports whether a check should be performed.
//
// Checks occur every Nth function call, where N is a triangular number:
//
//	0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
//
// See https://en.wikipedia.org/wiki/Triangular_number
//
// This sequence ensures that the cost of checks drops significantly as
// the number of functions calls grows larger.
func (dc *dynChecker) Next() bool {
	shouldCheck := dc.curr == dc.next
	if shouldCheck {
		// Reset the counter and widen the gap until the next check.
		dc.curr, dc.next = 0, dc.next+1
	}
	dc.curr++
	return shouldCheck
}
|
||||
|
||||
// makeAddressable returns a value that is always addressable.
|
||||
// It returns the input verbatim if it is already addressable,
|
||||
// otherwise it creates a new value and returns an addressable copy.
|
||||
func makeAddressable(v reflect.Value) reflect.Value {
|
||||
if v.CanAddr() {
|
||||
return v
|
||||
}
|
||||
vc := reflect.New(v.Type()).Elem()
|
||||
vc.Set(v)
|
||||
return vc
|
||||
}
|
||||
31
vendor/github.com/google/go-cmp/cmp/export.go
generated
vendored
Normal file
31
vendor/github.com/google/go-cmp/cmp/export.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
// a struct such that the value has read-write permissions.
//
// The parent struct, v, must be addressable, while f must be a StructField
// describing the field to retrieve. If addr is false,
// then the returned value will be shallowed copied to be non-addressable.
func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value {
	// Compute the field's address from the parent's address plus the field
	// offset, then re-wrap it without the read-only flag via NewAt.
	ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
	if !addr {
		// A field is addressable if and only if the struct is addressable.
		// If the original parent value was not addressable, shallow copy the
		// value to make it non-addressable to avoid leaking an implementation
		// detail of how forcibly exporting a field works.
		if ve.Kind() == reflect.Interface && ve.IsNil() {
			return reflect.Zero(f.Type)
		}
		return reflect.ValueOf(ve.Interface()).Convert(f.Type)
	}
	return ve
}
|
||||
18
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
generated
vendored
Normal file
18
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !cmp_debug
|
||||
// +build !cmp_debug
|
||||
|
||||
package diff
|
||||
|
||||
// debug is the package-wide debugger hook; this no-op variant is compiled
// in when the "cmp_debug" build tag is absent.
var debug debugger

// debugger mirrors the interface of the real debugger in debug_enable.go
// but does nothing, so the diff algorithm pays no visualization cost.
type debugger struct{}

// Begin returns f unchanged; no wrapping or rendering is performed.
func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
	return f
}
func (debugger) Update() {}
func (debugger) Finish() {}
|
||||
123
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
generated
vendored
Normal file
123
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
generated
vendored
Normal file
@@ -0,0 +1,123 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build cmp_debug
|
||||
// +build cmp_debug
|
||||
|
||||
package diff
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// The algorithm can be seen running in real-time by enabling debugging:
|
||||
// go test -tags=cmp_debug -v
|
||||
//
|
||||
// Example output:
|
||||
// === RUN TestDifference/#34
|
||||
// ┌───────────────────────────────┐
|
||||
// │ \ · · · · · · · · · · · · · · │
|
||||
// │ · # · · · · · · · · · · · · · │
|
||||
// │ · \ · · · · · · · · · · · · · │
|
||||
// │ · · \ · · · · · · · · · · · · │
|
||||
// │ · · · X # · · · · · · · · · · │
|
||||
// │ · · · # \ · · · · · · · · · · │
|
||||
// │ · · · · · # # · · · · · · · · │
|
||||
// │ · · · · · # \ · · · · · · · · │
|
||||
// │ · · · · · · · \ · · · · · · · │
|
||||
// │ · · · · · · · · \ · · · · · · │
|
||||
// │ · · · · · · · · · \ · · · · · │
|
||||
// │ · · · · · · · · · · \ · · # · │
|
||||
// │ · · · · · · · · · · · \ # # · │
|
||||
// │ · · · · · · · · · · · # # # · │
|
||||
// │ · · · · · · · · · · # # # # · │
|
||||
// │ · · · · · · · · · # # # # # · │
|
||||
// │ · · · · · · · · · · · · · · \ │
|
||||
// └───────────────────────────────┘
|
||||
// [.Y..M.XY......YXYXY.|]
|
||||
//
|
||||
// The grid represents the edit-graph where the horizontal axis represents
|
||||
// list X and the vertical axis represents list Y. The start of the two lists
|
||||
// is the top-left, while the ends are the bottom-right. The '·' represents
|
||||
// an unexplored node in the graph. The '\' indicates that the two symbols
|
||||
// from list X and Y are equal. The 'X' indicates that two symbols are similar
|
||||
// (but not exactly equal) to each other. The '#' indicates that the two symbols
|
||||
// are different (and not similar). The algorithm traverses this graph trying to
|
||||
// make the paths starting in the top-left and the bottom-right connect.
|
||||
//
|
||||
// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
|
||||
// the currently established path from the forward and reverse searches,
|
||||
// separated by a '|' character.
|
||||
|
||||
const (
	updateDelay  = 100 * time.Millisecond
	finishDelay  = 500 * time.Millisecond
	ansiTerminal = true // ANSI escape codes used to move terminal cursor
)

// debug is the package-wide debugger hook, active under the "cmp_debug"
// build tag. It renders the edit-graph traversal to the terminal.
var debug debugger

// debugger holds the live rendering state for one Difference call.
// The embedded Mutex serializes concurrent Difference invocations, since
// there is only one terminal to draw on.
type debugger struct {
	sync.Mutex
	p1, p2           EditScript
	fwdPath, revPath *EditScript
	grid             []byte
	lines            int
}

// Begin initializes the grid for an nx-by-ny edit-graph, prints it, and
// returns a wrapped EqualFunc that marks each probed cell with '\', 'X',
// or '#' according to the comparison result.
func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
	dbg.Lock()
	dbg.fwdPath, dbg.revPath = p1, p2
	top := "┌─" + strings.Repeat("──", nx) + "┐\n"
	row := "│ " + strings.Repeat("· ", nx) + "│\n"
	btm := "└─" + strings.Repeat("──", nx) + "┘\n"
	dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
	dbg.lines = strings.Count(dbg.String(), "\n")
	fmt.Print(dbg)

	// Wrap the EqualFunc so that we can intercept each result.
	return func(ix, iy int) (r Result) {
		// Locate the byte span of cell (ix, iy) inside the rendered grid.
		cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
		for i := range cell {
			cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
		}
		switch r = f(ix, iy); {
		case r.Equal():
			cell[0] = '\\'
		case r.Similar():
			cell[0] = 'X'
		default:
			cell[0] = '#'
		}
		return
	}
}

// Update redraws the grid with a short delay for animation.
func (dbg *debugger) Update() {
	dbg.print(updateDelay)
}

// Finish draws the final frame, pauses, and releases the lock taken in Begin.
func (dbg *debugger) Finish() {
	dbg.print(finishDelay)
	dbg.Unlock()
}

// String renders the grid plus the forward path and the (reversed)
// reverse path, separated by '|'.
func (dbg *debugger) String() string {
	dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
	for i := len(*dbg.revPath) - 1; i >= 0; i-- {
		dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
	}
	return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
}

// print repaints the grid in place (moving the cursor up on ANSI
// terminals) and sleeps for d so the animation is visible.
func (dbg *debugger) print(d time.Duration) {
	if ansiTerminal {
		fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
	}
	fmt.Print(dbg)
	time.Sleep(d)
}
|
||||
402
vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
generated
vendored
Normal file
402
vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
generated
vendored
Normal file
@@ -0,0 +1,402 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package diff implements an algorithm for producing edit-scripts.
|
||||
// The edit-script is a sequence of operations needed to transform one list
|
||||
// of symbols into another (or vice-versa). The edits allowed are insertions,
|
||||
// deletions, and modifications. The summation of all edits is called the
|
||||
// Levenshtein distance as this problem is well-known in computer science.
|
||||
//
|
||||
// This package prioritizes performance over accuracy. That is, the run time
|
||||
// is more important than obtaining a minimal Levenshtein distance.
|
||||
package diff
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
)
|
||||
|
||||
// EditType represents a single operation within an edit-script.
type EditType uint8

const (
	// Identity indicates that a symbol pair is identical in both list X and Y.
	Identity EditType = iota
	// UniqueX indicates that a symbol only exists in X and not Y.
	UniqueX
	// UniqueY indicates that a symbol only exists in Y and not X.
	UniqueY
	// Modified indicates that a symbol pair is a modification of each other.
	Modified
)

// EditScript represents the series of differences between two lists.
type EditScript []EditType

// String returns a human-readable string representing the edit-script where
// Identity, UniqueX, UniqueY, and Modified are represented by the
// '.', 'X', 'Y', and 'M' characters, respectively.
func (es EditScript) String() string {
	out := make([]byte, 0, len(es))
	for _, e := range es {
		var c byte
		switch e {
		case Identity:
			c = '.'
		case UniqueX:
			c = 'X'
		case UniqueY:
			c = 'Y'
		case Modified:
			c = 'M'
		default:
			panic("invalid edit-type")
		}
		out = append(out, c)
	}
	return string(out)
}

// stats returns a histogram of the number of each type of edit operation.
func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
	for _, e := range es {
		// Select the counter for this edit type, then bump it.
		var counter *int
		switch e {
		case Identity:
			counter = &s.NI
		case UniqueX:
			counter = &s.NX
		case UniqueY:
			counter = &s.NY
		case Modified:
			counter = &s.NM
		default:
			panic("invalid edit-type")
		}
		*counter++
	}
	return s
}

// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
// lists X and Y are equal.
func (es EditScript) Dist() int { return len(es) - es.stats().NI }

// LenX is the length of the X list.
func (es EditScript) LenX() int { return len(es) - es.stats().NY }

// LenY is the length of the Y list.
func (es EditScript) LenY() int { return len(es) - es.stats().NX }
|
||||
|
||||
// EqualFunc reports whether the symbols at indexes ix and iy are equal.
// When called by Difference, the index is guaranteed to be within nx and ny.
type EqualFunc func(ix int, iy int) Result

// Result is the result of comparison.
// NumSame is the number of sub-elements that are equal.
// NumDiff is the number of sub-elements that are not equal.
type Result struct{ NumSame, NumDiff int }

// BoolResult returns a Result that is either Equal or not Equal.
func BoolResult(b bool) Result {
	if !b {
		// Not Equal, and not Similar either.
		return Result{NumDiff: 2}
	}
	// Equal, which also implies Similar.
	return Result{NumSame: 1}
}

// Equal indicates whether the symbols are equal. Two symbols are equal
// if and only if NumDiff == 0. If Equal, then they are also Similar.
func (r Result) Equal() bool { return r.NumDiff == 0 }

// Similar indicates whether two symbols are similar and may be represented
// by using the Modified type. As a special case, binary comparisons
// (i.e., those that return Result{1, 0} or Result{0, 1}) count as similar.
//
// The exact ratio of NumSame to NumDiff to determine similarity may change.
func (r Result) Similar() bool {
	// Offset NumSame by one so that binary comparisons are similar.
	return r.NumSame+1 >= r.NumDiff
}
|
||||
|
||||
// randBool is a single random bit fixed at process start; Difference uses
// it to non-deterministically pick the initial search direction (unless
// flags.Deterministic is set).
var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
|
||||
|
||||
// Difference reports whether two lists of lengths nx and ny are equal
|
||||
// given the definition of equality provided as f.
|
||||
//
|
||||
// This function returns an edit-script, which is a sequence of operations
|
||||
// needed to convert one list into the other. The following invariants for
|
||||
// the edit-script are maintained:
|
||||
// - eq == (es.Dist()==0)
|
||||
// - nx == es.LenX()
|
||||
// - ny == es.LenY()
|
||||
//
|
||||
// This algorithm is not guaranteed to be an optimal solution (i.e., one that
|
||||
// produces an edit-script with a minimal Levenshtein distance). This algorithm
|
||||
// favors performance over optimality. The exact output is not guaranteed to
|
||||
// be stable and may change over time.
|
||||
func Difference(nx, ny int, f EqualFunc) (es EditScript) {
|
||||
// This algorithm is based on traversing what is known as an "edit-graph".
|
||||
// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
|
||||
// by Eugene W. Myers. Since D can be as large as N itself, this is
|
||||
// effectively O(N^2). Unlike the algorithm from that paper, we are not
|
||||
// interested in the optimal path, but at least some "decent" path.
|
||||
//
|
||||
// For example, let X and Y be lists of symbols:
|
||||
// X = [A B C A B B A]
|
||||
// Y = [C B A B A C]
|
||||
//
|
||||
// The edit-graph can be drawn as the following:
|
||||
// A B C A B B A
|
||||
// ┌─────────────┐
|
||||
// C │_|_|\|_|_|_|_│ 0
|
||||
// B │_|\|_|_|\|\|_│ 1
|
||||
// A │\|_|_|\|_|_|\│ 2
|
||||
// B │_|\|_|_|\|\|_│ 3
|
||||
// A │\|_|_|\|_|_|\│ 4
|
||||
// C │ | |\| | | | │ 5
|
||||
// └─────────────┘ 6
|
||||
// 0 1 2 3 4 5 6 7
|
||||
//
|
||||
// List X is written along the horizontal axis, while list Y is written
|
||||
// along the vertical axis. At any point on this grid, if the symbol in
|
||||
// list X matches the corresponding symbol in list Y, then a '\' is drawn.
|
||||
// The goal of any minimal edit-script algorithm is to find a path from the
|
||||
// top-left corner to the bottom-right corner, while traveling through the
|
||||
// fewest horizontal or vertical edges.
|
||||
// A horizontal edge is equivalent to inserting a symbol from list X.
|
||||
// A vertical edge is equivalent to inserting a symbol from list Y.
|
||||
// A diagonal edge is equivalent to a matching symbol between both X and Y.
|
||||
|
||||
// Invariants:
|
||||
// - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
|
||||
// - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
|
||||
//
|
||||
// In general:
|
||||
// - fwdFrontier.X < revFrontier.X
|
||||
// - fwdFrontier.Y < revFrontier.Y
|
||||
//
|
||||
// Unless, it is time for the algorithm to terminate.
|
||||
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
|
||||
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
|
||||
fwdFrontier := fwdPath.point // Forward search frontier
|
||||
revFrontier := revPath.point // Reverse search frontier
|
||||
|
||||
// Search budget bounds the cost of searching for better paths.
|
||||
// The longest sequence of non-matching symbols that can be tolerated is
|
||||
// approximately the square-root of the search budget.
|
||||
searchBudget := 4 * (nx + ny) // O(n)
|
||||
|
||||
// Running the tests with the "cmp_debug" build tag prints a visualization
|
||||
// of the algorithm running in real-time. This is educational for
|
||||
// understanding how the algorithm works. See debug_enable.go.
|
||||
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
|
||||
|
||||
// The algorithm below is a greedy, meet-in-the-middle algorithm for
|
||||
// computing sub-optimal edit-scripts between two lists.
|
||||
//
|
||||
// The algorithm is approximately as follows:
|
||||
// - Searching for differences switches back-and-forth between
|
||||
// a search that starts at the beginning (the top-left corner), and
|
||||
// a search that starts at the end (the bottom-right corner).
|
||||
// The goal of the search is connect with the search
|
||||
// from the opposite corner.
|
||||
// - As we search, we build a path in a greedy manner,
|
||||
// where the first match seen is added to the path (this is sub-optimal,
|
||||
// but provides a decent result in practice). When matches are found,
|
||||
// we try the next pair of symbols in the lists and follow all matches
|
||||
// as far as possible.
|
||||
// - When searching for matches, we search along a diagonal going through
|
||||
// through the "frontier" point. If no matches are found,
|
||||
// we advance the frontier towards the opposite corner.
|
||||
// - This algorithm terminates when either the X coordinates or the
|
||||
// Y coordinates of the forward and reverse frontier points ever intersect.
|
||||
|
||||
// This algorithm is correct even if searching only in the forward direction
|
||||
// or in the reverse direction. We do both because it is commonly observed
|
||||
// that two lists commonly differ because elements were added to the front
|
||||
// or end of the other list.
|
||||
//
|
||||
// Non-deterministically start with either the forward or reverse direction
|
||||
// to introduce some deliberate instability so that we have the flexibility
|
||||
// to change this algorithm in the future.
|
||||
if flags.Deterministic || randBool {
|
||||
goto forwardSearch
|
||||
} else {
|
||||
goto reverseSearch
|
||||
}
|
||||
|
||||
forwardSearch:
|
||||
{
|
||||
// Forward search from the beginning.
|
||||
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
|
||||
goto finishSearch
|
||||
}
|
||||
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
|
||||
// Search in a diagonal pattern for a match.
|
||||
z := zigzag(i)
|
||||
p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
|
||||
switch {
|
||||
case p.X >= revPath.X || p.Y < fwdPath.Y:
|
||||
stop1 = true // Hit top-right corner
|
||||
case p.Y >= revPath.Y || p.X < fwdPath.X:
|
||||
stop2 = true // Hit bottom-left corner
|
||||
case f(p.X, p.Y).Equal():
|
||||
// Match found, so connect the path to this point.
|
||||
fwdPath.connect(p, f)
|
||||
fwdPath.append(Identity)
|
||||
// Follow sequence of matches as far as possible.
|
||||
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
|
||||
if !f(fwdPath.X, fwdPath.Y).Equal() {
|
||||
break
|
||||
}
|
||||
fwdPath.append(Identity)
|
||||
}
|
||||
fwdFrontier = fwdPath.point
|
||||
stop1, stop2 = true, true
|
||||
default:
|
||||
searchBudget-- // Match not found
|
||||
}
|
||||
debug.Update()
|
||||
}
|
||||
// Advance the frontier towards reverse point.
|
||||
if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
|
||||
fwdFrontier.X++
|
||||
} else {
|
||||
fwdFrontier.Y++
|
||||
}
|
||||
goto reverseSearch
|
||||
}
|
||||
|
||||
reverseSearch:
|
||||
{
|
||||
// Reverse search from the end.
|
||||
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
|
||||
goto finishSearch
|
||||
}
|
||||
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
|
||||
// Search in a diagonal pattern for a match.
|
||||
z := zigzag(i)
|
||||
p := point{revFrontier.X - z, revFrontier.Y + z}
|
||||
switch {
|
||||
case fwdPath.X >= p.X || revPath.Y < p.Y:
|
||||
stop1 = true // Hit bottom-left corner
|
||||
case fwdPath.Y >= p.Y || revPath.X < p.X:
|
||||
stop2 = true // Hit top-right corner
|
||||
case f(p.X-1, p.Y-1).Equal():
|
||||
// Match found, so connect the path to this point.
|
||||
revPath.connect(p, f)
|
||||
revPath.append(Identity)
|
||||
// Follow sequence of matches as far as possible.
|
||||
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
|
||||
if !f(revPath.X-1, revPath.Y-1).Equal() {
|
||||
break
|
||||
}
|
||||
revPath.append(Identity)
|
||||
}
|
||||
revFrontier = revPath.point
|
||||
stop1, stop2 = true, true
|
||||
default:
|
||||
searchBudget-- // Match not found
|
||||
}
|
||||
debug.Update()
|
||||
}
|
||||
// Advance the frontier towards forward point.
|
||||
if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
|
||||
revFrontier.X--
|
||||
} else {
|
||||
revFrontier.Y--
|
||||
}
|
||||
goto forwardSearch
|
||||
}
|
||||
|
||||
finishSearch:
|
||||
// Join the forward and reverse paths and then append the reverse path.
|
||||
fwdPath.connect(revPath.point, f)
|
||||
for i := len(revPath.es) - 1; i >= 0; i-- {
|
||||
t := revPath.es[i]
|
||||
revPath.es = revPath.es[:i]
|
||||
fwdPath.append(t)
|
||||
}
|
||||
debug.Finish()
|
||||
return fwdPath.es
|
||||
}
|
||||
|
||||
// path represents the partially computed edit-script and the leading point
// of the search as it is extended in a single direction through the
// edit-graph of the two lists being compared.
type path struct {
	dir   int // +1 if forward, -1 if reverse
	point     // Leading point of the EditScript path
	es    EditScript
}
|
||||
|
||||
// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
// to the edit-script to connect p.point to dst.
//
// The direction of travel is determined by p.dir: a forward path consumes
// symbols starting at p.point moving toward dst, while a reverse path
// consumes symbols starting just before p.point moving back toward dst.
func (p *path) connect(dst point, f EqualFunc) {
	if p.dir > 0 {
		// Connect in forward direction.
		// While both coordinates can still advance, classify each symbol pair.
		for dst.X > p.X && dst.Y > p.Y {
			switch r := f(p.X, p.Y); {
			case r.Equal():
				p.append(Identity)
			case r.Similar():
				p.append(Modified)
			case dst.X-p.X >= dst.Y-p.Y:
				// More X than Y remains; bias toward consuming X first.
				p.append(UniqueX)
			default:
				p.append(UniqueY)
			}
		}
		// Exhaust whichever axis still has distance left to cover.
		for dst.X > p.X {
			p.append(UniqueX)
		}
		for dst.Y > p.Y {
			p.append(UniqueY)
		}
	} else {
		// Connect in reverse direction.
		// Note the -1 offsets: the reverse path inspects the symbols
		// immediately before the current point.
		for p.X > dst.X && p.Y > dst.Y {
			switch r := f(p.X-1, p.Y-1); {
			case r.Equal():
				p.append(Identity)
			case r.Similar():
				p.append(Modified)
			case p.Y-dst.Y >= p.X-dst.X:
				// More Y than X remains; bias toward consuming Y first.
				p.append(UniqueY)
			default:
				p.append(UniqueX)
			}
		}
		// Exhaust whichever axis still has distance left to cover.
		for p.X > dst.X {
			p.append(UniqueX)
		}
		for p.Y > dst.Y {
			p.append(UniqueY)
		}
	}
}
|
||||
|
||||
func (p *path) append(t EditType) {
|
||||
p.es = append(p.es, t)
|
||||
switch t {
|
||||
case Identity, Modified:
|
||||
p.add(p.dir, p.dir)
|
||||
case UniqueX:
|
||||
p.add(p.dir, 0)
|
||||
case UniqueY:
|
||||
p.add(0, p.dir)
|
||||
}
|
||||
debug.Update()
|
||||
}
|
||||
|
||||
// point is a two-dimensional coordinate within the edit-graph.
type point struct{ X, Y int }

// add translates the point by the vector (dx, dy).
func (p *point) add(dx, dy int) {
	p.X += dx
	p.Y += dy
}
|
||||
|
||||
// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
//
//	[0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
//
// Even inputs map to non-negative outputs; odd inputs map to negative ones.
func zigzag(x int) int {
	if x%2 == 0 {
		return x / 2
	}
	return -(x + 1) / 2
}
|
||||
9
vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
generated
vendored
Normal file
9
vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package flags holds package-level toggles consulted by the cmp package.
package flags

// Deterministic controls whether the output of Diff should be deterministic.
// This is only used for testing.
var Deterministic bool
|
||||
99
vendor/github.com/google/go-cmp/cmp/internal/function/func.go
generated
vendored
Normal file
99
vendor/github.com/google/go-cmp/cmp/internal/function/func.go
generated
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package function provides functionality for identifying function types.
|
||||
package function
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type funcType int
|
||||
|
||||
const (
|
||||
_ funcType = iota
|
||||
|
||||
tbFunc // func(T) bool
|
||||
ttbFunc // func(T, T) bool
|
||||
trbFunc // func(T, R) bool
|
||||
tibFunc // func(T, I) bool
|
||||
trFunc // func(T) R
|
||||
|
||||
Equal = ttbFunc // func(T, T) bool
|
||||
EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
|
||||
Transformer = trFunc // func(T) R
|
||||
ValueFilter = ttbFunc // func(T, T) bool
|
||||
Less = ttbFunc // func(T, T) bool
|
||||
ValuePredicate = tbFunc // func(T) bool
|
||||
KeyValuePredicate = trbFunc // func(T, R) bool
|
||||
)
|
||||
|
||||
var boolType = reflect.TypeOf(true)
|
||||
|
||||
// IsType reports whether the reflect.Type is of the specified function type.
|
||||
func IsType(t reflect.Type, ft funcType) bool {
|
||||
if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
|
||||
return false
|
||||
}
|
||||
ni, no := t.NumIn(), t.NumOut()
|
||||
switch ft {
|
||||
case tbFunc: // func(T) bool
|
||||
if ni == 1 && no == 1 && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case ttbFunc: // func(T, T) bool
|
||||
if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case trbFunc: // func(T, R) bool
|
||||
if ni == 2 && no == 1 && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case tibFunc: // func(T, I) bool
|
||||
if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case trFunc: // func(T) R
|
||||
if ni == 1 && no == 1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// lastIdentRx matches the trailing Go identifier in a string.
var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`)

// NameOf returns the name of the function value.
//
// It derives a dotted name (e.g., "pkg.Type.method") from the runtime symbol
// name by repeatedly stripping the last identifier off the full name.
func NameOf(v reflect.Value) string {
	fnc := runtime.FuncForPC(v.Pointer())
	if fnc == nil {
		return "<unknown>"
	}
	fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm"

	// Method closures have a "-fm" suffix.
	fullName = strings.TrimSuffix(fullName, "-fm")

	var name string
	for len(fullName) > 0 {
		// Peel one identifier off the end per iteration,
		// accumulating the dotted name from right to left.
		inParen := strings.HasSuffix(fullName, ")")
		fullName = strings.TrimSuffix(fullName, ")")

		s := lastIdentRx.FindString(fullName)
		if s == "" {
			break
		}
		name = s + "." + name
		fullName = strings.TrimSuffix(fullName, s)

		// If the identifier was parenthesized (e.g., "(*MyType)"), drop
		// everything back to the matching open parenthesis.
		if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 {
			fullName = fullName[:i]
		}
		fullName = strings.TrimSuffix(fullName, ".")
	}
	return strings.TrimSuffix(name, ".")
}
|
||||
164
vendor/github.com/google/go-cmp/cmp/internal/value/name.go
generated
vendored
Normal file
164
vendor/github.com/google/go-cmp/cmp/internal/value/name.go
generated
vendored
Normal file
@@ -0,0 +1,164 @@
|
||||
// Copyright 2020, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package value
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// anyType caches the reflect.Type of the empty interface so that the
// "any" alias can be printed in its place.
var anyType = reflect.TypeOf((*interface{})(nil)).Elem()

// TypeString is nearly identical to reflect.Type.String,
// but has an additional option to specify that full type names be used.
func TypeString(t reflect.Type, qualified bool) string {
	return string(appendTypeName(nil, t, qualified, false))
}

// appendTypeName appends the textual representation of t to b and returns
// the extended buffer. If qualified is set, named types and unexported
// fields/methods are prefixed with their quoted full package path.
// If elideFunc is set, the leading "func" keyword is omitted (used when
// printing interface method signatures).
func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte {
	// BUG: Go reflection provides no way to disambiguate two named types
	// of the same name and within the same package,
	// but declared within the namespace of different functions.

	// Use the "any" alias instead of "interface{}" for better readability.
	if t == anyType {
		return append(b, "any"...)
	}

	// Named type.
	if t.Name() != "" {
		if qualified && t.PkgPath() != "" {
			b = append(b, '"')
			b = append(b, t.PkgPath()...)
			b = append(b, '"')
			b = append(b, '.')
			b = append(b, t.Name()...)
		} else {
			b = append(b, t.String()...)
		}
		return b
	}

	// Unnamed type: render structurally, recursing on element types.
	switch k := t.Kind(); k {
	case reflect.Bool, reflect.String, reflect.UnsafePointer,
		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
		reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
		b = append(b, k.String()...)
	case reflect.Chan:
		if t.ChanDir() == reflect.RecvDir {
			b = append(b, "<-"...)
		}
		b = append(b, "chan"...)
		if t.ChanDir() == reflect.SendDir {
			b = append(b, "<-"...)
		}
		b = append(b, ' ')
		b = appendTypeName(b, t.Elem(), qualified, false)
	case reflect.Func:
		if !elideFunc {
			b = append(b, "func"...)
		}
		b = append(b, '(')
		for i := 0; i < t.NumIn(); i++ {
			if i > 0 {
				b = append(b, ", "...)
			}
			if i == t.NumIn()-1 && t.IsVariadic() {
				b = append(b, "..."...)
				b = appendTypeName(b, t.In(i).Elem(), qualified, false)
			} else {
				b = appendTypeName(b, t.In(i), qualified, false)
			}
		}
		b = append(b, ')')
		switch t.NumOut() {
		case 0:
			// Do nothing
		case 1:
			b = append(b, ' ')
			b = appendTypeName(b, t.Out(0), qualified, false)
		default:
			b = append(b, " ("...)
			for i := 0; i < t.NumOut(); i++ {
				if i > 0 {
					b = append(b, ", "...)
				}
				b = appendTypeName(b, t.Out(i), qualified, false)
			}
			b = append(b, ')')
		}
	case reflect.Struct:
		b = append(b, "struct{ "...)
		for i := 0; i < t.NumField(); i++ {
			if i > 0 {
				b = append(b, "; "...)
			}
			sf := t.Field(i)
			if !sf.Anonymous {
				if qualified && sf.PkgPath != "" {
					b = append(b, '"')
					b = append(b, sf.PkgPath...)
					b = append(b, '"')
					b = append(b, '.')
				}
				b = append(b, sf.Name...)
				b = append(b, ' ')
			}
			b = appendTypeName(b, sf.Type, qualified, false)
			if sf.Tag != "" {
				b = append(b, ' ')
				b = strconv.AppendQuote(b, string(sf.Tag))
			}
		}
		// Collapse "struct{ " to "struct {" for the empty struct,
		// otherwise close with a padded brace.
		if b[len(b)-1] == ' ' {
			b = b[:len(b)-1]
		} else {
			b = append(b, ' ')
		}
		b = append(b, '}')
	case reflect.Slice, reflect.Array:
		b = append(b, '[')
		if k == reflect.Array {
			b = strconv.AppendUint(b, uint64(t.Len()), 10)
		}
		b = append(b, ']')
		b = appendTypeName(b, t.Elem(), qualified, false)
	case reflect.Map:
		b = append(b, "map["...)
		b = appendTypeName(b, t.Key(), qualified, false)
		b = append(b, ']')
		b = appendTypeName(b, t.Elem(), qualified, false)
	case reflect.Ptr:
		b = append(b, '*')
		b = appendTypeName(b, t.Elem(), qualified, false)
	case reflect.Interface:
		b = append(b, "interface{ "...)
		for i := 0; i < t.NumMethod(); i++ {
			if i > 0 {
				b = append(b, "; "...)
			}
			m := t.Method(i)
			if qualified && m.PkgPath != "" {
				b = append(b, '"')
				b = append(b, m.PkgPath...)
				b = append(b, '"')
				b = append(b, '.')
			}
			b = append(b, m.Name...)
			b = appendTypeName(b, m.Type, qualified, true)
		}
		if b[len(b)-1] == ' ' {
			b = b[:len(b)-1]
		} else {
			b = append(b, ' ')
		}
		b = append(b, '}')
	default:
		panic("invalid kind: " + k.String())
	}
	return b
}
|
||||
34
vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go
generated
vendored
Normal file
34
vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
// Copyright 2018, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package value
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Pointer is an opaque typed pointer and is guaranteed to be comparable.
|
||||
type Pointer struct {
|
||||
p unsafe.Pointer
|
||||
t reflect.Type
|
||||
}
|
||||
|
||||
// PointerOf returns a Pointer from v, which must be a
|
||||
// reflect.Ptr, reflect.Slice, or reflect.Map.
|
||||
func PointerOf(v reflect.Value) Pointer {
|
||||
// The proper representation of a pointer is unsafe.Pointer,
|
||||
// which is necessary if the GC ever uses a moving collector.
|
||||
return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
|
||||
}
|
||||
|
||||
// IsNil reports whether the pointer is nil.
|
||||
func (p Pointer) IsNil() bool {
|
||||
return p.p == nil
|
||||
}
|
||||
|
||||
// Uintptr returns the pointer as a uintptr.
|
||||
func (p Pointer) Uintptr() uintptr {
|
||||
return uintptr(p.p)
|
||||
}
|
||||
106
vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
generated
vendored
Normal file
106
vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
generated
vendored
Normal file
@@ -0,0 +1,106 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package value
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// SortKeys sorts a list of map keys, deduplicating keys if necessary.
// The type of each value must be comparable.
func SortKeys(vs []reflect.Value) []reflect.Value {
	if len(vs) == 0 {
		return vs
	}

	// Sort the map keys.
	sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })

	// Deduplicate keys (fails for NaNs).
	// Since the slice is sorted, equal keys are adjacent; keep a key only
	// if it is strictly greater than the last key retained.
	vs2 := vs[:1]
	for _, v := range vs[1:] {
		if isLess(vs2[len(vs2)-1], v) {
			vs2 = append(vs2, v)
		}
	}
	return vs2
}

// isLess is a generic function for sorting arbitrary map keys.
// The inputs must be of the same type and must be comparable.
func isLess(x, y reflect.Value) bool {
	switch x.Type().Kind() {
	case reflect.Bool:
		return !x.Bool() && y.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return x.Int() < y.Int()
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return x.Uint() < y.Uint()
	case reflect.Float32, reflect.Float64:
		// NOTE: This does not sort -0 as less than +0
		// since Go maps treat -0 and +0 as equal keys.
		// NaNs sort before non-NaNs so they appear first.
		fx, fy := x.Float(), y.Float()
		return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
	case reflect.Complex64, reflect.Complex128:
		// Order by real part first, then imaginary part.
		cx, cy := x.Complex(), y.Complex()
		rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
		if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
			return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
		}
		return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
	case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
		return x.Pointer() < y.Pointer()
	case reflect.String:
		return x.String() < y.String()
	case reflect.Array:
		// Lexicographic ordering over the elements.
		for i := 0; i < x.Len(); i++ {
			if isLess(x.Index(i), y.Index(i)) {
				return true
			}
			if isLess(y.Index(i), x.Index(i)) {
				return false
			}
		}
		return false
	case reflect.Struct:
		// Lexicographic ordering over the fields.
		for i := 0; i < x.NumField(); i++ {
			if isLess(x.Field(i), y.Field(i)) {
				return true
			}
			if isLess(y.Field(i), x.Field(i)) {
				return false
			}
		}
		return false
	case reflect.Interface:
		// Order invalid (nil) values first, then by dynamic type,
		// then by dynamic value.
		vx, vy := x.Elem(), y.Elem()
		if !vx.IsValid() || !vy.IsValid() {
			return !vx.IsValid() && vy.IsValid()
		}
		tx, ty := vx.Type(), vy.Type()
		if tx == ty {
			return isLess(x.Elem(), y.Elem())
		}
		if tx.Kind() != ty.Kind() {
			return vx.Kind() < vy.Kind()
		}
		if tx.String() != ty.String() {
			return tx.String() < ty.String()
		}
		if tx.PkgPath() != ty.PkgPath() {
			return tx.PkgPath() < ty.PkgPath()
		}
		// This can happen in rare situations, so we fallback to just comparing
		// the unique pointer for a reflect.Type. This guarantees deterministic
		// ordering within a program, but it is obviously not stable.
		return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
	default:
		// Must be Func, Map, or Slice; which are not comparable.
		panic(fmt.Sprintf("%T is not comparable", x.Type()))
	}
}
|
||||
554
vendor/github.com/google/go-cmp/cmp/options.go
generated
vendored
Normal file
554
vendor/github.com/google/go-cmp/cmp/options.go
generated
vendored
Normal file
@@ -0,0 +1,554 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/function"
|
||||
)
|
||||
|
||||
// Option configures for specific behavior of [Equal] and [Diff]. In particular,
// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]),
// configure how equality is determined.
//
// The fundamental options may be composed with filters ([FilterPath] and
// [FilterValues]) to control the scope over which they are applied.
//
// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions
// for creating options that may be used with [Equal] and [Diff].
type Option interface {
	// filter applies all filters and returns the option that remains.
	// Each option may only read s.curPath and call s.callTTBFunc.
	//
	// An Options is returned only if multiple comparers or transformers
	// can apply simultaneously and will only contain values of those types
	// or sub-Options containing values of those types.
	filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption
}
|
||||
|
||||
// applicableOption represents the following types:
//
//	Fundamental: ignore | validator | *comparer | *transformer
//	Grouping:    Options
type applicableOption interface {
	Option

	// apply executes the option, which may mutate s or panic.
	apply(s *state, vx, vy reflect.Value)
}
|
||||
|
||||
// coreOption represents the following types:
//
//	Fundamental: ignore | validator | *comparer | *transformer
//	Filters:     *pathFilter | *valuesFilter
type coreOption interface {
	Option
	isCore()
}

// core is embedded by fundamental options and filters so that they
// satisfy the coreOption marker interface.
type core struct{}

func (core) isCore() {}
|
||||
|
||||
// Options is a list of [Option] values that also satisfies the [Option] interface.
// Helper comparison packages may return an Options value when packing multiple
// [Option] values into a single [Option]. When this package processes an Options,
// it will be implicitly expanded into a flat list.
//
// Applying a filter on an Options is equivalent to applying that same filter
// on all individual options held within.
type Options []Option

// filter reduces the list to the single applicable option for the current
// comparison, an Options of conflicting options, or nil if none apply.
func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) {
	for _, opt := range opts {
		switch opt := opt.filter(s, t, vx, vy); opt.(type) {
		case ignore:
			return ignore{} // Only ignore can short-circuit evaluation
		case validator:
			out = validator{} // Takes precedence over comparer or transformer
		case *comparer, *transformer, Options:
			switch out.(type) {
			case nil:
				out = opt
			case validator:
				// Keep validator
			case *comparer, *transformer, Options:
				out = Options{out, opt} // Conflicting comparers or transformers
			}
		}
	}
	return out
}

// apply is only reached when multiple options remained applicable after
// filtering; it always panics with a listing of the conflicting options.
func (opts Options) apply(s *state, _, _ reflect.Value) {
	const warning = "ambiguous set of applicable options"
	const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
	var ss []string
	for _, opt := range flattenOptions(nil, opts) {
		ss = append(ss, fmt.Sprint(opt))
	}
	set := strings.Join(ss, "\n\t")
	panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
}

// String formats the list as "Options{opt1, opt2, ...}".
func (opts Options) String() string {
	var ss []string
	for _, opt := range opts {
		ss = append(ss, fmt.Sprint(opt))
	}
	return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
}
|
||||
|
||||
// FilterPath returns a new [Option] where opt is only evaluated if filter f
// returns true for the current [Path] in the value tree.
//
// This filter is called even if a slice element or map entry is missing and
// provides an opportunity to ignore such cases. The filter function must be
// symmetric such that the filter result is identical regardless of whether the
// missing value is from x or y.
//
// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
// a previously filtered [Option].
func FilterPath(f func(Path) bool, opt Option) Option {
	if f == nil {
		panic("invalid path filter function")
	}
	if opt := normalizeOption(opt); opt != nil {
		return &pathFilter{fnc: f, opt: opt}
	}
	return nil
}

// pathFilter wraps an option so that it only applies when the predicate
// fnc reports true for the current path.
type pathFilter struct {
	core
	fnc func(Path) bool
	opt Option
}

func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
	if f.fnc(s.curPath) {
		return f.opt.filter(s, t, vx, vy)
	}
	return nil
}

func (f pathFilter) String() string {
	return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
}
|
||||
|
||||
// FilterValues returns a new [Option] where opt is only evaluated if filter f,
// which is a function of the form "func(T, T) bool", returns true for the
// current pair of values being compared. If either value is invalid or
// the type of the values is not assignable to T, then this filter implicitly
// returns false.
//
// The filter function must be
// symmetric (i.e., agnostic to the order of the inputs) and
// deterministic (i.e., produces the same result when given the same inputs).
// If T is an interface, it is possible that f is called with two values with
// different concrete types that both implement T.
//
// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
// a previously filtered [Option].
func FilterValues(f interface{}, opt Option) Option {
	v := reflect.ValueOf(f)
	if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
		panic(fmt.Sprintf("invalid values filter function: %T", f))
	}
	if opt := normalizeOption(opt); opt != nil {
		vf := &valuesFilter{fnc: v, opt: opt}
		// Record the concrete input type T unless it is the empty interface,
		// so that filter can cheaply reject non-assignable values.
		if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
			vf.typ = ti
		}
		return vf
	}
	return nil
}

// valuesFilter wraps an option so that it only applies when the predicate
// fnc reports true for the current pair of values.
type valuesFilter struct {
	core
	typ reflect.Type  // T
	fnc reflect.Value // func(T, T) bool
	opt Option
}

func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
	if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() {
		return nil
	}
	if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
		return f.opt.filter(s, t, vx, vy)
	}
	return nil
}

func (f valuesFilter) String() string {
	return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
}
|
||||
|
||||
// Ignore is an [Option] that causes all comparisons to be ignored.
// This value is intended to be combined with [FilterPath] or [FilterValues].
// It is an error to pass an unfiltered Ignore option to [Equal].
func Ignore() Option { return ignore{} }

// ignore is the sentinel implementation behind [Ignore]; it always applies
// and reports the current comparison as ignored.
type ignore struct{ core }

func (ignore) isFiltered() bool                                                     { return false }
func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} }
func (ignore) apply(s *state, _, _ reflect.Value)                                   { s.report(true, reportByIgnore) }
func (ignore) String() string                                                       { return "Ignore()" }
|
||||
|
||||
// validator is a sentinel Option type to indicate that some options could not
// be evaluated due to unexported fields, missing slice elements, or
// missing map entries. Both values are validator only for unexported fields.
type validator struct{ core }

func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption {
	// Invalid values indicate a missing slice element or map entry.
	if !vx.IsValid() || !vy.IsValid() {
		return validator{}
	}
	// Non-interfaceable values indicate unexported fields.
	if !vx.CanInterface() || !vy.CanInterface() {
		return validator{}
	}
	return nil
}
func (validator) apply(s *state, vx, vy reflect.Value) {
	// Implies missing slice element or map entry.
	if !vx.IsValid() || !vy.IsValid() {
		s.report(vx.IsValid() == vy.IsValid(), 0)
		return
	}

	// Unable to Interface implies unexported field without visibility access.
	if !vx.CanInterface() || !vy.CanInterface() {
		help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
		var name string
		if t := s.curPath.Index(-2).Type(); t.Name() != "" {
			// Named type with unexported fields.
			name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
			if _, ok := reflect.New(t).Interface().(error); ok {
				help = "consider using cmpopts.EquateErrors to compare error values"
			} else if t.Comparable() {
				help = "consider using cmpopts.EquateComparable to compare comparable Go types"
			}
		} else {
			// Unnamed type with unexported fields. Derive PkgPath from field.
			var pkgPath string
			for i := 0; i < t.NumField() && pkgPath == ""; i++ {
				pkgPath = t.Field(i).PkgPath
			}
			name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int })
		}
		panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help))
	}

	// filter only returns validator for the two cases handled above.
	panic("not reachable")
}
|
||||
|
||||
// identRx represents a valid identifier according to the Go specification.
const identRx = `[_\p{L}][_\p{L}\p{N}]*`

// identsRx matches a qualified identifier: one or more identRx segments
// separated by dots.
var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)

// Transformer returns an [Option] that applies a transformation function that
// converts values of a certain type into that of another.
//
// The transformer f must be a function "func(T) R" that converts values of
// type T to those of type R and is implicitly filtered to input values
// assignable to T. The transformer must not mutate T in any way.
//
// To help prevent some cases of infinite recursive cycles applying the
// same transform to the output of itself (e.g., in the case where the
// input and output types are the same), an implicit filter is added such that
// a transformer is applicable only if that exact transformer is not already
// in the tail of the [Path] since the last non-[Transform] step.
// For situations where the implicit filter is still insufficient,
// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer],
// which adds a filter to prevent the transformer from
// being recursively applied upon itself.
//
// The name is a user provided label that is used as the [Transform.Name] in the
// transformation [PathStep] (and eventually shown in the [Diff] output).
// The name must be a valid identifier or qualified identifier in Go syntax.
// If empty, an arbitrary name is used.
func Transformer(name string, f interface{}) Option {
	v := reflect.ValueOf(f)
	if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
		panic(fmt.Sprintf("invalid transformer function: %T", f))
	}
	if name == "" {
		// Derive a name from the function symbol; fall back to a
		// placeholder if it is not a valid (qualified) identifier.
		name = function.NameOf(v)
		if !identsRx.MatchString(name) {
			name = "λ" // Lambda-symbol as placeholder name
		}
	} else if !identsRx.MatchString(name) {
		panic(fmt.Sprintf("invalid name: %q", name))
	}
	tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
	// Record the concrete input type T unless it is the empty interface.
	if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
		tr.typ = ti
	}
	return tr
}

// transformer is the Option implementation behind [Transformer].
type transformer struct {
	core
	name string
	typ  reflect.Type  // T
	fnc  reflect.Value // func(T) R
}

func (tr *transformer) isFiltered() bool { return tr.typ != nil }

func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption {
	// Walk the trailing run of Transform steps to avoid immediately
	// re-applying this same transformer to its own output.
	for i := len(s.curPath) - 1; i >= 0; i-- {
		if t, ok := s.curPath[i].(Transform); !ok {
			break // Hit most recent non-Transform step
		} else if tr == t.trans {
			return nil // Cannot directly use same Transform
		}
	}
	if tr.typ == nil || t.AssignableTo(tr.typ) {
		return tr
	}
	return nil
}
|
||||
|
||||
func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
|
||||
step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}}
|
||||
vvx := s.callTRFunc(tr.fnc, vx, step)
|
||||
vvy := s.callTRFunc(tr.fnc, vy, step)
|
||||
step.vx, step.vy = vvx, vvy
|
||||
s.compareAny(step)
|
||||
}
|
||||
|
||||
func (tr transformer) String() string {
|
||||
return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
|
||||
}
|
||||
|
||||
// Comparer returns an [Option] that determines whether two values are equal
|
||||
// to each other.
|
||||
//
|
||||
// The comparer f must be a function "func(T, T) bool" and is implicitly
|
||||
// filtered to input values assignable to T. If T is an interface, it is
|
||||
// possible that f is called with two values of different concrete types that
|
||||
// both implement T.
|
||||
//
|
||||
// The equality function must be:
|
||||
// - Symmetric: equal(x, y) == equal(y, x)
|
||||
// - Deterministic: equal(x, y) == equal(x, y)
|
||||
// - Pure: equal(x, y) does not modify x or y
|
||||
func Comparer(f interface{}) Option {
|
||||
v := reflect.ValueOf(f)
|
||||
if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
|
||||
panic(fmt.Sprintf("invalid comparer function: %T", f))
|
||||
}
|
||||
cm := &comparer{fnc: v}
|
||||
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
|
||||
cm.typ = ti
|
||||
}
|
||||
return cm
|
||||
}
|
||||
|
||||
type comparer struct {
|
||||
core
|
||||
typ reflect.Type // T
|
||||
fnc reflect.Value // func(T, T) bool
|
||||
}
|
||||
|
||||
func (cm *comparer) isFiltered() bool { return cm.typ != nil }
|
||||
|
||||
func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
if cm.typ == nil || t.AssignableTo(cm.typ) {
|
||||
return cm
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
|
||||
eq := s.callTTBFunc(cm.fnc, vx, vy)
|
||||
s.report(eq, reportByFunc)
|
||||
}
|
||||
|
||||
func (cm comparer) String() string {
|
||||
return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
|
||||
}
|
||||
|
||||
// Exporter returns an [Option] that specifies whether [Equal] is allowed to
|
||||
// introspect into the unexported fields of certain struct types.
|
||||
//
|
||||
// Users of this option must understand that comparing on unexported fields
|
||||
// from external packages is not safe since changes in the internal
|
||||
// implementation of some external package may cause the result of [Equal]
|
||||
// to unexpectedly change. However, it may be valid to use this option on types
|
||||
// defined in an internal package where the semantic meaning of an unexported
|
||||
// field is in the control of the user.
|
||||
//
|
||||
// In many cases, a custom [Comparer] should be used instead that defines
|
||||
// equality as a function of the public API of a type rather than the underlying
|
||||
// unexported implementation.
|
||||
//
|
||||
// For example, the [reflect.Type] documentation defines equality to be determined
|
||||
// by the == operator on the interface (essentially performing a shallow pointer
|
||||
// comparison) and most attempts to compare *[regexp.Regexp] types are interested
|
||||
// in only checking that the regular expression strings are equal.
|
||||
// Both of these are accomplished using [Comparer] options:
|
||||
//
|
||||
// Comparer(func(x, y reflect.Type) bool { return x == y })
|
||||
// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
|
||||
//
|
||||
// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]
|
||||
// option can be used to ignore all unexported fields on specified struct types.
|
||||
func Exporter(f func(reflect.Type) bool) Option {
|
||||
return exporter(f)
|
||||
}
|
||||
|
||||
type exporter func(reflect.Type) bool
|
||||
|
||||
func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect
|
||||
// unexported fields of the specified struct types.
|
||||
//
|
||||
// See [Exporter] for the proper use of this option.
|
||||
func AllowUnexported(types ...interface{}) Option {
|
||||
m := make(map[reflect.Type]bool)
|
||||
for _, typ := range types {
|
||||
t := reflect.TypeOf(typ)
|
||||
if t.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("invalid struct type: %T", typ))
|
||||
}
|
||||
m[t] = true
|
||||
}
|
||||
return exporter(func(t reflect.Type) bool { return m[t] })
|
||||
}
|
||||
|
||||
// Result represents the comparison result for a single node and
|
||||
// is provided by cmp when calling Report (see [Reporter]).
|
||||
type Result struct {
|
||||
_ [0]func() // Make Result incomparable
|
||||
flags resultFlags
|
||||
}
|
||||
|
||||
// Equal reports whether the node was determined to be equal or not.
|
||||
// As a special case, ignored nodes are considered equal.
|
||||
func (r Result) Equal() bool {
|
||||
return r.flags&(reportEqual|reportByIgnore) != 0
|
||||
}
|
||||
|
||||
// ByIgnore reports whether the node is equal because it was ignored.
|
||||
// This never reports true if [Result.Equal] reports false.
|
||||
func (r Result) ByIgnore() bool {
|
||||
return r.flags&reportByIgnore != 0
|
||||
}
|
||||
|
||||
// ByMethod reports whether the Equal method determined equality.
|
||||
func (r Result) ByMethod() bool {
|
||||
return r.flags&reportByMethod != 0
|
||||
}
|
||||
|
||||
// ByFunc reports whether a [Comparer] function determined equality.
|
||||
func (r Result) ByFunc() bool {
|
||||
return r.flags&reportByFunc != 0
|
||||
}
|
||||
|
||||
// ByCycle reports whether a reference cycle was detected.
|
||||
func (r Result) ByCycle() bool {
|
||||
return r.flags&reportByCycle != 0
|
||||
}
|
||||
|
||||
type resultFlags uint
|
||||
|
||||
const (
|
||||
_ resultFlags = (1 << iota) / 2
|
||||
|
||||
reportEqual
|
||||
reportUnequal
|
||||
reportByIgnore
|
||||
reportByMethod
|
||||
reportByFunc
|
||||
reportByCycle
|
||||
)
|
||||
|
||||
// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses
|
||||
// the value trees, it calls PushStep as it descends into each node in the
|
||||
// tree and PopStep as it ascend out of the node. The leaves of the tree are
|
||||
// either compared (determined to be equal or not equal) or ignored and reported
|
||||
// as such by calling the Report method.
|
||||
func Reporter(r interface {
|
||||
// PushStep is called when a tree-traversal operation is performed.
|
||||
// The PathStep itself is only valid until the step is popped.
|
||||
// The PathStep.Values are valid for the duration of the entire traversal
|
||||
// and must not be mutated.
|
||||
//
|
||||
// Equal always calls PushStep at the start to provide an operation-less
|
||||
// PathStep used to report the root values.
|
||||
//
|
||||
// Within a slice, the exact set of inserted, removed, or modified elements
|
||||
// is unspecified and may change in future implementations.
|
||||
// The entries of a map are iterated through in an unspecified order.
|
||||
PushStep(PathStep)
|
||||
|
||||
// Report is called exactly once on leaf nodes to report whether the
|
||||
// comparison identified the node as equal, unequal, or ignored.
|
||||
// A leaf node is one that is immediately preceded by and followed by
|
||||
// a pair of PushStep and PopStep calls.
|
||||
Report(Result)
|
||||
|
||||
// PopStep ascends back up the value tree.
|
||||
// There is always a matching pop call for every push call.
|
||||
PopStep()
|
||||
}) Option {
|
||||
return reporter{r}
|
||||
}
|
||||
|
||||
type reporter struct{ reporterIface }
|
||||
type reporterIface interface {
|
||||
PushStep(PathStep)
|
||||
Report(Result)
|
||||
PopStep()
|
||||
}
|
||||
|
||||
func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// normalizeOption normalizes the input options such that all Options groups
|
||||
// are flattened and groups with a single element are reduced to that element.
|
||||
// Only coreOptions and Options containing coreOptions are allowed.
|
||||
func normalizeOption(src Option) Option {
|
||||
switch opts := flattenOptions(nil, Options{src}); len(opts) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
return opts[0]
|
||||
default:
|
||||
return opts
|
||||
}
|
||||
}
|
||||
|
||||
// flattenOptions copies all options in src to dst as a flat list.
|
||||
// Only coreOptions and Options containing coreOptions are allowed.
|
||||
func flattenOptions(dst, src Options) Options {
|
||||
for _, opt := range src {
|
||||
switch opt := opt.(type) {
|
||||
case nil:
|
||||
continue
|
||||
case Options:
|
||||
dst = flattenOptions(dst, opt)
|
||||
case coreOption:
|
||||
dst = append(dst, opt)
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid option type: %T", opt))
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
390
vendor/github.com/google/go-cmp/cmp/path.go
generated
vendored
Normal file
390
vendor/github.com/google/go-cmp/cmp/path.go
generated
vendored
Normal file
@@ -0,0 +1,390 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
// Path is a list of [PathStep] describing the sequence of operations to get
|
||||
// from some root type to the current position in the value tree.
|
||||
// The first Path element is always an operation-less [PathStep] that exists
|
||||
// simply to identify the initial type.
|
||||
//
|
||||
// When traversing structs with embedded structs, the embedded struct will
|
||||
// always be accessed as a field before traversing the fields of the
|
||||
// embedded struct themselves. That is, an exported field from the
|
||||
// embedded struct will never be accessed directly from the parent struct.
|
||||
type Path []PathStep
|
||||
|
||||
// PathStep is a union-type for specific operations to traverse
|
||||
// a value's tree structure. Users of this package never need to implement
|
||||
// these types as values of this type will be returned by this package.
|
||||
//
|
||||
// Implementations of this interface:
|
||||
// - [StructField]
|
||||
// - [SliceIndex]
|
||||
// - [MapIndex]
|
||||
// - [Indirect]
|
||||
// - [TypeAssertion]
|
||||
// - [Transform]
|
||||
type PathStep interface {
|
||||
String() string
|
||||
|
||||
// Type is the resulting type after performing the path step.
|
||||
Type() reflect.Type
|
||||
|
||||
// Values is the resulting values after performing the path step.
|
||||
// The type of each valid value is guaranteed to be identical to Type.
|
||||
//
|
||||
// In some cases, one or both may be invalid or have restrictions:
|
||||
// - For StructField, both are not interface-able if the current field
|
||||
// is unexported and the struct type is not explicitly permitted by
|
||||
// an Exporter to traverse unexported fields.
|
||||
// - For SliceIndex, one may be invalid if an element is missing from
|
||||
// either the x or y slice.
|
||||
// - For MapIndex, one may be invalid if an entry is missing from
|
||||
// either the x or y map.
|
||||
//
|
||||
// The provided values must not be mutated.
|
||||
Values() (vx, vy reflect.Value)
|
||||
}
|
||||
|
||||
var (
|
||||
_ PathStep = StructField{}
|
||||
_ PathStep = SliceIndex{}
|
||||
_ PathStep = MapIndex{}
|
||||
_ PathStep = Indirect{}
|
||||
_ PathStep = TypeAssertion{}
|
||||
_ PathStep = Transform{}
|
||||
)
|
||||
|
||||
func (pa *Path) push(s PathStep) {
|
||||
*pa = append(*pa, s)
|
||||
}
|
||||
|
||||
func (pa *Path) pop() {
|
||||
*pa = (*pa)[:len(*pa)-1]
|
||||
}
|
||||
|
||||
// Last returns the last [PathStep] in the Path.
|
||||
// If the path is empty, this returns a non-nil [PathStep]
|
||||
// that reports a nil [PathStep.Type].
|
||||
func (pa Path) Last() PathStep {
|
||||
return pa.Index(-1)
|
||||
}
|
||||
|
||||
// Index returns the ith step in the Path and supports negative indexing.
|
||||
// A negative index starts counting from the tail of the Path such that -1
|
||||
// refers to the last step, -2 refers to the second-to-last step, and so on.
|
||||
// If index is invalid, this returns a non-nil [PathStep]
|
||||
// that reports a nil [PathStep.Type].
|
||||
func (pa Path) Index(i int) PathStep {
|
||||
if i < 0 {
|
||||
i = len(pa) + i
|
||||
}
|
||||
if i < 0 || i >= len(pa) {
|
||||
return pathStep{}
|
||||
}
|
||||
return pa[i]
|
||||
}
|
||||
|
||||
// String returns the simplified path to a node.
|
||||
// The simplified path only contains struct field accesses.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// MyMap.MySlices.MyField
|
||||
func (pa Path) String() string {
|
||||
var ss []string
|
||||
for _, s := range pa {
|
||||
if _, ok := s.(StructField); ok {
|
||||
ss = append(ss, s.String())
|
||||
}
|
||||
}
|
||||
return strings.TrimPrefix(strings.Join(ss, ""), ".")
|
||||
}
|
||||
|
||||
// GoString returns the path to a specific node using Go syntax.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
|
||||
func (pa Path) GoString() string {
|
||||
var ssPre, ssPost []string
|
||||
var numIndirect int
|
||||
for i, s := range pa {
|
||||
var nextStep PathStep
|
||||
if i+1 < len(pa) {
|
||||
nextStep = pa[i+1]
|
||||
}
|
||||
switch s := s.(type) {
|
||||
case Indirect:
|
||||
numIndirect++
|
||||
pPre, pPost := "(", ")"
|
||||
switch nextStep.(type) {
|
||||
case Indirect:
|
||||
continue // Next step is indirection, so let them batch up
|
||||
case StructField:
|
||||
numIndirect-- // Automatic indirection on struct fields
|
||||
case nil:
|
||||
pPre, pPost = "", "" // Last step; no need for parenthesis
|
||||
}
|
||||
if numIndirect > 0 {
|
||||
ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
|
||||
ssPost = append(ssPost, pPost)
|
||||
}
|
||||
numIndirect = 0
|
||||
continue
|
||||
case Transform:
|
||||
ssPre = append(ssPre, s.trans.name+"(")
|
||||
ssPost = append(ssPost, ")")
|
||||
continue
|
||||
}
|
||||
ssPost = append(ssPost, s.String())
|
||||
}
|
||||
for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
|
||||
ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
|
||||
}
|
||||
return strings.Join(ssPre, "") + strings.Join(ssPost, "")
|
||||
}
|
||||
|
||||
type pathStep struct {
|
||||
typ reflect.Type
|
||||
vx, vy reflect.Value
|
||||
}
|
||||
|
||||
func (ps pathStep) Type() reflect.Type { return ps.typ }
|
||||
func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy }
|
||||
func (ps pathStep) String() string {
|
||||
if ps.typ == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
s := value.TypeString(ps.typ, false)
|
||||
if s == "" || strings.ContainsAny(s, "{}\n") {
|
||||
return "root" // Type too simple or complex to print
|
||||
}
|
||||
return fmt.Sprintf("{%s}", s)
|
||||
}
|
||||
|
||||
// StructField is a [PathStep] that represents a struct field access
|
||||
// on a field called [StructField.Name].
|
||||
type StructField struct{ *structField }
|
||||
type structField struct {
|
||||
pathStep
|
||||
name string
|
||||
idx int
|
||||
|
||||
// These fields are used for forcibly accessing an unexported field.
|
||||
// pvx, pvy, and field are only valid if unexported is true.
|
||||
unexported bool
|
||||
mayForce bool // Forcibly allow visibility
|
||||
paddr bool // Was parent addressable?
|
||||
pvx, pvy reflect.Value // Parent values (always addressable)
|
||||
field reflect.StructField // Field information
|
||||
}
|
||||
|
||||
func (sf StructField) Type() reflect.Type { return sf.typ }
|
||||
func (sf StructField) Values() (vx, vy reflect.Value) {
|
||||
if !sf.unexported {
|
||||
return sf.vx, sf.vy // CanInterface reports true
|
||||
}
|
||||
|
||||
// Forcibly obtain read-write access to an unexported struct field.
|
||||
if sf.mayForce {
|
||||
vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr)
|
||||
vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr)
|
||||
return vx, vy // CanInterface reports true
|
||||
}
|
||||
return sf.vx, sf.vy // CanInterface reports false
|
||||
}
|
||||
func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
|
||||
|
||||
// Name is the field name.
|
||||
func (sf StructField) Name() string { return sf.name }
|
||||
|
||||
// Index is the index of the field in the parent struct type.
|
||||
// See [reflect.Type.Field].
|
||||
func (sf StructField) Index() int { return sf.idx }
|
||||
|
||||
// SliceIndex is a [PathStep] that represents an index operation on
|
||||
// a slice or array at some index [SliceIndex.Key].
|
||||
type SliceIndex struct{ *sliceIndex }
|
||||
type sliceIndex struct {
|
||||
pathStep
|
||||
xkey, ykey int
|
||||
isSlice bool // False for reflect.Array
|
||||
}
|
||||
|
||||
func (si SliceIndex) Type() reflect.Type { return si.typ }
|
||||
func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy }
|
||||
func (si SliceIndex) String() string {
|
||||
switch {
|
||||
case si.xkey == si.ykey:
|
||||
return fmt.Sprintf("[%d]", si.xkey)
|
||||
case si.ykey == -1:
|
||||
// [5->?] means "I don't know where X[5] went"
|
||||
return fmt.Sprintf("[%d->?]", si.xkey)
|
||||
case si.xkey == -1:
|
||||
// [?->3] means "I don't know where Y[3] came from"
|
||||
return fmt.Sprintf("[?->%d]", si.ykey)
|
||||
default:
|
||||
// [5->3] means "X[5] moved to Y[3]"
|
||||
return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
|
||||
}
|
||||
}
|
||||
|
||||
// Key is the index key; it may return -1 if in a split state
|
||||
func (si SliceIndex) Key() int {
|
||||
if si.xkey != si.ykey {
|
||||
return -1
|
||||
}
|
||||
return si.xkey
|
||||
}
|
||||
|
||||
// SplitKeys are the indexes for indexing into slices in the
|
||||
// x and y values, respectively. These indexes may differ due to the
|
||||
// insertion or removal of an element in one of the slices, causing
|
||||
// all of the indexes to be shifted. If an index is -1, then that
|
||||
// indicates that the element does not exist in the associated slice.
|
||||
//
|
||||
// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes
|
||||
// returned by SplitKeys are not the same. SplitKeys will never return -1 for
|
||||
// both indexes.
|
||||
func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
|
||||
|
||||
// MapIndex is a [PathStep] that represents an index operation on a map at some index Key.
|
||||
type MapIndex struct{ *mapIndex }
|
||||
type mapIndex struct {
|
||||
pathStep
|
||||
key reflect.Value
|
||||
}
|
||||
|
||||
func (mi MapIndex) Type() reflect.Type { return mi.typ }
|
||||
func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy }
|
||||
func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
|
||||
|
||||
// Key is the value of the map key.
|
||||
func (mi MapIndex) Key() reflect.Value { return mi.key }
|
||||
|
||||
// Indirect is a [PathStep] that represents pointer indirection on the parent type.
|
||||
type Indirect struct{ *indirect }
|
||||
type indirect struct {
|
||||
pathStep
|
||||
}
|
||||
|
||||
func (in Indirect) Type() reflect.Type { return in.typ }
|
||||
func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
|
||||
func (in Indirect) String() string { return "*" }
|
||||
|
||||
// TypeAssertion is a [PathStep] that represents a type assertion on an interface.
|
||||
type TypeAssertion struct{ *typeAssertion }
|
||||
type typeAssertion struct {
|
||||
pathStep
|
||||
}
|
||||
|
||||
func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
|
||||
func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
|
||||
func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
|
||||
|
||||
// Transform is a [PathStep] that represents a transformation
|
||||
// from the parent type to the current type.
|
||||
type Transform struct{ *transform }
|
||||
type transform struct {
|
||||
pathStep
|
||||
trans *transformer
|
||||
}
|
||||
|
||||
func (tf Transform) Type() reflect.Type { return tf.typ }
|
||||
func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
|
||||
func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
|
||||
|
||||
// Name is the name of the [Transformer].
|
||||
func (tf Transform) Name() string { return tf.trans.name }
|
||||
|
||||
// Func is the function pointer to the transformer function.
|
||||
func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
|
||||
|
||||
// Option returns the originally constructed [Transformer] option.
|
||||
// The == operator can be used to detect the exact option used.
|
||||
func (tf Transform) Option() Option { return tf.trans }
|
||||
|
||||
// pointerPath represents a dual-stack of pointers encountered when
|
||||
// recursively traversing the x and y values. This data structure supports
|
||||
// detection of cycles and determining whether the cycles are equal.
|
||||
// In Go, cycles can occur via pointers, slices, and maps.
|
||||
//
|
||||
// The pointerPath uses a map to represent a stack; where descension into a
|
||||
// pointer pushes the address onto the stack, and ascension from a pointer
|
||||
// pops the address from the stack. Thus, when traversing into a pointer from
|
||||
// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles
|
||||
// by checking whether the pointer has already been visited. The cycle detection
|
||||
// uses a separate stack for the x and y values.
|
||||
//
|
||||
// If a cycle is detected we need to determine whether the two pointers
|
||||
// should be considered equal. The definition of equality chosen by Equal
|
||||
// requires two graphs to have the same structure. To determine this, both the
|
||||
// x and y values must have a cycle where the previous pointers were also
|
||||
// encountered together as a pair.
|
||||
//
|
||||
// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and
|
||||
// MapIndex with pointer information for the x and y values.
|
||||
// Suppose px and py are two pointers to compare, we then search the
|
||||
// Path for whether px was ever encountered in the Path history of x, and
|
||||
// similarly so with py. If either side has a cycle, the comparison is only
|
||||
// equal if both px and py have a cycle resulting from the same PathStep.
|
||||
//
|
||||
// Using a map as a stack is more performant as we can perform cycle detection
|
||||
// in O(1) instead of O(N) where N is len(Path).
|
||||
type pointerPath struct {
|
||||
// mx is keyed by x pointers, where the value is the associated y pointer.
|
||||
mx map[value.Pointer]value.Pointer
|
||||
// my is keyed by y pointers, where the value is the associated x pointer.
|
||||
my map[value.Pointer]value.Pointer
|
||||
}
|
||||
|
||||
func (p *pointerPath) Init() {
|
||||
p.mx = make(map[value.Pointer]value.Pointer)
|
||||
p.my = make(map[value.Pointer]value.Pointer)
|
||||
}
|
||||
|
||||
// Push indicates intent to descend into pointers vx and vy where
|
||||
// visited reports whether either has been seen before. If visited before,
|
||||
// equal reports whether both pointers were encountered together.
|
||||
// Pop must be called if and only if the pointers were never visited.
|
||||
//
|
||||
// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map
|
||||
// and be non-nil.
|
||||
func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) {
|
||||
px := value.PointerOf(vx)
|
||||
py := value.PointerOf(vy)
|
||||
_, ok1 := p.mx[px]
|
||||
_, ok2 := p.my[py]
|
||||
if ok1 || ok2 {
|
||||
equal = p.mx[px] == py && p.my[py] == px // Pointers paired together
|
||||
return equal, true
|
||||
}
|
||||
p.mx[px] = py
|
||||
p.my[py] = px
|
||||
return false, false
|
||||
}
|
||||
|
||||
// Pop ascends from pointers vx and vy.
|
||||
func (p pointerPath) Pop(vx, vy reflect.Value) {
|
||||
delete(p.mx, value.PointerOf(vx))
|
||||
delete(p.my, value.PointerOf(vy))
|
||||
}
|
||||
|
||||
// isExported reports whether the identifier is exported.
|
||||
func isExported(id string) bool {
|
||||
r, _ := utf8.DecodeRuneInString(id)
|
||||
return unicode.IsUpper(r)
|
||||
}
|
||||
54
vendor/github.com/google/go-cmp/cmp/report.go
generated
vendored
Normal file
54
vendor/github.com/google/go-cmp/cmp/report.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
// defaultReporter implements the reporter interface.
|
||||
//
|
||||
// As Equal serially calls the PushStep, Report, and PopStep methods, the
|
||||
// defaultReporter constructs a tree-based representation of the compared value
|
||||
// and the result of each comparison (see valueNode).
|
||||
//
|
||||
// When the String method is called, the FormatDiff method transforms the
|
||||
// valueNode tree into a textNode tree, which is a tree-based representation
|
||||
// of the textual output (see textNode).
|
||||
//
|
||||
// Lastly, the textNode.String method produces the final report as a string.
|
||||
type defaultReporter struct {
|
||||
root *valueNode
|
||||
curr *valueNode
|
||||
}
|
||||
|
||||
func (r *defaultReporter) PushStep(ps PathStep) {
|
||||
r.curr = r.curr.PushStep(ps)
|
||||
if r.root == nil {
|
||||
r.root = r.curr
|
||||
}
|
||||
}
|
||||
func (r *defaultReporter) Report(rs Result) {
|
||||
r.curr.Report(rs)
|
||||
}
|
||||
func (r *defaultReporter) PopStep() {
|
||||
r.curr = r.curr.PopStep()
|
||||
}
|
||||
|
||||
// String provides a full report of the differences detected as a structured
|
||||
// literal in pseudo-Go syntax. String may only be called after the entire tree
|
||||
// has been traversed.
|
||||
func (r *defaultReporter) String() string {
|
||||
assert(r.root != nil && r.curr == nil)
|
||||
if r.root.NumDiff == 0 {
|
||||
return ""
|
||||
}
|
||||
ptrs := new(pointerReferences)
|
||||
text := formatOptions{}.FormatDiff(r.root, ptrs)
|
||||
resolveReferences(text)
|
||||
return text.String()
|
||||
}
|
||||
|
||||
func assert(ok bool) {
|
||||
if !ok {
|
||||
panic("assertion failure")
|
||||
}
|
||||
}
|
||||
433
vendor/github.com/google/go-cmp/cmp/report_compare.go
generated
vendored
Normal file
433
vendor/github.com/google/go-cmp/cmp/report_compare.go
generated
vendored
Normal file
@@ -0,0 +1,433 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// numContextRecords is the number of surrounding equal records to print.
|
||||
const numContextRecords = 2
|
||||
|
||||
type diffMode byte
|
||||
|
||||
const (
|
||||
diffUnknown diffMode = 0
|
||||
diffIdentical diffMode = ' '
|
||||
diffRemoved diffMode = '-'
|
||||
diffInserted diffMode = '+'
|
||||
)
|
||||
|
||||
type typeMode int
|
||||
|
||||
const (
|
||||
// emitType always prints the type.
|
||||
emitType typeMode = iota
|
||||
// elideType never prints the type.
|
||||
elideType
|
||||
// autoType prints the type only for composite kinds
|
||||
// (i.e., structs, slices, arrays, and maps).
|
||||
autoType
|
||||
)
|
||||
|
||||
type formatOptions struct {
|
||||
// DiffMode controls the output mode of FormatDiff.
|
||||
//
|
||||
// If diffUnknown, then produce a diff of the x and y values.
|
||||
// If diffIdentical, then emit values as if they were equal.
|
||||
// If diffRemoved, then only emit x values (ignoring y values).
|
||||
// If diffInserted, then only emit y values (ignoring x values).
|
||||
DiffMode diffMode
|
||||
|
||||
// TypeMode controls whether to print the type for the current node.
|
||||
//
|
||||
// As a general rule of thumb, we always print the type of the next node
|
||||
// after an interface, and always elide the type of the next node after
|
||||
// a slice or map node.
|
||||
TypeMode typeMode
|
||||
|
||||
// formatValueOptions are options specific to printing reflect.Values.
|
||||
formatValueOptions
|
||||
}
|
||||
|
||||
func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
|
||||
opts.DiffMode = d
|
||||
return opts
|
||||
}
|
||||
func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
|
||||
opts.TypeMode = t
|
||||
return opts
|
||||
}
|
||||
func (opts formatOptions) WithVerbosity(level int) formatOptions {
|
||||
opts.VerbosityLevel = level
|
||||
opts.LimitVerbosity = true
|
||||
return opts
|
||||
}
|
||||
func (opts formatOptions) verbosity() uint {
|
||||
switch {
|
||||
case opts.VerbosityLevel < 0:
|
||||
return 0
|
||||
case opts.VerbosityLevel > 16:
|
||||
return 16 // some reasonable maximum to avoid shift overflow
|
||||
default:
|
||||
return uint(opts.VerbosityLevel)
|
||||
}
|
||||
}
|
||||
|
||||
const maxVerbosityPreset = 6
|
||||
|
||||
// verbosityPreset modifies the verbosity settings given an index
|
||||
// between 0 and maxVerbosityPreset, inclusive.
|
||||
func verbosityPreset(opts formatOptions, i int) formatOptions {
|
||||
opts.VerbosityLevel = int(opts.verbosity()) + 2*i
|
||||
if i > 0 {
|
||||
opts.AvoidStringer = true
|
||||
}
|
||||
if i >= maxVerbosityPreset {
|
||||
opts.PrintAddresses = true
|
||||
opts.QualifiedNames = true
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
// FormatDiff converts a valueNode tree into a textNode tree, where the later
|
||||
// is a textual representation of the differences detected in the former.
|
||||
func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
|
||||
if opts.DiffMode == diffIdentical {
|
||||
opts = opts.WithVerbosity(1)
|
||||
} else if opts.verbosity() < 3 {
|
||||
opts = opts.WithVerbosity(3)
|
||||
}
|
||||
|
||||
// Check whether we have specialized formatting for this node.
|
||||
// This is not necessary, but helpful for producing more readable outputs.
|
||||
if opts.CanFormatDiffSlice(v) {
|
||||
return opts.FormatDiffSlice(v)
|
||||
}
|
||||
|
||||
var parentKind reflect.Kind
|
||||
if v.parent != nil && v.parent.TransformerName == "" {
|
||||
parentKind = v.parent.Type.Kind()
|
||||
}
|
||||
|
||||
// For leaf nodes, format the value based on the reflect.Values alone.
|
||||
// As a special case, treat equal []byte as a leaf nodes.
|
||||
isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
|
||||
isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
|
||||
if v.MaxDepth == 0 || isEqualBytes {
|
||||
switch opts.DiffMode {
|
||||
case diffUnknown, diffIdentical:
|
||||
// Format Equal.
|
||||
if v.NumDiff == 0 {
|
||||
outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
|
||||
outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
|
||||
if v.NumIgnored > 0 && v.NumSame == 0 {
|
||||
return textEllipsis
|
||||
} else if outx.Len() < outy.Len() {
|
||||
return outx
|
||||
} else {
|
||||
return outy
|
||||
}
|
||||
}
|
||||
|
||||
// Format unequal.
|
||||
assert(opts.DiffMode == diffUnknown)
|
||||
var list textList
|
||||
outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs)
|
||||
outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs)
|
||||
for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
|
||||
opts2 := verbosityPreset(opts, i).WithTypeMode(elideType)
|
||||
outx = opts2.FormatValue(v.ValueX, parentKind, ptrs)
|
||||
outy = opts2.FormatValue(v.ValueY, parentKind, ptrs)
|
||||
}
|
||||
if outx != nil {
|
||||
list = append(list, textRecord{Diff: '-', Value: outx})
|
||||
}
|
||||
if outy != nil {
|
||||
list = append(list, textRecord{Diff: '+', Value: outy})
|
||||
}
|
||||
return opts.WithTypeMode(emitType).FormatType(v.Type, list)
|
||||
case diffRemoved:
|
||||
return opts.FormatValue(v.ValueX, parentKind, ptrs)
|
||||
case diffInserted:
|
||||
return opts.FormatValue(v.ValueY, parentKind, ptrs)
|
||||
default:
|
||||
panic("invalid diff mode")
|
||||
}
|
||||
}
|
||||
|
||||
// Register slice element to support cycle detection.
|
||||
if parentKind == reflect.Slice {
|
||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true)
|
||||
defer ptrs.Pop()
|
||||
defer func() { out = wrapTrunkReferences(ptrRefs, out) }()
|
||||
}
|
||||
|
||||
// Descend into the child value node.
|
||||
if v.TransformerName != "" {
|
||||
out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
|
||||
out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"}
|
||||
return opts.FormatType(v.Type, out)
|
||||
} else {
|
||||
switch k := v.Type.Kind(); k {
|
||||
case reflect.Struct, reflect.Array, reflect.Slice:
|
||||
out = opts.formatDiffList(v.Records, k, ptrs)
|
||||
out = opts.FormatType(v.Type, out)
|
||||
case reflect.Map:
|
||||
// Register map to support cycle detection.
|
||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
|
||||
defer ptrs.Pop()
|
||||
|
||||
out = opts.formatDiffList(v.Records, k, ptrs)
|
||||
out = wrapTrunkReferences(ptrRefs, out)
|
||||
out = opts.FormatType(v.Type, out)
|
||||
case reflect.Ptr:
|
||||
// Register pointer to support cycle detection.
|
||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
|
||||
defer ptrs.Pop()
|
||||
|
||||
out = opts.FormatDiff(v.Value, ptrs)
|
||||
out = wrapTrunkReferences(ptrRefs, out)
|
||||
out = &textWrap{Prefix: "&", Value: out}
|
||||
case reflect.Interface:
|
||||
out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v cannot have children", k))
|
||||
}
|
||||
return out
|
||||
}
|
||||
}
|
||||
|
||||
func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode {
|
||||
// Derive record name based on the data structure kind.
|
||||
var name string
|
||||
var formatKey func(reflect.Value) string
|
||||
switch k {
|
||||
case reflect.Struct:
|
||||
name = "field"
|
||||
opts = opts.WithTypeMode(autoType)
|
||||
formatKey = func(v reflect.Value) string { return v.String() }
|
||||
case reflect.Slice, reflect.Array:
|
||||
name = "element"
|
||||
opts = opts.WithTypeMode(elideType)
|
||||
formatKey = func(reflect.Value) string { return "" }
|
||||
case reflect.Map:
|
||||
name = "entry"
|
||||
opts = opts.WithTypeMode(elideType)
|
||||
formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) }
|
||||
}
|
||||
|
||||
maxLen := -1
|
||||
if opts.LimitVerbosity {
|
||||
if opts.DiffMode == diffIdentical {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
} else {
|
||||
maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc...
|
||||
}
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
|
||||
// Handle unification.
|
||||
switch opts.DiffMode {
|
||||
case diffIdentical, diffRemoved, diffInserted:
|
||||
var list textList
|
||||
var deferredEllipsis bool // Add final "..." to indicate records were dropped
|
||||
for _, r := range recs {
|
||||
if len(list) == maxLen {
|
||||
deferredEllipsis = true
|
||||
break
|
||||
}
|
||||
|
||||
// Elide struct fields that are zero value.
|
||||
if k == reflect.Struct {
|
||||
var isZero bool
|
||||
switch opts.DiffMode {
|
||||
case diffIdentical:
|
||||
isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero()
|
||||
case diffRemoved:
|
||||
isZero = r.Value.ValueX.IsZero()
|
||||
case diffInserted:
|
||||
isZero = r.Value.ValueY.IsZero()
|
||||
}
|
||||
if isZero {
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Elide ignored nodes.
|
||||
if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
|
||||
deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
|
||||
if !deferredEllipsis {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
}
|
||||
continue
|
||||
}
|
||||
if out := opts.FormatDiff(r.Value, ptrs); out != nil {
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
}
|
||||
}
|
||||
if deferredEllipsis {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
}
|
||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
case diffUnknown:
|
||||
default:
|
||||
panic("invalid diff mode")
|
||||
}
|
||||
|
||||
// Handle differencing.
|
||||
var numDiffs int
|
||||
var list textList
|
||||
var keys []reflect.Value // invariant: len(list) == len(keys)
|
||||
groups := coalesceAdjacentRecords(name, recs)
|
||||
maxGroup := diffStats{Name: name}
|
||||
for i, ds := range groups {
|
||||
if maxLen >= 0 && numDiffs >= maxLen {
|
||||
maxGroup = maxGroup.Append(ds)
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle equal records.
|
||||
if ds.NumDiff() == 0 {
|
||||
// Compute the number of leading and trailing records to print.
|
||||
var numLo, numHi int
|
||||
numEqual := ds.NumIgnored + ds.NumIdentical
|
||||
for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
|
||||
if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
|
||||
break
|
||||
}
|
||||
numLo++
|
||||
}
|
||||
for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
|
||||
if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
|
||||
break
|
||||
}
|
||||
numHi++
|
||||
}
|
||||
if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
|
||||
numHi++ // Avoid pointless coalescing of a single equal record
|
||||
}
|
||||
|
||||
// Format the equal values.
|
||||
for _, r := range recs[:numLo] {
|
||||
out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
if numEqual > numLo+numHi {
|
||||
ds.NumIdentical -= numLo + numHi
|
||||
list.AppendEllipsis(ds)
|
||||
for len(keys) < len(list) {
|
||||
keys = append(keys, reflect.Value{})
|
||||
}
|
||||
}
|
||||
for _, r := range recs[numEqual-numHi : numEqual] {
|
||||
out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
recs = recs[numEqual:]
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle unequal records.
|
||||
for _, r := range recs[:ds.NumDiff()] {
|
||||
switch {
|
||||
case opts.CanFormatDiffSlice(r.Value):
|
||||
out := opts.FormatDiffSlice(r.Value)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
case r.Value.NumChildren == r.Value.MaxDepth:
|
||||
outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
|
||||
outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
|
||||
for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
|
||||
opts2 := verbosityPreset(opts, i)
|
||||
outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
|
||||
outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
|
||||
}
|
||||
if outx != nil {
|
||||
list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
if outy != nil {
|
||||
list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
default:
|
||||
out := opts.FormatDiff(r.Value, ptrs)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
}
|
||||
recs = recs[ds.NumDiff():]
|
||||
numDiffs += ds.NumDiff()
|
||||
}
|
||||
if maxGroup.IsZero() {
|
||||
assert(len(recs) == 0)
|
||||
} else {
|
||||
list.AppendEllipsis(maxGroup)
|
||||
for len(keys) < len(list) {
|
||||
keys = append(keys, reflect.Value{})
|
||||
}
|
||||
}
|
||||
assert(len(list) == len(keys))
|
||||
|
||||
// For maps, the default formatting logic uses fmt.Stringer which may
|
||||
// produce ambiguous output. Avoid calling String to disambiguate.
|
||||
if k == reflect.Map {
|
||||
var ambiguous bool
|
||||
seenKeys := map[string]reflect.Value{}
|
||||
for i, currKey := range keys {
|
||||
if currKey.IsValid() {
|
||||
strKey := list[i].Key
|
||||
prevKey, seen := seenKeys[strKey]
|
||||
if seen && prevKey.CanInterface() && currKey.CanInterface() {
|
||||
ambiguous = prevKey.Interface() != currKey.Interface()
|
||||
if ambiguous {
|
||||
break
|
||||
}
|
||||
}
|
||||
seenKeys[strKey] = currKey
|
||||
}
|
||||
}
|
||||
if ambiguous {
|
||||
for i, k := range keys {
|
||||
if k.IsValid() {
|
||||
list[i].Key = formatMapKey(k, true, ptrs)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
}
|
||||
|
||||
// coalesceAdjacentRecords coalesces the list of records into groups of
|
||||
// adjacent equal, or unequal counts.
|
||||
func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
|
||||
var prevCase int // Arbitrary index into which case last occurred
|
||||
lastStats := func(i int) *diffStats {
|
||||
if prevCase != i {
|
||||
groups = append(groups, diffStats{Name: name})
|
||||
prevCase = i
|
||||
}
|
||||
return &groups[len(groups)-1]
|
||||
}
|
||||
for _, r := range recs {
|
||||
switch rv := r.Value; {
|
||||
case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
|
||||
lastStats(1).NumIgnored++
|
||||
case rv.NumDiff == 0:
|
||||
lastStats(1).NumIdentical++
|
||||
case rv.NumDiff > 0 && !rv.ValueY.IsValid():
|
||||
lastStats(2).NumRemoved++
|
||||
case rv.NumDiff > 0 && !rv.ValueX.IsValid():
|
||||
lastStats(2).NumInserted++
|
||||
default:
|
||||
lastStats(2).NumModified++
|
||||
}
|
||||
}
|
||||
return groups
|
||||
}
|
||||
264
vendor/github.com/google/go-cmp/cmp/report_references.go
generated
vendored
Normal file
264
vendor/github.com/google/go-cmp/cmp/report_references.go
generated
vendored
Normal file
@@ -0,0 +1,264 @@
|
||||
// Copyright 2020, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
const (
|
||||
pointerDelimPrefix = "⟪"
|
||||
pointerDelimSuffix = "⟫"
|
||||
)
|
||||
|
||||
// formatPointer prints the address of the pointer.
|
||||
func formatPointer(p value.Pointer, withDelims bool) string {
|
||||
v := p.Uintptr()
|
||||
if flags.Deterministic {
|
||||
v = 0xdeadf00f // Only used for stable testing purposes
|
||||
}
|
||||
if withDelims {
|
||||
return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix
|
||||
}
|
||||
return formatHex(uint64(v))
|
||||
}
|
||||
|
||||
// pointerReferences is a stack of pointers visited so far.
|
||||
type pointerReferences [][2]value.Pointer
|
||||
|
||||
func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) {
|
||||
if deref && vx.IsValid() {
|
||||
vx = vx.Addr()
|
||||
}
|
||||
if deref && vy.IsValid() {
|
||||
vy = vy.Addr()
|
||||
}
|
||||
switch d {
|
||||
case diffUnknown, diffIdentical:
|
||||
pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
|
||||
case diffRemoved:
|
||||
pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}}
|
||||
case diffInserted:
|
||||
pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)}
|
||||
}
|
||||
*ps = append(*ps, pp)
|
||||
return pp
|
||||
}
|
||||
|
||||
func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) {
|
||||
p = value.PointerOf(v)
|
||||
for _, pp := range *ps {
|
||||
if p == pp[0] || p == pp[1] {
|
||||
return p, true
|
||||
}
|
||||
}
|
||||
*ps = append(*ps, [2]value.Pointer{p, p})
|
||||
return p, false
|
||||
}
|
||||
|
||||
func (ps *pointerReferences) Pop() {
|
||||
*ps = (*ps)[:len(*ps)-1]
|
||||
}
|
||||
|
||||
// trunkReferences is metadata for a textNode indicating that the sub-tree
|
||||
// represents the value for either pointer in a pair of references.
|
||||
type trunkReferences struct{ pp [2]value.Pointer }
|
||||
|
||||
// trunkReference is metadata for a textNode indicating that the sub-tree
|
||||
// represents the value for the given pointer reference.
|
||||
type trunkReference struct{ p value.Pointer }
|
||||
|
||||
// leafReference is metadata for a textNode indicating that the value is
|
||||
// truncated as it refers to another part of the tree (i.e., a trunk).
|
||||
type leafReference struct{ p value.Pointer }
|
||||
|
||||
func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode {
|
||||
switch {
|
||||
case pp[0].IsNil():
|
||||
return &textWrap{Value: s, Metadata: trunkReference{pp[1]}}
|
||||
case pp[1].IsNil():
|
||||
return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
|
||||
case pp[0] == pp[1]:
|
||||
return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
|
||||
default:
|
||||
return &textWrap{Value: s, Metadata: trunkReferences{pp}}
|
||||
}
|
||||
}
|
||||
func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode {
|
||||
var prefix string
|
||||
if printAddress {
|
||||
prefix = formatPointer(p, true)
|
||||
}
|
||||
return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}}
|
||||
}
|
||||
func makeLeafReference(p value.Pointer, printAddress bool) textNode {
|
||||
out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"}
|
||||
var prefix string
|
||||
if printAddress {
|
||||
prefix = formatPointer(p, true)
|
||||
}
|
||||
return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}}
|
||||
}
|
||||
|
||||
// resolveReferences walks the textNode tree searching for any leaf reference
|
||||
// metadata and resolves each against the corresponding trunk references.
|
||||
// Since pointer addresses in memory are not particularly readable to the user,
|
||||
// it replaces each pointer value with an arbitrary and unique reference ID.
|
||||
func resolveReferences(s textNode) {
|
||||
var walkNodes func(textNode, func(textNode))
|
||||
walkNodes = func(s textNode, f func(textNode)) {
|
||||
f(s)
|
||||
switch s := s.(type) {
|
||||
case *textWrap:
|
||||
walkNodes(s.Value, f)
|
||||
case textList:
|
||||
for _, r := range s {
|
||||
walkNodes(r.Value, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Collect all trunks and leaves with reference metadata.
|
||||
var trunks, leaves []*textWrap
|
||||
walkNodes(s, func(s textNode) {
|
||||
if s, ok := s.(*textWrap); ok {
|
||||
switch s.Metadata.(type) {
|
||||
case leafReference:
|
||||
leaves = append(leaves, s)
|
||||
case trunkReference, trunkReferences:
|
||||
trunks = append(trunks, s)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// No leaf references to resolve.
|
||||
if len(leaves) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Collect the set of all leaf references to resolve.
|
||||
leafPtrs := make(map[value.Pointer]bool)
|
||||
for _, leaf := range leaves {
|
||||
leafPtrs[leaf.Metadata.(leafReference).p] = true
|
||||
}
|
||||
|
||||
// Collect the set of trunk pointers that are always paired together.
|
||||
// This allows us to assign a single ID to both pointers for brevity.
|
||||
// If a pointer in a pair ever occurs by itself or as a different pair,
|
||||
// then the pair is broken.
|
||||
pairedTrunkPtrs := make(map[value.Pointer]value.Pointer)
|
||||
unpair := func(p value.Pointer) {
|
||||
if !pairedTrunkPtrs[p].IsNil() {
|
||||
pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half
|
||||
}
|
||||
pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half
|
||||
}
|
||||
for _, trunk := range trunks {
|
||||
switch p := trunk.Metadata.(type) {
|
||||
case trunkReference:
|
||||
unpair(p.p) // standalone pointer cannot be part of a pair
|
||||
case trunkReferences:
|
||||
p0, ok0 := pairedTrunkPtrs[p.pp[0]]
|
||||
p1, ok1 := pairedTrunkPtrs[p.pp[1]]
|
||||
switch {
|
||||
case !ok0 && !ok1:
|
||||
// Register the newly seen pair.
|
||||
pairedTrunkPtrs[p.pp[0]] = p.pp[1]
|
||||
pairedTrunkPtrs[p.pp[1]] = p.pp[0]
|
||||
case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]:
|
||||
// Exact pair already seen; do nothing.
|
||||
default:
|
||||
// Pair conflicts with some other pair; break all pairs.
|
||||
unpair(p.pp[0])
|
||||
unpair(p.pp[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Correlate each pointer referenced by leaves to a unique identifier,
|
||||
// and print the IDs for each trunk that matches those pointers.
|
||||
var nextID uint
|
||||
ptrIDs := make(map[value.Pointer]uint)
|
||||
newID := func() uint {
|
||||
id := nextID
|
||||
nextID++
|
||||
return id
|
||||
}
|
||||
for _, trunk := range trunks {
|
||||
switch p := trunk.Metadata.(type) {
|
||||
case trunkReference:
|
||||
if print := leafPtrs[p.p]; print {
|
||||
id, ok := ptrIDs[p.p]
|
||||
if !ok {
|
||||
id = newID()
|
||||
ptrIDs[p.p] = id
|
||||
}
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
|
||||
}
|
||||
case trunkReferences:
|
||||
print0 := leafPtrs[p.pp[0]]
|
||||
print1 := leafPtrs[p.pp[1]]
|
||||
if print0 || print1 {
|
||||
id0, ok0 := ptrIDs[p.pp[0]]
|
||||
id1, ok1 := ptrIDs[p.pp[1]]
|
||||
isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0]
|
||||
if isPair {
|
||||
var id uint
|
||||
assert(ok0 == ok1) // must be seen together or not at all
|
||||
if ok0 {
|
||||
assert(id0 == id1) // must have the same ID
|
||||
id = id0
|
||||
} else {
|
||||
id = newID()
|
||||
ptrIDs[p.pp[0]] = id
|
||||
ptrIDs[p.pp[1]] = id
|
||||
}
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
|
||||
} else {
|
||||
if print0 && !ok0 {
|
||||
id0 = newID()
|
||||
ptrIDs[p.pp[0]] = id0
|
||||
}
|
||||
if print1 && !ok1 {
|
||||
id1 = newID()
|
||||
ptrIDs[p.pp[1]] = id1
|
||||
}
|
||||
switch {
|
||||
case print0 && print1:
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1))
|
||||
case print0:
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0))
|
||||
case print1:
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update all leaf references with the unique identifier.
|
||||
for _, leaf := range leaves {
|
||||
if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok {
|
||||
leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func formatReference(id uint) string {
|
||||
return fmt.Sprintf("ref#%d", id)
|
||||
}
|
||||
|
||||
func updateReferencePrefix(prefix, ref string) string {
|
||||
if prefix == "" {
|
||||
return pointerDelimPrefix + ref + pointerDelimSuffix
|
||||
}
|
||||
suffix := strings.TrimPrefix(prefix, pointerDelimPrefix)
|
||||
return pointerDelimPrefix + ref + ": " + suffix
|
||||
}
|
||||
414
vendor/github.com/google/go-cmp/cmp/report_reflect.go
generated
vendored
Normal file
414
vendor/github.com/google/go-cmp/cmp/report_reflect.go
generated
vendored
Normal file
@@ -0,0 +1,414 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
var (
|
||||
anyType = reflect.TypeOf((*interface{})(nil)).Elem()
|
||||
stringType = reflect.TypeOf((*string)(nil)).Elem()
|
||||
bytesType = reflect.TypeOf((*[]byte)(nil)).Elem()
|
||||
byteType = reflect.TypeOf((*byte)(nil)).Elem()
|
||||
)
|
||||
|
||||
type formatValueOptions struct {
|
||||
// AvoidStringer controls whether to avoid calling custom stringer
|
||||
// methods like error.Error or fmt.Stringer.String.
|
||||
AvoidStringer bool
|
||||
|
||||
// PrintAddresses controls whether to print the address of all pointers,
|
||||
// slice elements, and maps.
|
||||
PrintAddresses bool
|
||||
|
||||
// QualifiedNames controls whether FormatType uses the fully qualified name
|
||||
// (including the full package path as opposed to just the package name).
|
||||
QualifiedNames bool
|
||||
|
||||
// VerbosityLevel controls the amount of output to produce.
|
||||
// A higher value produces more output. A value of zero or lower produces
|
||||
// no output (represented using an ellipsis).
|
||||
// If LimitVerbosity is false, then the level is treated as infinite.
|
||||
VerbosityLevel int
|
||||
|
||||
// LimitVerbosity specifies that formatting should respect VerbosityLevel.
|
||||
LimitVerbosity bool
|
||||
}
|
||||
|
||||
// FormatType prints the type as if it were wrapping s.
|
||||
// This may return s as-is depending on the current type and TypeMode mode.
|
||||
func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
|
||||
// Check whether to emit the type or not.
|
||||
switch opts.TypeMode {
|
||||
case autoType:
|
||||
switch t.Kind() {
|
||||
case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
|
||||
if s.Equal(textNil) {
|
||||
return s
|
||||
}
|
||||
default:
|
||||
return s
|
||||
}
|
||||
if opts.DiffMode == diffIdentical {
|
||||
return s // elide type for identical nodes
|
||||
}
|
||||
case elideType:
|
||||
return s
|
||||
}
|
||||
|
||||
// Determine the type label, applying special handling for unnamed types.
|
||||
typeName := value.TypeString(t, opts.QualifiedNames)
|
||||
if t.Name() == "" {
|
||||
// According to Go grammar, certain type literals contain symbols that
|
||||
// do not strongly bind to the next lexicographical token (e.g., *T).
|
||||
switch t.Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Ptr:
|
||||
typeName = "(" + typeName + ")"
|
||||
}
|
||||
}
|
||||
return &textWrap{Prefix: typeName, Value: wrapParens(s)}
|
||||
}
|
||||
|
||||
// wrapParens wraps s with a set of parenthesis, but avoids it if the
|
||||
// wrapped node itself is already surrounded by a pair of parenthesis or braces.
|
||||
// It handles unwrapping one level of pointer-reference nodes.
|
||||
func wrapParens(s textNode) textNode {
|
||||
var refNode *textWrap
|
||||
if s2, ok := s.(*textWrap); ok {
|
||||
// Unwrap a single pointer reference node.
|
||||
switch s2.Metadata.(type) {
|
||||
case leafReference, trunkReference, trunkReferences:
|
||||
refNode = s2
|
||||
if s3, ok := refNode.Value.(*textWrap); ok {
|
||||
s2 = s3
|
||||
}
|
||||
}
|
||||
|
||||
// Already has delimiters that make parenthesis unnecessary.
|
||||
hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")")
|
||||
hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}")
|
||||
if hasParens || hasBraces {
|
||||
return s
|
||||
}
|
||||
}
|
||||
if refNode != nil {
|
||||
refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"}
|
||||
return s
|
||||
}
|
||||
return &textWrap{Prefix: "(", Value: s, Suffix: ")"}
|
||||
}
|
||||
|
||||
// FormatValue prints the reflect.Value, taking extra care to avoid descending
|
||||
// into pointers already in ptrs. As pointers are visited, ptrs is also updated.
|
||||
func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) {
|
||||
if !v.IsValid() {
|
||||
return nil
|
||||
}
|
||||
t := v.Type()
|
||||
|
||||
// Check slice element for cycles.
|
||||
if parentKind == reflect.Slice {
|
||||
ptrRef, visited := ptrs.Push(v.Addr())
|
||||
if visited {
|
||||
return makeLeafReference(ptrRef, false)
|
||||
}
|
||||
defer ptrs.Pop()
|
||||
defer func() { out = wrapTrunkReference(ptrRef, false, out) }()
|
||||
}
|
||||
|
||||
// Check whether there is an Error or String method to call.
|
||||
if !opts.AvoidStringer && v.CanInterface() {
|
||||
// Avoid calling Error or String methods on nil receivers since many
|
||||
// implementations crash when doing so.
|
||||
if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
|
||||
var prefix, strVal string
|
||||
func() {
|
||||
// Swallow and ignore any panics from String or Error.
|
||||
defer func() { recover() }()
|
||||
switch v := v.Interface().(type) {
|
||||
case error:
|
||||
strVal = v.Error()
|
||||
prefix = "e"
|
||||
case fmt.Stringer:
|
||||
strVal = v.String()
|
||||
prefix = "s"
|
||||
}
|
||||
}()
|
||||
if prefix != "" {
|
||||
return opts.formatString(prefix, strVal)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check whether to explicitly wrap the result with the type.
|
||||
var skipType bool
|
||||
defer func() {
|
||||
if !skipType {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
}()
|
||||
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
return textLine(fmt.Sprint(v.Bool()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return textLine(fmt.Sprint(v.Int()))
|
||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return textLine(fmt.Sprint(v.Uint()))
|
||||
case reflect.Uint8:
|
||||
if parentKind == reflect.Slice || parentKind == reflect.Array {
|
||||
return textLine(formatHex(v.Uint()))
|
||||
}
|
||||
return textLine(fmt.Sprint(v.Uint()))
|
||||
case reflect.Uintptr:
|
||||
return textLine(formatHex(v.Uint()))
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return textLine(fmt.Sprint(v.Float()))
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return textLine(fmt.Sprint(v.Complex()))
|
||||
case reflect.String:
|
||||
return opts.formatString("", v.String())
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
return textLine(formatPointer(value.PointerOf(v), true))
|
||||
case reflect.Struct:
|
||||
var list textList
|
||||
v := makeAddressable(v) // needed for retrieveUnexportedField
|
||||
maxLen := v.NumField()
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
vv := v.Field(i)
|
||||
if vv.IsZero() {
|
||||
continue // Elide fields with zero values
|
||||
}
|
||||
if len(list) == maxLen {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
break
|
||||
}
|
||||
sf := t.Field(i)
|
||||
if !isExported(sf.Name) {
|
||||
vv = retrieveUnexportedField(v, sf, true)
|
||||
}
|
||||
s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)
|
||||
list = append(list, textRecord{Key: sf.Name, Value: s})
|
||||
}
|
||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
|
||||
// Check whether this is a []byte of text data.
|
||||
if t.Elem() == byteType {
|
||||
b := v.Bytes()
|
||||
isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
|
||||
if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
|
||||
out = opts.formatString("", string(b))
|
||||
skipType = true
|
||||
return opts.FormatType(t, out)
|
||||
}
|
||||
}
|
||||
|
||||
fallthrough
|
||||
case reflect.Array:
|
||||
maxLen := v.Len()
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
var list textList
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if len(list) == maxLen {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
break
|
||||
}
|
||||
s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs)
|
||||
list = append(list, textRecord{Value: s})
|
||||
}
|
||||
|
||||
out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
if t.Kind() == reflect.Slice && opts.PrintAddresses {
|
||||
header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap())
|
||||
out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out}
|
||||
}
|
||||
return out
|
||||
case reflect.Map:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
|
||||
// Check pointer for cycles.
|
||||
ptrRef, visited := ptrs.Push(v)
|
||||
if visited {
|
||||
return makeLeafReference(ptrRef, opts.PrintAddresses)
|
||||
}
|
||||
defer ptrs.Pop()
|
||||
|
||||
maxLen := v.Len()
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
var list textList
|
||||
for _, k := range value.SortKeys(v.MapKeys()) {
|
||||
if len(list) == maxLen {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
break
|
||||
}
|
||||
sk := formatMapKey(k, false, ptrs)
|
||||
sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs)
|
||||
list = append(list, textRecord{Key: sk, Value: sv})
|
||||
}
|
||||
|
||||
out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
|
||||
return out
|
||||
case reflect.Ptr:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
|
||||
// Check pointer for cycles.
|
||||
ptrRef, visited := ptrs.Push(v)
|
||||
if visited {
|
||||
out = makeLeafReference(ptrRef, opts.PrintAddresses)
|
||||
return &textWrap{Prefix: "&", Value: out}
|
||||
}
|
||||
defer ptrs.Pop()
|
||||
|
||||
// Skip the name only if this is an unnamed pointer type.
|
||||
// Otherwise taking the address of a value does not reproduce
|
||||
// the named pointer type.
|
||||
if v.Type().Name() == "" {
|
||||
skipType = true // Let the underlying value print the type instead
|
||||
}
|
||||
out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
|
||||
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
|
||||
out = &textWrap{Prefix: "&", Value: out}
|
||||
return out
|
||||
case reflect.Interface:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
// Interfaces accept different concrete types,
|
||||
// so configure the underlying value to explicitly print the type.
|
||||
return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v kind not handled", v.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
func (opts formatOptions) formatString(prefix, s string) textNode {
|
||||
maxLen := len(s)
|
||||
maxLines := strings.Count(s, "\n") + 1
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
|
||||
maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
|
||||
}
|
||||
|
||||
// For multiline strings, use the triple-quote syntax,
|
||||
// but only use it when printing removed or inserted nodes since
|
||||
// we only want the extra verbosity for those cases.
|
||||
lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
|
||||
isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+')
|
||||
for i := 0; i < len(lines) && isTripleQuoted; i++ {
|
||||
lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
|
||||
isPrintable := func(r rune) bool {
|
||||
return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
|
||||
}
|
||||
line := lines[i]
|
||||
isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
|
||||
}
|
||||
if isTripleQuoted {
|
||||
var list textList
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
|
||||
for i, line := range lines {
|
||||
if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
|
||||
comment := commentString(fmt.Sprintf("%d elided lines", numElided))
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
|
||||
break
|
||||
}
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
|
||||
}
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
|
||||
return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
|
||||
}
|
||||
|
||||
// Format the string as a single-line quoted string.
|
||||
if len(s) > maxLen+len(textEllipsis) {
|
||||
return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
|
||||
}
|
||||
return textLine(prefix + formatString(s))
|
||||
}
|
||||
|
||||
// formatMapKey formats v as if it were a map key.
|
||||
// The result is guaranteed to be a single line.
|
||||
func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
|
||||
var opts formatOptions
|
||||
opts.DiffMode = diffIdentical
|
||||
opts.TypeMode = elideType
|
||||
opts.PrintAddresses = disambiguate
|
||||
opts.AvoidStringer = disambiguate
|
||||
opts.QualifiedNames = disambiguate
|
||||
opts.VerbosityLevel = maxVerbosityPreset
|
||||
opts.LimitVerbosity = true
|
||||
s := opts.FormatValue(v, reflect.Map, ptrs).String()
|
||||
return strings.TrimSpace(s)
|
||||
}
|
||||
|
||||
// formatString prints s as a double-quoted or backtick-quoted string.
|
||||
func formatString(s string) string {
|
||||
// Use quoted string if it the same length as a raw string literal.
|
||||
// Otherwise, attempt to use the raw string form.
|
||||
qs := strconv.Quote(s)
|
||||
if len(qs) == 1+len(s)+1 {
|
||||
return qs
|
||||
}
|
||||
|
||||
// Disallow newlines to ensure output is a single line.
|
||||
// Only allow printable runes for readability purposes.
|
||||
rawInvalid := func(r rune) bool {
|
||||
return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
|
||||
}
|
||||
if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
|
||||
return "`" + s + "`"
|
||||
}
|
||||
return qs
|
||||
}
|
||||
|
||||
// formatHex prints u as a hexadecimal integer in Go notation.
|
||||
func formatHex(u uint64) string {
|
||||
var f string
|
||||
switch {
|
||||
case u <= 0xff:
|
||||
f = "0x%02x"
|
||||
case u <= 0xffff:
|
||||
f = "0x%04x"
|
||||
case u <= 0xffffff:
|
||||
f = "0x%06x"
|
||||
case u <= 0xffffffff:
|
||||
f = "0x%08x"
|
||||
case u <= 0xffffffffff:
|
||||
f = "0x%010x"
|
||||
case u <= 0xffffffffffff:
|
||||
f = "0x%012x"
|
||||
case u <= 0xffffffffffffff:
|
||||
f = "0x%014x"
|
||||
case u <= 0xffffffffffffffff:
|
||||
f = "0x%016x"
|
||||
}
|
||||
return fmt.Sprintf(f, u)
|
||||
}
|
||||
614
vendor/github.com/google/go-cmp/cmp/report_slices.go
generated
vendored
Normal file
614
vendor/github.com/google/go-cmp/cmp/report_slices.go
generated
vendored
Normal file
@@ -0,0 +1,614 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/diff"
|
||||
)
|
||||
|
||||
// CanFormatDiffSlice reports whether we support custom formatting for nodes
|
||||
// that are slices of primitive kinds or strings.
|
||||
func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
|
||||
switch {
|
||||
case opts.DiffMode != diffUnknown:
|
||||
return false // Must be formatting in diff mode
|
||||
case v.NumDiff == 0:
|
||||
return false // No differences detected
|
||||
case !v.ValueX.IsValid() || !v.ValueY.IsValid():
|
||||
return false // Both values must be valid
|
||||
case v.NumIgnored > 0:
|
||||
return false // Some ignore option was used
|
||||
case v.NumTransformed > 0:
|
||||
return false // Some transform option was used
|
||||
case v.NumCompared > 1:
|
||||
return false // More than one comparison was used
|
||||
case v.NumCompared == 1 && v.Type.Name() != "":
|
||||
// The need for cmp to check applicability of options on every element
|
||||
// in a slice is a significant performance detriment for large []byte.
|
||||
// The workaround is to specify Comparer(bytes.Equal),
|
||||
// which enables cmp to compare []byte more efficiently.
|
||||
// If they differ, we still want to provide batched diffing.
|
||||
// The logic disallows named types since they tend to have their own
|
||||
// String method, with nicer formatting than what this provides.
|
||||
return false
|
||||
}
|
||||
|
||||
// Check whether this is an interface with the same concrete types.
|
||||
t := v.Type
|
||||
vx, vy := v.ValueX, v.ValueY
|
||||
if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() {
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
t = vx.Type()
|
||||
}
|
||||
|
||||
// Check whether we provide specialized diffing for this type.
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
case reflect.Array, reflect.Slice:
|
||||
// Only slices of primitive types have specialized handling.
|
||||
switch t.Elem().Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
|
||||
reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
// Both slice values have to be non-empty.
|
||||
if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) {
|
||||
return false
|
||||
}
|
||||
|
||||
// If a sufficient number of elements already differ,
|
||||
// use specialized formatting even if length requirement is not met.
|
||||
if v.NumDiff > v.NumSame {
|
||||
return true
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
// Use specialized string diffing for longer slices or strings.
|
||||
const minLength = 32
|
||||
return vx.Len() >= minLength && vy.Len() >= minLength
|
||||
}
|
||||
|
||||
// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
|
||||
// This provides custom-tailored logic to make printing of differences in
|
||||
// textual strings and slices of primitive kinds more readable.
|
||||
func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
assert(opts.DiffMode == diffUnknown)
|
||||
t, vx, vy := v.Type, v.ValueX, v.ValueY
|
||||
if t.Kind() == reflect.Interface {
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
t = vx.Type()
|
||||
opts = opts.WithTypeMode(emitType)
|
||||
}
|
||||
|
||||
// Auto-detect the type of the data.
|
||||
var sx, sy string
|
||||
var ssx, ssy []string
|
||||
var isString, isMostlyText, isPureLinedText, isBinary bool
|
||||
switch {
|
||||
case t.Kind() == reflect.String:
|
||||
sx, sy = vx.String(), vy.String()
|
||||
isString = true
|
||||
case t.Kind() == reflect.Slice && t.Elem() == byteType:
|
||||
sx, sy = string(vx.Bytes()), string(vy.Bytes())
|
||||
isString = true
|
||||
case t.Kind() == reflect.Array:
|
||||
// Arrays need to be addressable for slice operations to work.
|
||||
vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
|
||||
vx2.Set(vx)
|
||||
vy2.Set(vy)
|
||||
vx, vy = vx2, vy2
|
||||
}
|
||||
if isString {
|
||||
var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int
|
||||
for i, r := range sx + sy {
|
||||
numTotalRunes++
|
||||
if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
|
||||
numValidRunes++
|
||||
}
|
||||
if r == '\n' {
|
||||
if maxLineLen < i-lastLineIdx {
|
||||
maxLineLen = i - lastLineIdx
|
||||
}
|
||||
lastLineIdx = i + 1
|
||||
numLines++
|
||||
}
|
||||
}
|
||||
isPureText := numValidRunes == numTotalRunes
|
||||
isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes))
|
||||
isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024
|
||||
isBinary = !isMostlyText
|
||||
|
||||
// Avoid diffing by lines if it produces a significantly more complex
|
||||
// edit script than diffing by bytes.
|
||||
if isPureLinedText {
|
||||
ssx = strings.Split(sx, "\n")
|
||||
ssy = strings.Split(sy, "\n")
|
||||
esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(ssx[ix] == ssy[iy])
|
||||
})
|
||||
esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(sx[ix] == sy[iy])
|
||||
})
|
||||
efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
|
||||
efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
|
||||
quotedLength := len(strconv.Quote(sx + sy))
|
||||
unquotedLength := len(sx) + len(sy)
|
||||
escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength)
|
||||
isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1
|
||||
}
|
||||
}
|
||||
|
||||
// Format the string into printable records.
|
||||
var list textList
|
||||
var delim string
|
||||
switch {
|
||||
// If the text appears to be multi-lined text,
|
||||
// then perform differencing across individual lines.
|
||||
case isPureLinedText:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
s := formatString(v.Index(0).String())
|
||||
return textRecord{Diff: d, Value: textLine(s)}
|
||||
},
|
||||
)
|
||||
delim = "\n"
|
||||
|
||||
// If possible, use a custom triple-quote (""") syntax for printing
|
||||
// differences in a string literal. This format is more readable,
|
||||
// but has edge-cases where differences are visually indistinguishable.
|
||||
// This format is avoided under the following conditions:
|
||||
// - A line starts with `"""`
|
||||
// - A line starts with "..."
|
||||
// - A line contains non-printable characters
|
||||
// - Adjacent different lines differ only by whitespace
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// """
|
||||
// ... // 3 identical lines
|
||||
// foo
|
||||
// bar
|
||||
// - baz
|
||||
// + BAZ
|
||||
// """
|
||||
isTripleQuoted := true
|
||||
prevRemoveLines := map[string]bool{}
|
||||
prevInsertLines := map[string]bool{}
|
||||
var list2 textList
|
||||
list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
|
||||
for _, r := range list {
|
||||
if !r.Value.Equal(textEllipsis) {
|
||||
line, _ := strconv.Unquote(string(r.Value.(textLine)))
|
||||
line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
|
||||
normLine := strings.Map(func(r rune) rune {
|
||||
if unicode.IsSpace(r) {
|
||||
return -1 // drop whitespace to avoid visually indistinguishable output
|
||||
}
|
||||
return r
|
||||
}, line)
|
||||
isPrintable := func(r rune) bool {
|
||||
return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
|
||||
}
|
||||
isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == ""
|
||||
switch r.Diff {
|
||||
case diffRemoved:
|
||||
isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine]
|
||||
prevRemoveLines[normLine] = true
|
||||
case diffInserted:
|
||||
isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine]
|
||||
prevInsertLines[normLine] = true
|
||||
}
|
||||
if !isTripleQuoted {
|
||||
break
|
||||
}
|
||||
r.Value = textLine(line)
|
||||
r.ElideComma = true
|
||||
}
|
||||
if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group
|
||||
prevRemoveLines = map[string]bool{}
|
||||
prevInsertLines = map[string]bool{}
|
||||
}
|
||||
list2 = append(list2, r)
|
||||
}
|
||||
if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 {
|
||||
list2 = list2[:len(list2)-1] // elide single empty line at the end
|
||||
}
|
||||
list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
|
||||
if isTripleQuoted {
|
||||
var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
if t != stringType {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
case reflect.Slice:
|
||||
// Always emit type for slices since the triple-quote syntax
|
||||
// looks like a string (not a slice).
|
||||
opts = opts.WithTypeMode(emitType)
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// If the text appears to be single-lined text,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
// The output is printed as quoted strings.
|
||||
case isMostlyText:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
s := formatString(v.String())
|
||||
return textRecord{Diff: d, Value: textLine(s)}
|
||||
},
|
||||
)
|
||||
|
||||
// If the text appears to be binary data,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
// The output is inspired by hexdump.
|
||||
case isBinary:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
var ss []string
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
ss = append(ss, formatHex(v.Index(i).Uint()))
|
||||
}
|
||||
s := strings.Join(ss, ", ")
|
||||
comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String())))
|
||||
return textRecord{Diff: d, Value: textLine(s), Comment: comment}
|
||||
},
|
||||
)
|
||||
|
||||
// For all other slices of primitive types,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
// The size of each chunk depends on the width of the element kind.
|
||||
default:
|
||||
var chunkSize int
|
||||
if t.Elem().Kind() == reflect.Bool {
|
||||
chunkSize = 16
|
||||
} else {
|
||||
switch t.Elem().Bits() {
|
||||
case 8:
|
||||
chunkSize = 16
|
||||
case 16:
|
||||
chunkSize = 12
|
||||
case 32:
|
||||
chunkSize = 8
|
||||
default:
|
||||
chunkSize = 8
|
||||
}
|
||||
}
|
||||
list = opts.formatDiffSlice(
|
||||
vx, vy, chunkSize, t.Elem().Kind().String(),
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
var ss []string
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
switch t.Elem().Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
ss = append(ss, fmt.Sprint(v.Index(i).Int()))
|
||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
ss = append(ss, fmt.Sprint(v.Index(i).Uint()))
|
||||
case reflect.Uint8, reflect.Uintptr:
|
||||
ss = append(ss, formatHex(v.Index(i).Uint()))
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
||||
ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
|
||||
}
|
||||
}
|
||||
s := strings.Join(ss, ", ")
|
||||
return textRecord{Diff: d, Value: textLine(s)}
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Wrap the output with appropriate type information.
|
||||
var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
if !isMostlyText {
|
||||
// The "{...}" byte-sequence literal is not valid Go syntax for strings.
|
||||
// Emit the type for extra clarity (e.g. "string{...}").
|
||||
if t.Kind() == reflect.String {
|
||||
opts = opts.WithTypeMode(emitType)
|
||||
}
|
||||
return opts.FormatType(t, out)
|
||||
}
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
||||
if t != stringType {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
case reflect.Slice:
|
||||
out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
||||
if t != bytesType {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// formatASCII formats s as an ASCII string.
|
||||
// This is useful for printing binary strings in a semi-legible way.
|
||||
func formatASCII(s string) string {
|
||||
b := bytes.Repeat([]byte{'.'}, len(s))
|
||||
for i := 0; i < len(s); i++ {
|
||||
if ' ' <= s[i] && s[i] <= '~' {
|
||||
b[i] = s[i]
|
||||
}
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (opts formatOptions) formatDiffSlice(
|
||||
vx, vy reflect.Value, chunkSize int, name string,
|
||||
makeRec func(reflect.Value, diffMode) textRecord,
|
||||
) (list textList) {
|
||||
eq := func(ix, iy int) bool {
|
||||
return vx.Index(ix).Interface() == vy.Index(iy).Interface()
|
||||
}
|
||||
es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(eq(ix, iy))
|
||||
})
|
||||
|
||||
appendChunks := func(v reflect.Value, d diffMode) int {
|
||||
n0 := v.Len()
|
||||
for v.Len() > 0 {
|
||||
n := chunkSize
|
||||
if n > v.Len() {
|
||||
n = v.Len()
|
||||
}
|
||||
list = append(list, makeRec(v.Slice(0, n), d))
|
||||
v = v.Slice(n, v.Len())
|
||||
}
|
||||
return n0 - v.Len()
|
||||
}
|
||||
|
||||
var numDiffs int
|
||||
maxLen := -1
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
|
||||
groups := coalesceAdjacentEdits(name, es)
|
||||
groups = coalesceInterveningIdentical(groups, chunkSize/4)
|
||||
groups = cleanupSurroundingIdentical(groups, eq)
|
||||
maxGroup := diffStats{Name: name}
|
||||
for i, ds := range groups {
|
||||
if maxLen >= 0 && numDiffs >= maxLen {
|
||||
maxGroup = maxGroup.Append(ds)
|
||||
continue
|
||||
}
|
||||
|
||||
// Print equal.
|
||||
if ds.NumDiff() == 0 {
|
||||
// Compute the number of leading and trailing equal bytes to print.
|
||||
var numLo, numHi int
|
||||
numEqual := ds.NumIgnored + ds.NumIdentical
|
||||
for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 {
|
||||
numLo++
|
||||
}
|
||||
for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
|
||||
numHi++
|
||||
}
|
||||
if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 {
|
||||
numHi = numEqual - numLo // Avoid pointless coalescing of single equal row
|
||||
}
|
||||
|
||||
// Print the equal bytes.
|
||||
appendChunks(vx.Slice(0, numLo), diffIdentical)
|
||||
if numEqual > numLo+numHi {
|
||||
ds.NumIdentical -= numLo + numHi
|
||||
list.AppendEllipsis(ds)
|
||||
}
|
||||
appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical)
|
||||
vx = vx.Slice(numEqual, vx.Len())
|
||||
vy = vy.Slice(numEqual, vy.Len())
|
||||
continue
|
||||
}
|
||||
|
||||
// Print unequal.
|
||||
len0 := len(list)
|
||||
nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
|
||||
vx = vx.Slice(nx, vx.Len())
|
||||
ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
|
||||
vy = vy.Slice(ny, vy.Len())
|
||||
numDiffs += len(list) - len0
|
||||
}
|
||||
if maxGroup.IsZero() {
|
||||
assert(vx.Len() == 0 && vy.Len() == 0)
|
||||
} else {
|
||||
list.AppendEllipsis(maxGroup)
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
|
||||
// equal or unequal counts.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// Input: "..XXY...Y"
|
||||
// Output: [
|
||||
// {NumIdentical: 2},
|
||||
// {NumRemoved: 2, NumInserted 1},
|
||||
// {NumIdentical: 3},
|
||||
// {NumInserted: 1},
|
||||
// ]
|
||||
func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
|
||||
var prevMode byte
|
||||
lastStats := func(mode byte) *diffStats {
|
||||
if prevMode != mode {
|
||||
groups = append(groups, diffStats{Name: name})
|
||||
prevMode = mode
|
||||
}
|
||||
return &groups[len(groups)-1]
|
||||
}
|
||||
for _, e := range es {
|
||||
switch e {
|
||||
case diff.Identity:
|
||||
lastStats('=').NumIdentical++
|
||||
case diff.UniqueX:
|
||||
lastStats('!').NumRemoved++
|
||||
case diff.UniqueY:
|
||||
lastStats('!').NumInserted++
|
||||
case diff.Modified:
|
||||
lastStats('!').NumModified++
|
||||
}
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
||||
// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize)
|
||||
// equal groups into adjacent unequal groups that currently result in a
|
||||
// dual inserted/removed printout. This acts as a high-pass filter to smooth
|
||||
// out high-frequency changes within the windowSize.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// WindowSize: 16,
|
||||
// Input: [
|
||||
// {NumIdentical: 61}, // group 0
|
||||
// {NumRemoved: 3, NumInserted: 1}, // group 1
|
||||
// {NumIdentical: 6}, // ├── coalesce
|
||||
// {NumInserted: 2}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 9}, // └── coalesce
|
||||
// {NumIdentical: 64}, // group 2
|
||||
// {NumRemoved: 3, NumInserted: 1}, // group 3
|
||||
// {NumIdentical: 6}, // ├── coalesce
|
||||
// {NumInserted: 2}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 7}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 2}, // └── coalesce
|
||||
// {NumIdentical: 63}, // group 4
|
||||
// ]
|
||||
// Output: [
|
||||
// {NumIdentical: 61},
|
||||
// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
|
||||
// {NumIdentical: 64},
|
||||
// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
|
||||
// {NumIdentical: 63},
|
||||
// ]
|
||||
func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
|
||||
groups, groupsOrig := groups[:0], groups
|
||||
for i, ds := range groupsOrig {
|
||||
if len(groups) >= 2 && ds.NumDiff() > 0 {
|
||||
prev := &groups[len(groups)-2] // Unequal group
|
||||
curr := &groups[len(groups)-1] // Equal group
|
||||
next := &groupsOrig[i] // Unequal group
|
||||
hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
|
||||
hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
|
||||
if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
|
||||
*prev = prev.Append(*curr).Append(*next)
|
||||
groups = groups[:len(groups)-1] // Truncate off equal group
|
||||
continue
|
||||
}
|
||||
}
|
||||
groups = append(groups, ds)
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
||||
// cleanupSurroundingIdentical scans through all unequal groups, and
|
||||
// moves any leading sequence of equal elements to the preceding equal group and
|
||||
// moves and trailing sequence of equal elements to the succeeding equal group.
|
||||
//
|
||||
// This is necessary since coalesceInterveningIdentical may coalesce edit groups
|
||||
// together such that leading/trailing spans of equal elements becomes possible.
|
||||
// Note that this can occur even with an optimal diffing algorithm.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// Input: [
|
||||
// {NumIdentical: 61},
|
||||
// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
|
||||
// {NumIdentical: 67},
|
||||
// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements
|
||||
// {NumIdentical: 54},
|
||||
// ]
|
||||
// Output: [
|
||||
// {NumIdentical: 64}, // incremented by 3
|
||||
// {NumRemoved: 9},
|
||||
// {NumIdentical: 67},
|
||||
// {NumRemoved: 9},
|
||||
// {NumIdentical: 64}, // incremented by 10
|
||||
// ]
|
||||
func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
|
||||
var ix, iy int // indexes into sequence x and y
|
||||
for i, ds := range groups {
|
||||
// Handle equal group.
|
||||
if ds.NumDiff() == 0 {
|
||||
ix += ds.NumIdentical
|
||||
iy += ds.NumIdentical
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle unequal group.
|
||||
nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
|
||||
ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
|
||||
var numLeadingIdentical, numTrailingIdentical int
|
||||
for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ {
|
||||
numLeadingIdentical++
|
||||
}
|
||||
for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ {
|
||||
numTrailingIdentical++
|
||||
}
|
||||
if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
|
||||
if numLeadingIdentical > 0 {
|
||||
// Remove leading identical span from this group and
|
||||
// insert it into the preceding group.
|
||||
if i-1 >= 0 {
|
||||
groups[i-1].NumIdentical += numLeadingIdentical
|
||||
} else {
|
||||
// No preceding group exists, so prepend a new group,
|
||||
// but do so after we finish iterating over all groups.
|
||||
defer func() {
|
||||
groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...)
|
||||
}()
|
||||
}
|
||||
// Increment indexes since the preceding group would have handled this.
|
||||
ix += numLeadingIdentical
|
||||
iy += numLeadingIdentical
|
||||
}
|
||||
if numTrailingIdentical > 0 {
|
||||
// Remove trailing identical span from this group and
|
||||
// insert it into the succeeding group.
|
||||
if i+1 < len(groups) {
|
||||
groups[i+1].NumIdentical += numTrailingIdentical
|
||||
} else {
|
||||
// No succeeding group exists, so append a new group,
|
||||
// but do so after we finish iterating over all groups.
|
||||
defer func() {
|
||||
groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical})
|
||||
}()
|
||||
}
|
||||
// Do not increment indexes since the succeeding group will handle this.
|
||||
}
|
||||
|
||||
// Update this group since some identical elements were removed.
|
||||
nx -= numIdentical
|
||||
ny -= numIdentical
|
||||
groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny}
|
||||
}
|
||||
ix += nx
|
||||
iy += ny
|
||||
}
|
||||
return groups
|
||||
}
|
||||
432
vendor/github.com/google/go-cmp/cmp/report_text.go
generated
vendored
Normal file
432
vendor/github.com/google/go-cmp/cmp/report_text.go
generated
vendored
Normal file
@@ -0,0 +1,432 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
)
|
||||
|
||||
var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
|
||||
|
||||
const maxColumnLength = 80
|
||||
|
||||
type indentMode int
|
||||
|
||||
func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
|
||||
// The output of Diff is documented as being unstable to provide future
|
||||
// flexibility in changing the output for more humanly readable reports.
|
||||
// This logic intentionally introduces instability to the exact output
|
||||
// so that users can detect accidental reliance on stability early on,
|
||||
// rather than much later when an actual change to the format occurs.
|
||||
if flags.Deterministic || randBool {
|
||||
// Use regular spaces (U+0020).
|
||||
switch d {
|
||||
case diffUnknown, diffIdentical:
|
||||
b = append(b, " "...)
|
||||
case diffRemoved:
|
||||
b = append(b, "- "...)
|
||||
case diffInserted:
|
||||
b = append(b, "+ "...)
|
||||
}
|
||||
} else {
|
||||
// Use non-breaking spaces (U+00a0).
|
||||
switch d {
|
||||
case diffUnknown, diffIdentical:
|
||||
b = append(b, " "...)
|
||||
case diffRemoved:
|
||||
b = append(b, "- "...)
|
||||
case diffInserted:
|
||||
b = append(b, "+ "...)
|
||||
}
|
||||
}
|
||||
return repeatCount(n).appendChar(b, '\t')
|
||||
}
|
||||
|
||||
type repeatCount int
|
||||
|
||||
func (n repeatCount) appendChar(b []byte, c byte) []byte {
|
||||
for ; n > 0; n-- {
|
||||
b = append(b, c)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// textNode is a simplified tree-based representation of structured text.
|
||||
// Possible node types are textWrap, textList, or textLine.
|
||||
type textNode interface {
|
||||
// Len reports the length in bytes of a single-line version of the tree.
|
||||
// Nested textRecord.Diff and textRecord.Comment fields are ignored.
|
||||
Len() int
|
||||
// Equal reports whether the two trees are structurally identical.
|
||||
// Nested textRecord.Diff and textRecord.Comment fields are compared.
|
||||
Equal(textNode) bool
|
||||
// String returns the string representation of the text tree.
|
||||
// It is not guaranteed that len(x.String()) == x.Len(),
|
||||
// nor that x.String() == y.String() implies that x.Equal(y).
|
||||
String() string
|
||||
|
||||
// formatCompactTo formats the contents of the tree as a single-line string
|
||||
// to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
|
||||
// fields are ignored.
|
||||
//
|
||||
// However, not all nodes in the tree should be collapsed as a single-line.
|
||||
// If a node can be collapsed as a single-line, it is replaced by a textLine
|
||||
// node. Since the top-level node cannot replace itself, this also returns
|
||||
// the current node itself.
|
||||
//
|
||||
// This does not mutate the receiver.
|
||||
formatCompactTo([]byte, diffMode) ([]byte, textNode)
|
||||
// formatExpandedTo formats the contents of the tree as a multi-line string
|
||||
// to the provided buffer. In order for column alignment to operate well,
|
||||
// formatCompactTo must be called before calling formatExpandedTo.
|
||||
formatExpandedTo([]byte, diffMode, indentMode) []byte
|
||||
}
|
||||
|
||||
// textWrap is a wrapper that concatenates a prefix and/or a suffix
|
||||
// to the underlying node.
|
||||
type textWrap struct {
|
||||
Prefix string // e.g., "bytes.Buffer{"
|
||||
Value textNode // textWrap | textList | textLine
|
||||
Suffix string // e.g., "}"
|
||||
Metadata interface{} // arbitrary metadata; has no effect on formatting
|
||||
}
|
||||
|
||||
func (s *textWrap) Len() int {
|
||||
return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
|
||||
}
|
||||
func (s1 *textWrap) Equal(s2 textNode) bool {
|
||||
if s2, ok := s2.(*textWrap); ok {
|
||||
return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (s *textWrap) String() string {
|
||||
var d diffMode
|
||||
var n indentMode
|
||||
_, s2 := s.formatCompactTo(nil, d)
|
||||
b := n.appendIndent(nil, d) // Leading indent
|
||||
b = s2.formatExpandedTo(b, d, n) // Main body
|
||||
b = append(b, '\n') // Trailing newline
|
||||
return string(b)
|
||||
}
|
||||
func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
|
||||
n0 := len(b) // Original buffer length
|
||||
b = append(b, s.Prefix...)
|
||||
b, s.Value = s.Value.formatCompactTo(b, d)
|
||||
b = append(b, s.Suffix...)
|
||||
if _, ok := s.Value.(textLine); ok {
|
||||
return b, textLine(b[n0:])
|
||||
}
|
||||
return b, s
|
||||
}
|
||||
func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
|
||||
b = append(b, s.Prefix...)
|
||||
b = s.Value.formatExpandedTo(b, d, n)
|
||||
b = append(b, s.Suffix...)
|
||||
return b
|
||||
}
|
||||
|
||||
// textList is a comma-separated list of textWrap or textLine nodes.
|
||||
// The list may be formatted as multi-lines or single-line at the discretion
|
||||
// of the textList.formatCompactTo method.
|
||||
type textList []textRecord
|
||||
type textRecord struct {
|
||||
Diff diffMode // e.g., 0 or '-' or '+'
|
||||
Key string // e.g., "MyField"
|
||||
Value textNode // textWrap | textLine
|
||||
ElideComma bool // avoid trailing comma
|
||||
Comment fmt.Stringer // e.g., "6 identical fields"
|
||||
}
|
||||
|
||||
// AppendEllipsis appends a new ellipsis node to the list if none already
|
||||
// exists at the end. If cs is non-zero it coalesces the statistics with the
|
||||
// previous diffStats.
|
||||
func (s *textList) AppendEllipsis(ds diffStats) {
|
||||
hasStats := !ds.IsZero()
|
||||
if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
|
||||
if hasStats {
|
||||
*s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds})
|
||||
} else {
|
||||
*s = append(*s, textRecord{Value: textEllipsis, ElideComma: true})
|
||||
}
|
||||
return
|
||||
}
|
||||
if hasStats {
|
||||
(*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds)
|
||||
}
|
||||
}
|
||||
|
||||
func (s textList) Len() (n int) {
|
||||
for i, r := range s {
|
||||
n += len(r.Key)
|
||||
if r.Key != "" {
|
||||
n += len(": ")
|
||||
}
|
||||
n += r.Value.Len()
|
||||
if i < len(s)-1 {
|
||||
n += len(", ")
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (s1 textList) Equal(s2 textNode) bool {
|
||||
if s2, ok := s2.(textList); ok {
|
||||
if len(s1) != len(s2) {
|
||||
return false
|
||||
}
|
||||
for i := range s1 {
|
||||
r1, r2 := s1[i], s2[i]
|
||||
if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s textList) String() string {
|
||||
return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String()
|
||||
}
|
||||
|
||||
func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
|
||||
s = append(textList(nil), s...) // Avoid mutating original
|
||||
|
||||
// Determine whether we can collapse this list as a single line.
|
||||
n0 := len(b) // Original buffer length
|
||||
var multiLine bool
|
||||
for i, r := range s {
|
||||
if r.Diff == diffInserted || r.Diff == diffRemoved {
|
||||
multiLine = true
|
||||
}
|
||||
b = append(b, r.Key...)
|
||||
if r.Key != "" {
|
||||
b = append(b, ": "...)
|
||||
}
|
||||
b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff)
|
||||
if _, ok := s[i].Value.(textLine); !ok {
|
||||
multiLine = true
|
||||
}
|
||||
if r.Comment != nil {
|
||||
multiLine = true
|
||||
}
|
||||
if i < len(s)-1 {
|
||||
b = append(b, ", "...)
|
||||
}
|
||||
}
|
||||
// Force multi-lined output when printing a removed/inserted node that
|
||||
// is sufficiently long.
|
||||
if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength {
|
||||
multiLine = true
|
||||
}
|
||||
if !multiLine {
|
||||
return b, textLine(b[n0:])
|
||||
}
|
||||
return b, s
|
||||
}
|
||||
|
||||
func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
|
||||
alignKeyLens := s.alignLens(
|
||||
func(r textRecord) bool {
|
||||
_, isLine := r.Value.(textLine)
|
||||
return r.Key == "" || !isLine
|
||||
},
|
||||
func(r textRecord) int { return utf8.RuneCountInString(r.Key) },
|
||||
)
|
||||
alignValueLens := s.alignLens(
|
||||
func(r textRecord) bool {
|
||||
_, isLine := r.Value.(textLine)
|
||||
return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
|
||||
},
|
||||
func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) },
|
||||
)
|
||||
|
||||
// Format lists of simple lists in a batched form.
|
||||
// If the list is sequence of only textLine values,
|
||||
// then batch multiple values on a single line.
|
||||
var isSimple bool
|
||||
for _, r := range s {
|
||||
_, isLine := r.Value.(textLine)
|
||||
isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil
|
||||
if !isSimple {
|
||||
break
|
||||
}
|
||||
}
|
||||
if isSimple {
|
||||
n++
|
||||
var batch []byte
|
||||
emitBatch := func() {
|
||||
if len(batch) > 0 {
|
||||
b = n.appendIndent(append(b, '\n'), d)
|
||||
b = append(b, bytes.TrimRight(batch, " ")...)
|
||||
batch = batch[:0]
|
||||
}
|
||||
}
|
||||
for _, r := range s {
|
||||
line := r.Value.(textLine)
|
||||
if len(batch)+len(line)+len(", ") > maxColumnLength {
|
||||
emitBatch()
|
||||
}
|
||||
batch = append(batch, line...)
|
||||
batch = append(batch, ", "...)
|
||||
}
|
||||
emitBatch()
|
||||
n--
|
||||
return n.appendIndent(append(b, '\n'), d)
|
||||
}
|
||||
|
||||
// Format the list as a multi-lined output.
|
||||
n++
|
||||
for i, r := range s {
|
||||
b = n.appendIndent(append(b, '\n'), d|r.Diff)
|
||||
if r.Key != "" {
|
||||
b = append(b, r.Key+": "...)
|
||||
}
|
||||
b = alignKeyLens[i].appendChar(b, ' ')
|
||||
|
||||
b = r.Value.formatExpandedTo(b, d|r.Diff, n)
|
||||
if !r.ElideComma {
|
||||
b = append(b, ',')
|
||||
}
|
||||
b = alignValueLens[i].appendChar(b, ' ')
|
||||
|
||||
if r.Comment != nil {
|
||||
b = append(b, " // "+r.Comment.String()...)
|
||||
}
|
||||
}
|
||||
n--
|
||||
|
||||
return n.appendIndent(append(b, '\n'), d)
|
||||
}
|
||||
|
||||
func (s textList) alignLens(
|
||||
skipFunc func(textRecord) bool,
|
||||
lenFunc func(textRecord) int,
|
||||
) []repeatCount {
|
||||
var startIdx, endIdx, maxLen int
|
||||
lens := make([]repeatCount, len(s))
|
||||
for i, r := range s {
|
||||
if skipFunc(r) {
|
||||
for j := startIdx; j < endIdx && j < len(s); j++ {
|
||||
lens[j] = repeatCount(maxLen - lenFunc(s[j]))
|
||||
}
|
||||
startIdx, endIdx, maxLen = i+1, i+1, 0
|
||||
} else {
|
||||
if maxLen < lenFunc(r) {
|
||||
maxLen = lenFunc(r)
|
||||
}
|
||||
endIdx = i + 1
|
||||
}
|
||||
}
|
||||
for j := startIdx; j < endIdx && j < len(s); j++ {
|
||||
lens[j] = repeatCount(maxLen - lenFunc(s[j]))
|
||||
}
|
||||
return lens
|
||||
}
|
||||
|
||||
// textLine is a single-line segment of text and is always a leaf node
|
||||
// in the textNode tree.
|
||||
type textLine []byte
|
||||
|
||||
var (
|
||||
textNil = textLine("nil")
|
||||
textEllipsis = textLine("...")
|
||||
)
|
||||
|
||||
func (s textLine) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
func (s1 textLine) Equal(s2 textNode) bool {
|
||||
if s2, ok := s2.(textLine); ok {
|
||||
return bytes.Equal([]byte(s1), []byte(s2))
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (s textLine) String() string {
|
||||
return string(s)
|
||||
}
|
||||
func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
|
||||
return append(b, s...), s
|
||||
}
|
||||
func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte {
|
||||
return append(b, s...)
|
||||
}
|
||||
|
||||
type diffStats struct {
|
||||
Name string
|
||||
NumIgnored int
|
||||
NumIdentical int
|
||||
NumRemoved int
|
||||
NumInserted int
|
||||
NumModified int
|
||||
}
|
||||
|
||||
func (s diffStats) IsZero() bool {
|
||||
s.Name = ""
|
||||
return s == diffStats{}
|
||||
}
|
||||
|
||||
func (s diffStats) NumDiff() int {
|
||||
return s.NumRemoved + s.NumInserted + s.NumModified
|
||||
}
|
||||
|
||||
func (s diffStats) Append(ds diffStats) diffStats {
|
||||
assert(s.Name == ds.Name)
|
||||
s.NumIgnored += ds.NumIgnored
|
||||
s.NumIdentical += ds.NumIdentical
|
||||
s.NumRemoved += ds.NumRemoved
|
||||
s.NumInserted += ds.NumInserted
|
||||
s.NumModified += ds.NumModified
|
||||
return s
|
||||
}
|
||||
|
||||
// String prints a humanly-readable summary of coalesced records.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
|
||||
func (s diffStats) String() string {
|
||||
var ss []string
|
||||
var sum int
|
||||
labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
|
||||
counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
|
||||
for i, n := range counts {
|
||||
if n > 0 {
|
||||
ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
|
||||
}
|
||||
sum += n
|
||||
}
|
||||
|
||||
// Pluralize the name (adjusting for some obscure English grammar rules).
|
||||
name := s.Name
|
||||
if sum > 1 {
|
||||
name += "s"
|
||||
if strings.HasSuffix(name, "ys") {
|
||||
name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
|
||||
}
|
||||
}
|
||||
|
||||
// Format the list according to English grammar (with Oxford comma).
|
||||
switch n := len(ss); n {
|
||||
case 0:
|
||||
return ""
|
||||
case 1, 2:
|
||||
return strings.Join(ss, " and ") + " " + name
|
||||
default:
|
||||
return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
|
||||
}
|
||||
}
|
||||
|
||||
// commentString is a plain string that satisfies fmt.Stringer,
// for use as a fixed textRecord comment.
type commentString string

// String returns the comment text.
func (s commentString) String() string { return string(s) }
|
||||
121
vendor/github.com/google/go-cmp/cmp/report_value.go
generated
vendored
Normal file
121
vendor/github.com/google/go-cmp/cmp/report_value.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import "reflect"
|
||||
|
||||
// valueNode represents a single node within a report, which is a
// structured representation of the value tree, containing information
// regarding which nodes are equal or not.
type valueNode struct {
	parent *valueNode

	Type   reflect.Type
	ValueX reflect.Value
	ValueY reflect.Value

	// NumSame is the number of leaf nodes that are equal.
	// All descendants are equal only if NumDiff is 0.
	NumSame int
	// NumDiff is the number of leaf nodes that are not equal.
	NumDiff int
	// NumIgnored is the number of leaf nodes that are ignored.
	NumIgnored int
	// NumCompared is the number of leaf nodes that were compared
	// using an Equal method or Comparer function.
	NumCompared int
	// NumTransformed is the number of non-leaf nodes that were transformed.
	NumTransformed int
	// NumChildren is the number of transitive descendants of this node.
	// This counts from zero; thus, leaf nodes have no descendants.
	NumChildren int
	// MaxDepth is the maximum depth of the tree. This counts from zero;
	// thus, leaf nodes have a depth of zero.
	MaxDepth int

	// Records is a list of struct fields, slice elements, or map entries.
	Records []reportRecord // If populated, implies Value is not populated

	// Value is the result of a transformation, pointer indirect, of
	// type assertion.
	Value *valueNode // If populated, implies Records is not populated

	// TransformerName is the name of the transformer.
	TransformerName string // If non-empty, implies Value is populated
}

// reportRecord pairs an optional key with a child node.
type reportRecord struct {
	Key   reflect.Value // Invalid for slice element
	Value *valueNode
}
|
||||
|
||||
func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
|
||||
vx, vy := ps.Values()
|
||||
child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
|
||||
switch s := ps.(type) {
|
||||
case StructField:
|
||||
assert(parent.Value == nil)
|
||||
parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
|
||||
case SliceIndex:
|
||||
assert(parent.Value == nil)
|
||||
parent.Records = append(parent.Records, reportRecord{Value: child})
|
||||
case MapIndex:
|
||||
assert(parent.Value == nil)
|
||||
parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
|
||||
case Indirect:
|
||||
assert(parent.Value == nil && parent.Records == nil)
|
||||
parent.Value = child
|
||||
case TypeAssertion:
|
||||
assert(parent.Value == nil && parent.Records == nil)
|
||||
parent.Value = child
|
||||
case Transform:
|
||||
assert(parent.Value == nil && parent.Records == nil)
|
||||
parent.Value = child
|
||||
parent.TransformerName = s.Name()
|
||||
parent.NumTransformed++
|
||||
default:
|
||||
assert(parent == nil) // Must be the root step
|
||||
}
|
||||
return child
|
||||
}
|
||||
|
||||
func (r *valueNode) Report(rs Result) {
|
||||
assert(r.MaxDepth == 0) // May only be called on leaf nodes
|
||||
|
||||
if rs.ByIgnore() {
|
||||
r.NumIgnored++
|
||||
} else {
|
||||
if rs.Equal() {
|
||||
r.NumSame++
|
||||
} else {
|
||||
r.NumDiff++
|
||||
}
|
||||
}
|
||||
assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
|
||||
|
||||
if rs.ByMethod() {
|
||||
r.NumCompared++
|
||||
}
|
||||
if rs.ByFunc() {
|
||||
r.NumCompared++
|
||||
}
|
||||
assert(r.NumCompared <= 1)
|
||||
}
|
||||
|
||||
func (child *valueNode) PopStep() (parent *valueNode) {
|
||||
if child.parent == nil {
|
||||
return nil
|
||||
}
|
||||
parent = child.parent
|
||||
parent.NumSame += child.NumSame
|
||||
parent.NumDiff += child.NumDiff
|
||||
parent.NumIgnored += child.NumIgnored
|
||||
parent.NumCompared += child.NumCompared
|
||||
parent.NumTransformed += child.NumTransformed
|
||||
parent.NumChildren += child.NumChildren + 1
|
||||
if parent.MaxDepth < child.MaxDepth+1 {
|
||||
parent.MaxDepth = child.MaxDepth + 1
|
||||
}
|
||||
return parent
|
||||
}
|
||||
5
vendor/github.com/google/pprof/profile/encode.go
generated
vendored
5
vendor/github.com/google/pprof/profile/encode.go
generated
vendored
@@ -122,6 +122,7 @@ func (p *Profile) preEncode() {
|
||||
}
|
||||
|
||||
p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)
|
||||
p.docURLX = addString(strings, p.DocURL)
|
||||
|
||||
p.stringTable = make([]string, len(strings))
|
||||
for s, i := range strings {
|
||||
@@ -156,6 +157,7 @@ func (p *Profile) encode(b *buffer) {
|
||||
encodeInt64Opt(b, 12, p.Period)
|
||||
encodeInt64s(b, 13, p.commentX)
|
||||
encodeInt64(b, 14, p.defaultSampleTypeX)
|
||||
encodeInt64Opt(b, 15, p.docURLX)
|
||||
}
|
||||
|
||||
var profileDecoder = []decoder{
|
||||
@@ -237,6 +239,8 @@ var profileDecoder = []decoder{
|
||||
func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
|
||||
// int64 defaultSampleType = 14
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
|
||||
// string doc_link = 15;
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).docURLX) },
|
||||
}
|
||||
|
||||
// postDecode takes the unexported fields populated by decode (with
|
||||
@@ -384,6 +388,7 @@ func (p *Profile) postDecode() error {
|
||||
|
||||
p.commentX = nil
|
||||
p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
|
||||
p.DocURL, err = getString(p.stringTable, &p.docURLX, err)
|
||||
p.stringTable = nil
|
||||
return err
|
||||
}
|
||||
|
||||
5
vendor/github.com/google/pprof/profile/merge.go
generated
vendored
5
vendor/github.com/google/pprof/profile/merge.go
generated
vendored
@@ -476,6 +476,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {
|
||||
var timeNanos, durationNanos, period int64
|
||||
var comments []string
|
||||
seenComments := map[string]bool{}
|
||||
var docURL string
|
||||
var defaultSampleType string
|
||||
for _, s := range srcs {
|
||||
if timeNanos == 0 || s.TimeNanos < timeNanos {
|
||||
@@ -494,6 +495,9 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {
|
||||
if defaultSampleType == "" {
|
||||
defaultSampleType = s.DefaultSampleType
|
||||
}
|
||||
if docURL == "" {
|
||||
docURL = s.DocURL
|
||||
}
|
||||
}
|
||||
|
||||
p := &Profile{
|
||||
@@ -509,6 +513,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {
|
||||
|
||||
Comments: comments,
|
||||
DefaultSampleType: defaultSampleType,
|
||||
DocURL: docURL,
|
||||
}
|
||||
copy(p.SampleType, srcs[0].SampleType)
|
||||
return p, nil
|
||||
|
||||
9
vendor/github.com/google/pprof/profile/profile.go
generated
vendored
9
vendor/github.com/google/pprof/profile/profile.go
generated
vendored
@@ -39,6 +39,7 @@ type Profile struct {
|
||||
Location []*Location
|
||||
Function []*Function
|
||||
Comments []string
|
||||
DocURL string
|
||||
|
||||
DropFrames string
|
||||
KeepFrames string
|
||||
@@ -53,6 +54,7 @@ type Profile struct {
|
||||
encodeMu sync.Mutex
|
||||
|
||||
commentX []int64
|
||||
docURLX int64
|
||||
dropFramesX int64
|
||||
keepFramesX int64
|
||||
stringTable []string
|
||||
@@ -555,6 +557,9 @@ func (p *Profile) String() string {
|
||||
for _, c := range p.Comments {
|
||||
ss = append(ss, "Comment: "+c)
|
||||
}
|
||||
if url := p.DocURL; url != "" {
|
||||
ss = append(ss, fmt.Sprintf("Doc: %s", url))
|
||||
}
|
||||
if pt := p.PeriodType; pt != nil {
|
||||
ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
|
||||
}
|
||||
@@ -844,10 +849,10 @@ func (p *Profile) HasFileLines() bool {
|
||||
|
||||
// Unsymbolizable returns true if a mapping points to a binary for which
|
||||
// locations can't be symbolized in principle, at least now. Examples are
|
||||
// "[vdso]", [vsyscall]" and some others, see the code.
|
||||
// "[vdso]", "[vsyscall]" and some others, see the code.
|
||||
func (m *Mapping) Unsymbolizable() bool {
|
||||
name := filepath.Base(m.File)
|
||||
return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
|
||||
return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon"
|
||||
}
|
||||
|
||||
// Copy makes a fully independent copy of a profile.
|
||||
|
||||
13
vendor/github.com/x448/float16/.travis.yml
generated
vendored
Normal file
13
vendor/github.com/x448/float16/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.11.x
|
||||
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
|
||||
script:
|
||||
- go test -short -coverprofile=coverage.txt -covermode=count ./...
|
||||
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
22
vendor/github.com/x448/float16/LICENSE
generated
vendored
Normal file
22
vendor/github.com/x448/float16/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
133
vendor/github.com/x448/float16/README.md
generated
vendored
Normal file
133
vendor/github.com/x448/float16/README.md
generated
vendored
Normal file
@@ -0,0 +1,133 @@
|
||||
# Float16 (Binary16) in Go/Golang
|
||||
[](https://travis-ci.org/x448/float16)
|
||||
[](https://codecov.io/gh/x448/float16)
|
||||
[](https://goreportcard.com/report/github.com/x448/float16)
|
||||
[](https://github.com/x448/float16/releases)
|
||||
[](https://raw.githubusercontent.com/x448/float16/master/LICENSE)
|
||||
|
||||
`float16` package provides [IEEE 754 half-precision floating-point format (binary16)](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) with IEEE 754 default rounding for conversions. IEEE 754-2008 refers to this 16-bit floating-point format as binary16.
|
||||
|
||||
IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven") is considered the most accurate and statistically unbiased estimate of the true result.
|
||||
|
||||
All possible 4+ billion floating-point conversions with this library are verified to be correct.
|
||||
|
||||
Lowercase "float16" refers to IEEE 754 binary16. And capitalized "Float16" refers to exported Go data type provided by this library.
|
||||
|
||||
## Features
|
||||
Current features include:
|
||||
|
||||
* float16 to float32 conversions use lossless conversion.
|
||||
* float32 to float16 conversions use IEEE 754-2008 "Round-to-Nearest RoundTiesToEven".
|
||||
* conversions using pure Go take about 2.65 ns/op on a desktop amd64.
|
||||
* unit tests provide 100% code coverage and check all possible 4+ billion conversions.
|
||||
* other functions include: IsInf(), IsNaN(), IsNormal(), PrecisionFromfloat32(), String(), etc.
|
||||
* all functions in this library use zero allocs except String().
|
||||
|
||||
## Status
|
||||
This library is used by [fxamacker/cbor](https://github.com/fxamacker/cbor) and is ready for production use on supported platforms. The version number < 1.0 indicates more functions and options are planned but not yet published.
|
||||
|
||||
Current status:
|
||||
|
||||
* core API is done and breaking API changes are unlikely.
|
||||
* 100% of unit tests pass:
|
||||
* short mode (`go test -short`) tests around 65765 conversions in 0.005s.
|
||||
* normal mode (`go test`) tests all possible 4+ billion conversions in about 95s.
|
||||
* 100% code coverage with both short mode and normal mode.
|
||||
* tested on amd64 but it should work on all little-endian platforms supported by Go.
|
||||
|
||||
Roadmap:
|
||||
|
||||
* add functions for fast batch conversions leveraging SIMD when supported by hardware.
|
||||
* speed up unit test when verifying all possible 4+ billion conversions.
|
||||
* test on additional platforms.
|
||||
|
||||
## Float16 to Float32 Conversion
|
||||
Conversions from float16 to float32 are lossless conversions. All 65536 possible float16 to float32 conversions (in pure Go) are confirmed to be correct.
|
||||
|
||||
Unit tests take a fraction of a second to check all 65536 expected values for float16 to float32 conversions.
|
||||
|
||||
## Float32 to Float16 Conversion
|
||||
Conversions from float32 to float16 use IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven"). All 4294967296 possible float32 to float16 conversions (in pure Go) are confirmed to be correct.
|
||||
|
||||
Unit tests in normal mode take about 1-2 minutes to check all 4+ billion float32 input values and results for Fromfloat32(), FromNaN32ps(), and PrecisionFromfloat32().
|
||||
|
||||
Unit tests in short mode use a small subset (around 229 float32 inputs) and finish in under 0.01 second while still reaching 100% code coverage.
|
||||
|
||||
## Usage
|
||||
Install with `go get github.com/x448/float16`.
|
||||
```
|
||||
// Convert float32 to float16
|
||||
pi := float32(math.Pi)
|
||||
pi16 := float16.Fromfloat32(pi)
|
||||
|
||||
// Convert float16 to float32
|
||||
pi32 := pi16.Float32()
|
||||
|
||||
// PrecisionFromfloat32() is faster than the overhead of calling a function.
|
||||
// This example only converts if there's no data loss and input is not a subnormal.
|
||||
if float16.PrecisionFromfloat32(pi) == float16.PrecisionExact {
|
||||
pi16 := float16.Fromfloat32(pi)
|
||||
}
|
||||
```
|
||||
|
||||
## Float16 Type and API
|
||||
Float16 (capitalized) is a Go type with uint16 as the underlying state. There are 6 exported functions and 9 exported methods.
|
||||
```
|
||||
package float16 // import "github.com/x448/float16"
|
||||
|
||||
// Exported types and consts
|
||||
type Float16 uint16
|
||||
const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN")
|
||||
|
||||
// Exported functions
|
||||
Fromfloat32(f32 float32) Float16 // Float16 number converted from f32 using IEEE 754 default rounding
|
||||
with identical results to AMD and Intel F16C hardware. NaN inputs
|
||||
are converted with quiet bit always set on, to be like F16C.
|
||||
|
||||
FromNaN32ps(nan float32) (Float16, error) // Float16 NaN without modifying quiet bit.
|
||||
// The "ps" suffix means "preserve signaling".
|
||||
// Returns sNaN and ErrInvalidNaNValue if nan isn't a NaN.
|
||||
|
||||
Frombits(b16 uint16) Float16 // Float16 number corresponding to b16 (IEEE 754 binary16 rep.)
|
||||
NaN() Float16 // Float16 of IEEE 754 binary16 not-a-number
|
||||
Inf(sign int) Float16 // Float16 of IEEE 754 binary16 infinity according to sign
|
||||
|
||||
PrecisionFromfloat32(f32 float32) Precision // quickly indicates exact, ..., overflow, underflow
|
||||
// (inline and < 1 ns/op)
|
||||
// Exported methods
|
||||
(f Float16) Float32() float32 // float32 number converted from f16 using lossless conversion
|
||||
(f Float16) Bits() uint16 // the IEEE 754 binary16 representation of f
|
||||
(f Float16) IsNaN() bool // true if f is not-a-number (NaN)
|
||||
(f Float16) IsQuietNaN() bool // true if f is a quiet not-a-number (NaN)
|
||||
(f Float16) IsInf(sign int) bool // true if f is infinite based on sign (-1=NegInf, 0=any, 1=PosInf)
|
||||
(f Float16) IsFinite() bool // true if f is not infinite or NaN
|
||||
(f Float16) IsNormal() bool // true if f is not zero, infinite, subnormal, or NaN.
|
||||
(f Float16) Signbit() bool // true if f is negative or negative zero
|
||||
(f Float16) String() string // string representation of f to satisfy fmt.Stringer interface
|
||||
```
|
||||
See [API](https://godoc.org/github.com/x448/float16) at godoc.org for more info.
|
||||
|
||||
## Benchmarks
|
||||
Conversions (in pure Go) are around 2.65 ns/op for float16 -> float32 and float32 -> float16 on amd64. Speeds can vary depending on input value.
|
||||
|
||||
```
|
||||
All functions have zero allocations except float16.String().
|
||||
|
||||
FromFloat32pi-2 2.59ns ± 0% // speed using Fromfloat32() to convert a float32 of math.Pi to Float16
|
||||
ToFloat32pi-2 2.69ns ± 0% // speed using Float32() to convert a float16 of math.Pi to float32
|
||||
Frombits-2 0.29ns ± 5% // speed using Frombits() to cast a uint16 to Float16
|
||||
|
||||
PrecisionFromFloat32-2 0.29ns ± 1% // speed using PrecisionFromfloat32() to check for overflows, etc.
|
||||
```
|
||||
|
||||
## System Requirements
|
||||
* Tested on Go 1.11, 1.12, and 1.13 but it should also work with older versions.
|
||||
* Tested on amd64 but it should also work on all little-endian platforms supported by Go.
|
||||
|
||||
## Special Thanks
|
||||
Special thanks to Kathryn Long (starkat99) for creating [half-rs](https://github.com/starkat99/half-rs), a very nice rust implementation of float16.
|
||||
|
||||
## License
|
||||
Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
|
||||
|
||||
Licensed under [MIT License](LICENSE)
|
||||
302
vendor/github.com/x448/float16/float16.go
generated
vendored
Normal file
302
vendor/github.com/x448/float16/float16.go
generated
vendored
Normal file
@@ -0,0 +1,302 @@
|
||||
// Copyright 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
|
||||
//
|
||||
// Special thanks to Kathryn Long for her Rust implementation
|
||||
// of float16 at github.com/starkat99/half-rs (MIT license)
|
||||
|
||||
package float16
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Float16 represents IEEE 754 half-precision floating-point numbers (binary16).
|
||||
type Float16 uint16
|
||||
|
||||
// Precision indicates whether the conversion to Float16 is
|
||||
// exact, subnormal without dropped bits, inexact, underflow, or overflow.
|
||||
type Precision int
|
||||
|
||||
const (
|
||||
|
||||
// PrecisionExact is for non-subnormals that don't drop bits during conversion.
|
||||
// All of these can round-trip. Should always convert to float16.
|
||||
PrecisionExact Precision = iota
|
||||
|
||||
// PrecisionUnknown is for subnormals that don't drop bits during conversion but
|
||||
// not all of these can round-trip so precision is unknown without more effort.
|
||||
// Only 2046 of these can round-trip and the rest cannot round-trip.
|
||||
PrecisionUnknown
|
||||
|
||||
// PrecisionInexact is for dropped significand bits and cannot round-trip.
|
||||
// Some of these are subnormals. Cannot round-trip float32->float16->float32.
|
||||
PrecisionInexact
|
||||
|
||||
// PrecisionUnderflow is for Underflows. Cannot round-trip float32->float16->float32.
|
||||
PrecisionUnderflow
|
||||
|
||||
// PrecisionOverflow is for Overflows. Cannot round-trip float32->float16->float32.
|
||||
PrecisionOverflow
|
||||
)
|
||||
|
||||
// PrecisionFromfloat32 returns Precision without performing
|
||||
// the conversion. Conversions from both Infinity and NaN
|
||||
// values will always report PrecisionExact even if NaN payload
|
||||
// or NaN-Quiet-Bit is lost. This function is kept simple to
|
||||
// allow inlining and run < 0.5 ns/op, to serve as a fast filter.
|
||||
func PrecisionFromfloat32(f32 float32) Precision {
|
||||
u32 := math.Float32bits(f32)
|
||||
|
||||
if u32 == 0 || u32 == 0x80000000 {
|
||||
// +- zero will always be exact conversion
|
||||
return PrecisionExact
|
||||
}
|
||||
|
||||
const COEFMASK uint32 = 0x7fffff // 23 least significant bits
|
||||
const EXPSHIFT uint32 = 23
|
||||
const EXPBIAS uint32 = 127
|
||||
const EXPMASK uint32 = uint32(0xff) << EXPSHIFT
|
||||
const DROPMASK uint32 = COEFMASK >> 10
|
||||
|
||||
exp := int32(((u32 & EXPMASK) >> EXPSHIFT) - EXPBIAS)
|
||||
coef := u32 & COEFMASK
|
||||
|
||||
if exp == 128 {
|
||||
// +- infinity or NaN
|
||||
// apps may want to do extra checks for NaN separately
|
||||
return PrecisionExact
|
||||
}
|
||||
|
||||
// https://en.wikipedia.org/wiki/Half-precision_floating-point_format says,
|
||||
// "Decimals between 2^−24 (minimum positive subnormal) and 2^−14 (maximum subnormal): fixed interval 2^−24"
|
||||
if exp < -24 {
|
||||
return PrecisionUnderflow
|
||||
}
|
||||
if exp > 15 {
|
||||
return PrecisionOverflow
|
||||
}
|
||||
if (coef & DROPMASK) != uint32(0) {
|
||||
// these include subnormals and non-subnormals that dropped bits
|
||||
return PrecisionInexact
|
||||
}
|
||||
|
||||
if exp < -14 {
|
||||
// Subnormals. Caller may want to test these further.
|
||||
// There are 2046 subnormals that can successfully round-trip f32->f16->f32
|
||||
// and 20 of those 2046 have 32-bit input coef == 0.
|
||||
// RFC 7049 and 7049bis Draft 12 don't precisely define "preserves value"
|
||||
// so some protocols and libraries will choose to handle subnormals differently
|
||||
// when deciding to encode them to CBOR float32 vs float16.
|
||||
return PrecisionUnknown
|
||||
}
|
||||
|
||||
return PrecisionExact
|
||||
}
|
||||
|
||||
// Frombits returns the float16 number corresponding to the IEEE 754 binary16
|
||||
// representation u16, with the sign bit of u16 and the result in the same bit
|
||||
// position. Frombits(Bits(x)) == x.
|
||||
func Frombits(u16 uint16) Float16 {
|
||||
return Float16(u16)
|
||||
}
|
||||
|
||||
// Fromfloat32 returns a Float16 value converted from f32. Conversion uses
|
||||
// IEEE default rounding (nearest int, with ties to even).
|
||||
func Fromfloat32(f32 float32) Float16 {
|
||||
return Float16(f32bitsToF16bits(math.Float32bits(f32)))
|
||||
}
|
||||
|
||||
// ErrInvalidNaNValue indicates a NaN was not received.
const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN")

// float16Error is a string-based error type. Backing errors with a string
// allows them to be declared as constants (as ErrInvalidNaNValue is above),
// which cannot be modified at runtime the way error variables can.
type float16Error string

// Error satisfies the error interface.
func (e float16Error) Error() string { return string(e) }
// FromNaN32ps converts nan to IEEE binary16 NaN while preserving both
|
||||
// signaling and payload. Unlike Fromfloat32(), which can only return
|
||||
// qNaN because it sets quiet bit = 1, this can return both sNaN and qNaN.
|
||||
// If the result is infinity (sNaN with empty payload), then the
|
||||
// lowest bit of payload is set to make the result a NaN.
|
||||
// Returns ErrInvalidNaNValue and 0x7c01 (sNaN) if nan isn't IEEE 754 NaN.
|
||||
// This function was kept simple to be able to inline.
|
||||
func FromNaN32ps(nan float32) (Float16, error) {
|
||||
const SNAN = Float16(uint16(0x7c01)) // signalling NaN
|
||||
|
||||
u32 := math.Float32bits(nan)
|
||||
sign := u32 & 0x80000000
|
||||
exp := u32 & 0x7f800000
|
||||
coef := u32 & 0x007fffff
|
||||
|
||||
if (exp != 0x7f800000) || (coef == 0) {
|
||||
return SNAN, ErrInvalidNaNValue
|
||||
}
|
||||
|
||||
u16 := uint16((sign >> 16) | uint32(0x7c00) | (coef >> 13))
|
||||
|
||||
if (u16 & 0x03ff) == 0 {
|
||||
// result became infinity, make it NaN by setting lowest bit in payload
|
||||
u16 = u16 | 0x0001
|
||||
}
|
||||
|
||||
return Float16(u16), nil
|
||||
}
|
||||
|
||||
// NaN returns a Float16 of IEEE 754 binary16 not-a-number (NaN).
|
||||
// Returned NaN value 0x7e01 has all exponent bits = 1 with the
|
||||
// first and last bits = 1 in the significand. This is consistent
|
||||
// with Go's 64-bit math.NaN(). Canonical CBOR in RFC 7049 uses 0x7e00.
|
||||
func NaN() Float16 {
|
||||
return Float16(0x7e01)
|
||||
}
|
||||
|
||||
// Inf returns a Float16 with an infinity value with the specified sign.
|
||||
// A sign >= returns positive infinity.
|
||||
// A sign < 0 returns negative infinity.
|
||||
func Inf(sign int) Float16 {
|
||||
if sign >= 0 {
|
||||
return Float16(0x7c00)
|
||||
}
|
||||
return Float16(0x8000 | 0x7c00)
|
||||
}
|
||||
|
||||
// Float32 returns a float32 converted from f (Float16).
|
||||
// This is a lossless conversion.
|
||||
func (f Float16) Float32() float32 {
|
||||
u32 := f16bitsToF32bits(uint16(f))
|
||||
return math.Float32frombits(u32)
|
||||
}
|
||||
|
||||
// Bits returns the IEEE 754 binary16 representation of f, with the sign bit
|
||||
// of f and the result in the same bit position. Bits(Frombits(x)) == x.
|
||||
func (f Float16) Bits() uint16 {
|
||||
return uint16(f)
|
||||
}
|
||||
|
||||
// IsNaN reports whether f is an IEEE 754 binary16 “not-a-number” value.
|
||||
func (f Float16) IsNaN() bool {
|
||||
return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0)
|
||||
}
|
||||
|
||||
// IsQuietNaN reports whether f is a quiet (non-signaling) IEEE 754 binary16
|
||||
// “not-a-number” value.
|
||||
func (f Float16) IsQuietNaN() bool {
|
||||
return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) && (f&0x0200 != 0)
|
||||
}
|
||||
|
||||
// IsInf reports whether f is an infinity (inf).
|
||||
// A sign > 0 reports whether f is positive inf.
|
||||
// A sign < 0 reports whether f is negative inf.
|
||||
// A sign == 0 reports whether f is either inf.
|
||||
func (f Float16) IsInf(sign int) bool {
|
||||
return ((f == 0x7c00) && sign >= 0) ||
|
||||
(f == 0xfc00 && sign <= 0)
|
||||
}
|
||||
|
||||
// IsFinite returns true if f is neither infinite nor NaN.
|
||||
func (f Float16) IsFinite() bool {
|
||||
return (uint16(f) & uint16(0x7c00)) != uint16(0x7c00)
|
||||
}
|
||||
|
||||
// IsNormal returns true if f is neither zero, infinite, subnormal, or NaN.
|
||||
func (f Float16) IsNormal() bool {
|
||||
exp := uint16(f) & uint16(0x7c00)
|
||||
return (exp != uint16(0x7c00)) && (exp != 0)
|
||||
}
|
||||
|
||||
// Signbit reports whether f is negative or negative zero.
|
||||
func (f Float16) Signbit() bool {
|
||||
return (uint16(f) & uint16(0x8000)) != 0
|
||||
}
|
||||
|
||||
// String satisfies the fmt.Stringer interface.
|
||||
func (f Float16) String() string {
|
||||
return strconv.FormatFloat(float64(f.Float32()), 'f', -1, 32)
|
||||
}
|
||||
|
||||
// f16bitsToF32bits returns uint32 (float32 bits) converted from specified uint16.
func f16bitsToF32bits(in uint16) uint32 {
	// All 65536 conversions with this were confirmed to be correct
	// by Montgomery Edwards⁴⁴⁸ (github.com/x448).

	sign := uint32(in&0x8000) << 16 // sign for 32-bit
	exp := uint32(in&0x7c00) >> 10  // exponent for 16-bit
	coef := uint32(in&0x03ff) << 13 // significand for 32-bit

	switch {
	case exp == 0x1f && coef == 0:
		// ±infinity
		return sign | 0x7f800000
	case exp == 0x1f:
		// NaN: force the float32 quiet bit on, keep the payload.
		return sign | 0x7fc00000 | coef
	case exp == 0 && coef == 0:
		// ±zero
		return sign
	case exp == 0:
		// Subnormal binary16: shift the significand left until its
		// leading bit reaches the float32 hidden-bit position, adjusting
		// the exponent to compensate, then drop the hidden bit.
		exp = 1
		for coef&0x7f800000 == 0 {
			coef <<= 1
			exp--
		}
		coef &= 0x007fffff
	}

	// Rebias the exponent from binary16 (bias 15) to binary32 (bias 127).
	return sign | ((exp + (0x7f - 0xf)) << 23) | coef
}
// f32bitsToF16bits returns uint16 (Float16 bits) converted from the specified float32.
// Conversion rounds to nearest integer with ties to even.
func f32bitsToF16bits(u32 uint32) uint16 {
	// Translated from Rust to Go by Montgomery Edwards⁴⁴⁸ (github.com/x448).
	// All 4294967296 conversions with this were confirmed to be correct by x448.
	// Original Rust implementation is by Kathryn Long (github.com/starkat99) with MIT license.

	signBits := u32 & 0x80000000
	expBits := u32 & 0x7f800000
	fracBits := u32 & 0x007fffff

	if expBits == 0x7f800000 {
		// NaN or infinity: keep the sign and the top 10 payload bits,
		// forcing the quiet bit on for NaN inputs.
		quiet := uint32(0)
		if fracBits != 0 {
			quiet = 0x0200
		}
		return uint16((signBits >> 16) | 0x7c00 | quiet | (fracBits >> 13))
	}

	h16Sign := signBits >> 16
	// Rebias the exponent from binary32 (bias 127) to binary16 (bias 15).
	h16Exp := int32(expBits>>23) - 127 + 15

	if h16Exp >= 0x1f {
		// Too large for binary16: overflow to ±infinity.
		return uint16(h16Sign | 0x7c00)
	}

	if h16Exp <= 0 {
		if 14-h16Exp > 24 {
			// Smaller than half the minimum binary16 subnormal:
			// underflow to ±zero.
			return uint16(h16Sign)
		}
		// Subnormal result: restore the implicit leading bit, shift right,
		// and round to nearest with ties to even. The sticky mask
		// 3*round-1 covers the kept LSB plus all bits below the round bit.
		frac := fracBits | 0x00800000
		h16Frac := frac >> uint32(14-h16Exp)
		round := uint32(1) << uint32(13-h16Exp)
		if (frac&round) != 0 && (frac&(3*round-1)) != 0 {
			h16Frac++
		}
		return uint16(h16Sign | h16Frac)
	}

	// Normal result: drop 13 fraction bits and round to nearest, ties to
	// even. An increment may carry into the exponent field, which is the
	// correct IEEE behavior (rounding up to the next binade or infinity).
	h16Bits := h16Sign | (uint32(h16Exp) << 10) | (fracBits >> 13)
	round := uint32(0x00001000)
	if (fracBits&round) != 0 && (fracBits&(3*round-1)) != 0 {
		h16Bits++
	}
	return uint16(h16Bits)
}
|
||||
Reference in New Issue
Block a user