mirror of
https://github.com/genuinetools/reg.git
synced 2024-05-10 00:38:31 -04:00
update vendor
Signed-off-by: Jess Frazelle <acidburn@jessfraz.com>
This commit is contained in:
parent
eaee914121
commit
f180c93a39
|
@ -11,7 +11,7 @@ jobs:
|
||||||
include:
|
include:
|
||||||
- script:
|
- script:
|
||||||
- make fmt lint staticcheck vet install
|
- make fmt lint staticcheck vet install
|
||||||
- DOCKER_API_VERSION=1.38 make dind dtest
|
- DOCKER_API_VERSION=1.39 make dind dtest
|
||||||
- stage: Build Release
|
- stage: Build Release
|
||||||
script:
|
script:
|
||||||
- make release
|
- make release
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
FROM golang:1.12-alpine as builder
|
FROM golang:alpine as builder
|
||||||
MAINTAINER Jessica Frazelle <jess@linux.com>
|
MAINTAINER Jessica Frazelle <jess@linux.com>
|
||||||
|
|
||||||
ENV PATH /go/bin:/usr/local/go/bin:$PATH
|
ENV PATH /go/bin:/usr/local/go/bin:$PATH
|
||||||
|
|
80
go.mod
80
go.mod
|
@ -1,73 +1,39 @@
|
||||||
module github.com/genuinetools/reg
|
module github.com/genuinetools/reg
|
||||||
|
|
||||||
|
replace github.com/Sirupsen/logrus => github.com/sirupsen/logrus v1.4.3-0.20190807103436-de736cf91b92
|
||||||
|
|
||||||
|
go 1.13
|
||||||
|
|
||||||
require (
|
require (
|
||||||
cloud.google.com/go v0.38.0 // indirect
|
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
|
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
|
||||||
github.com/Microsoft/go-winio v0.4.12 // indirect
|
github.com/Microsoft/go-winio v0.4.14 // indirect
|
||||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
|
github.com/Microsoft/hcsshim v0.8.6 // indirect
|
||||||
github.com/beorn7/perks v1.0.0 // indirect
|
github.com/containerd/containerd v1.2.9 // indirect
|
||||||
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect
|
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 // indirect
|
||||||
github.com/coreos/clair v2.0.8+incompatible
|
github.com/coreos/clair v2.0.1-0.20190910143208-94150ab1f4ac+incompatible
|
||||||
github.com/docker/cli v0.0.0-20190429053159-3273c2e23546 // indirect
|
github.com/deckarep/golang-set v1.7.1 // indirect
|
||||||
|
github.com/docker/cli v0.0.0-20190913211141-95327f4e6241
|
||||||
github.com/docker/distribution v2.7.1+incompatible
|
github.com/docker/distribution v2.7.1+incompatible
|
||||||
github.com/docker/docker v0.0.0-20180924202107-a9c061deec0f
|
github.com/docker/docker v1.4.2-0.20190916154449-92cc603036dd
|
||||||
github.com/docker/docker-ce v0.0.0-20180924210327-f53bd8bb8e43
|
github.com/docker/docker-credential-helpers v0.6.3 // indirect
|
||||||
github.com/docker/docker-credential-helpers v0.6.2 // indirect
|
github.com/docker/go-connections v0.4.0 // indirect
|
||||||
github.com/docker/go-connections v0.0.0-20180821093606-97c2040d34df // indirect
|
|
||||||
github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect
|
|
||||||
github.com/docker/go-units v0.4.0 // indirect
|
github.com/docker/go-units v0.4.0 // indirect
|
||||||
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
|
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
|
||||||
github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e // indirect
|
github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e // indirect
|
||||||
github.com/genuinetools/pkg v0.0.0-20181022210355-2fcf164d37cb
|
github.com/genuinetools/pkg v0.0.0-20181022210355-2fcf164d37cb
|
||||||
github.com/go-logfmt/logfmt v0.4.0 // indirect
|
github.com/gogo/protobuf v1.3.0 // indirect
|
||||||
github.com/gogo/protobuf v1.2.1 // indirect
|
github.com/google/go-cmp v0.3.1
|
||||||
github.com/golang/lint v0.0.0-20190409202823-959b441ac422 // indirect
|
github.com/gorilla/mux v1.7.3
|
||||||
github.com/golang/mock v1.3.0 // indirect
|
github.com/grpc-ecosystem/grpc-gateway v1.11.1 // indirect
|
||||||
github.com/google/btree v1.0.0 // indirect
|
|
||||||
github.com/google/go-cmp v0.3.0
|
|
||||||
github.com/google/pprof v0.0.0-20190502144155-8358a9778bd1 // indirect
|
|
||||||
github.com/gorilla/context v1.1.1 // indirect
|
|
||||||
github.com/gorilla/mux v1.7.1
|
|
||||||
github.com/grpc-ecosystem/grpc-gateway v1.8.5 // indirect
|
|
||||||
github.com/hashicorp/golang-lru v0.5.1 // indirect
|
|
||||||
github.com/kisielk/errcheck v1.2.0 // indirect
|
|
||||||
github.com/kisielk/gotool v1.0.0 // indirect
|
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
|
|
||||||
github.com/kr/pty v1.1.4 // indirect
|
|
||||||
github.com/mitchellh/go-wordwrap v1.0.0
|
github.com/mitchellh/go-wordwrap v1.0.0
|
||||||
github.com/onsi/ginkgo v1.8.0 // indirect
|
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
|
||||||
github.com/onsi/gomega v1.5.0 // indirect
|
github.com/opencontainers/go-digest v1.0.0-rc1
|
||||||
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2
|
|
||||||
github.com/opencontainers/image-spec v1.0.1 // indirect
|
github.com/opencontainers/image-spec v1.0.1 // indirect
|
||||||
github.com/opencontainers/runc v0.1.1 // indirect
|
github.com/opencontainers/runc v0.1.1 // indirect
|
||||||
github.com/peterhellberg/link v1.0.0
|
github.com/peterhellberg/link v1.0.0
|
||||||
github.com/pkg/errors v0.8.1 // indirect
|
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
|
||||||
github.com/prometheus/client_golang v0.9.2 // indirect
|
github.com/sirupsen/logrus v1.4.2
|
||||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 // indirect
|
|
||||||
github.com/prometheus/common v0.3.0 // indirect
|
|
||||||
github.com/prometheus/procfs v0.0.0-20190503130316-740c07785007 // indirect
|
|
||||||
github.com/rogpeppe/fastuuid v1.0.0 // indirect
|
|
||||||
github.com/shurcooL/httpfs v0.0.0-20181222201310-74dc9339e414
|
|
||||||
github.com/sirupsen/logrus v1.4.1
|
|
||||||
github.com/stretchr/objx v0.2.0 // indirect
|
|
||||||
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 // indirect
|
|
||||||
golang.org/x/exp v0.0.0-20190429183610-475c5042d3f1 // indirect
|
|
||||||
golang.org/x/image v0.0.0-20190501045829-6d32002ffd75 // indirect
|
|
||||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422 // indirect
|
|
||||||
golang.org/x/mobile v0.0.0-20190415191353-3e0bab5405d6 // indirect
|
|
||||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c // indirect
|
|
||||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a // indirect
|
|
||||||
golang.org/x/sys v0.0.0-20190506115046-ca7f33d4116e // indirect
|
|
||||||
golang.org/x/text v0.3.2 // indirect
|
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
|
||||||
golang.org/x/tools v0.0.0-20190503185657-3b6f9c0030f7 // indirect
|
google.golang.org/grpc v1.23.1
|
||||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 // indirect
|
|
||||||
google.golang.org/grpc v1.20.1
|
|
||||||
gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
|
|
||||||
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
|
|
||||||
gopkg.in/yaml.v2 v2.2.2 // indirect
|
|
||||||
gotest.tools v2.2.0+incompatible // indirect
|
gotest.tools v2.2.0+incompatible // indirect
|
||||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a // indirect
|
|
||||||
)
|
)
|
||||||
|
|
||||||
replace github.com/golang/lint v0.0.0-20190409202823-959b441ac422 => github.com/golang/lint v0.0.0-20190409202823-5614ed5bae6fb75893070bdc0996a68765fdd275
|
|
||||||
|
|
260
go.sum
260
go.sum
|
@ -1,270 +1,124 @@
|
||||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
|
||||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
|
||||||
github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q=
|
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
|
||||||
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA=
|
||||||
github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
|
||||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
|
||||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
|
||||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
|
||||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
|
||||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
|
|
||||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
|
||||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
|
||||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
github.com/containerd/continuity v0.0.0-20180921161001-7f53d412b9eb h1:qSMRxG547z/BgQmyVyADxaMADQXVAD9uleP2sQeClbo=
|
github.com/containerd/containerd v1.2.9 h1:6tyNjBmAMG47QuFPIT9LgiiexoVxC6qpTGR+eD0R0Z8=
|
||||||
github.com/containerd/continuity v0.0.0-20180921161001-7f53d412b9eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
github.com/containerd/containerd v1.2.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||||
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 h1:NmTXa/uVnDyp0TY5MKi197+3HWcnYWfnHGyaFthlnGw=
|
||||||
github.com/coreos/clair v0.0.0-20180919182544-44ae4bc9590a h1:glxUtT0RlaVJU86kg78ygzfhwW6D+uj5H+aOK01QDgI=
|
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||||
github.com/coreos/clair v0.0.0-20180919182544-44ae4bc9590a/go.mod h1:uXhHPWAoRqw0jJc2f8RrPCwRhIo9otQ8OEWUFtpCiwA=
|
github.com/coreos/clair v2.0.1-0.20190910143208-94150ab1f4ac+incompatible h1:V+KI58nlBLYl1KMqBc0Ollhik0gIp2dDkPZQsg3NipQ=
|
||||||
github.com/coreos/clair v2.0.8+incompatible h1:MgLazOCxgOu/LWHoqitz/ZTb2cLAe/Y6G4NznmmyJwA=
|
github.com/coreos/clair v2.0.1-0.20190910143208-94150ab1f4ac+incompatible/go.mod h1:uXhHPWAoRqw0jJc2f8RrPCwRhIo9otQ8OEWUFtpCiwA=
|
||||||
github.com/coreos/clair v2.0.8+incompatible/go.mod h1:uXhHPWAoRqw0jJc2f8RrPCwRhIo9otQ8OEWUFtpCiwA=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/docker/cli v0.0.0-20180920165730-54c19e67f69c h1:QlAVcyoF7QQVN7zV+xYBjgwtRVlRU3WCTCpb2mcqQrM=
|
github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ=
|
||||||
github.com/docker/cli v0.0.0-20180920165730-54c19e67f69c/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
|
||||||
github.com/docker/cli v0.0.0-20190429053159-3273c2e23546 h1:MjppZ9NE8BEvA2QdHmz3JWF1xzxFzOyZra9bTbGQIKs=
|
github.com/docker/cli v0.0.0-20190913211141-95327f4e6241 h1:btTBgRvrdoe+b7NfX/7PnUbiXzGceLCt09QZkg1bgqA=
|
||||||
github.com/docker/cli v0.0.0-20190429053159-3273c2e23546/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
github.com/docker/cli v0.0.0-20190913211141-95327f4e6241/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||||
github.com/docker/distribution v0.0.0-20180920194744-16128bbac47f h1:hYf+mPizfvpH6VgIxdntnOmQHd1F1mQUc1oG+j3Ol2g=
|
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
|
||||||
github.com/docker/distribution v0.0.0-20180920194744-16128bbac47f/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||||
github.com/docker/docker v0.0.0-20180924202107-a9c061deec0f h1:W4fbqg0JUwy6lLesoJaV/rE0fwAmtdtinMa64X1CEh0=
|
github.com/docker/docker v1.4.2-0.20190916154449-92cc603036dd h1:kDIT0qjvLHbdL86aa+VteVpVZOR7coIyIejM/o3CwOo=
|
||||||
github.com/docker/docker v0.0.0-20180924202107-a9c061deec0f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
github.com/docker/docker v1.4.2-0.20190916154449-92cc603036dd/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||||
github.com/docker/docker-ce v0.0.0-20180924210327-f53bd8bb8e43 h1:gZ4lWixV821UVbYtr+oz1ZPCHkbtE+ivfmHyZRgyl2Y=
|
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
|
||||||
github.com/docker/docker-ce v0.0.0-20180924210327-f53bd8bb8e43/go.mod h1:l1FUGRYBvbjnZ8MS6A2xOji4aZFlY/Qmgz7p4oXH7ac=
|
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
||||||
github.com/docker/docker-credential-helpers v0.6.1 h1:Dq4iIfcM7cNtddhLVWe9h4QDjsi4OER3Z8voPu/I52g=
|
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||||
github.com/docker/docker-credential-helpers v0.6.1/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||||
github.com/docker/docker-credential-helpers v0.6.2 h1:CrW9H1VMf3a4GrtyAi7IUJjkJVpwBBpX0+mvkvYJaus=
|
|
||||||
github.com/docker/docker-credential-helpers v0.6.2/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
|
||||||
github.com/docker/go-connections v0.0.0-20180821093606-97c2040d34df h1:ADMjlaDGEn0OOQIieyxanhAt41jcngf8rf78X2eKNLw=
|
|
||||||
github.com/docker/go-connections v0.0.0-20180821093606-97c2040d34df/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
|
||||||
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 h1:yWHOI+vFjEsAakUTSrtqc/SAHrhSkmn48pqjidZX3QA=
|
|
||||||
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
|
|
||||||
github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
|
|
||||||
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
|
|
||||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
|
||||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||||
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
|
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
|
||||||
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
|
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
|
||||||
github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e h1:P10tZmVD2XclAaT9l7OduMH1OLFzTa1wUuUqHZnEdI0=
|
github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e h1:P10tZmVD2XclAaT9l7OduMH1OLFzTa1wUuUqHZnEdI0=
|
||||||
github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e/go.mod h1:2H9hjfbpSMHwY503FclkV/lZTBh2YlOmLLSda12uL8c=
|
github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e/go.mod h1:2H9hjfbpSMHwY503FclkV/lZTBh2YlOmLLSda12uL8c=
|
||||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
|
||||||
github.com/genuinetools/pkg v0.0.0-20180910213200-1c141f661797 h1:SGpZXDd/CFeDIY4Rq5cFO8K/uqDblHUxjlzOmjFpvRg=
|
|
||||||
github.com/genuinetools/pkg v0.0.0-20180910213200-1c141f661797/go.mod h1:XTcrCYlXPxnxL2UpnwuRn7tcaTn9HAhxFoFJucootk8=
|
|
||||||
github.com/genuinetools/pkg v0.0.0-20181022210355-2fcf164d37cb h1:9MQ4N7zyYTtdjLGqE5McDbgjIjqR5TAPc6lytEOdndc=
|
github.com/genuinetools/pkg v0.0.0-20181022210355-2fcf164d37cb h1:9MQ4N7zyYTtdjLGqE5McDbgjIjqR5TAPc6lytEOdndc=
|
||||||
github.com/genuinetools/pkg v0.0.0-20181022210355-2fcf164d37cb/go.mod h1:XTcrCYlXPxnxL2UpnwuRn7tcaTn9HAhxFoFJucootk8=
|
github.com/genuinetools/pkg v0.0.0-20181022210355-2fcf164d37cb/go.mod h1:XTcrCYlXPxnxL2UpnwuRn7tcaTn9HAhxFoFJucootk8=
|
||||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
github.com/gogo/protobuf v1.3.0 h1:G8O7TerXerS4F6sx9OV7/nRfJdnXgHZu/S/7F2SN+UE=
|
||||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
|
||||||
github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
|
|
||||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
|
||||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
|
||||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
|
|
||||||
github.com/golang/lint v0.0.0-20190409202823-5614ed5bae6fb75893070bdc0996a68765fdd275/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
|
|
||||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
|
||||||
github.com/golang/mock v1.3.0/go.mod h1:c8YoAQJ7+qIz9IQm9G72MJ4uDcrPeLjkrQ4yYIHdhyw=
|
|
||||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
|
||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
|
||||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
|
||||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
|
||||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
|
||||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
|
||||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
|
||||||
github.com/google/pprof v0.0.0-20190502144155-8358a9778bd1/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
github.com/grpc-ecosystem/grpc-gateway v1.11.1 h1:/dBYI+n4xIL+Y9SKXQrjlKTmJJDwCSlNLRwZ5nBhIek=
|
||||||
github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
|
github.com/grpc-ecosystem/grpc-gateway v1.11.1/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
|
||||||
github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
|
|
||||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
|
||||||
github.com/gorilla/mux v1.7.1 h1:Dw4jY2nghMMRsh1ol8dv1axHkDwMQK2DHerMNJsIpJU=
|
|
||||||
github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
|
||||||
github.com/grpc-ecosystem/grpc-gateway v1.5.0 h1:WcmKMm43DR7RdtlkEXQJyo5ws8iTp98CyhCCbOHMvNI=
|
|
||||||
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
|
||||||
github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
|
||||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
|
||||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
|
||||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
|
||||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
|
||||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
|
||||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
|
||||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
|
||||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
|
||||||
github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
|
github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
|
||||||
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE=
|
||||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||||
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
|
||||||
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
|
||||||
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2 h1:QhPf3A2AZW3tTGvHPg0TA+CR3oHbVLlXUhlghqISp1I=
|
|
||||||
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
|
||||||
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
|
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
|
||||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||||
github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
|
github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
|
||||||
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||||
github.com/peterhellberg/link v1.0.0 h1:mUWkiegowUXEcmlb+ybF75Q/8D2Y0BjZtR8cxoKhaQo=
|
github.com/peterhellberg/link v1.0.0 h1:mUWkiegowUXEcmlb+ybF75Q/8D2Y0BjZtR8cxoKhaQo=
|
||||||
github.com/peterhellberg/link v1.0.0/go.mod h1:gtSlOT4jmkY8P47hbTc8PTgiDDWpdPbFYl75keYyBB8=
|
github.com/peterhellberg/link v1.0.0/go.mod h1:gtSlOT4jmkY8P47hbTc8PTgiDDWpdPbFYl75keYyBB8=
|
||||||
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
|
|
||||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
|
||||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/prometheus/client_golang v0.0.0-20180924113449-f69c853d21c1 h1:mEzWvBiJdUbhqHRT6kNSGzD6IDcWCWF2uAhrEEE740M=
|
|
||||||
github.com/prometheus/client_golang v0.0.0-20180924113449-f69c853d21c1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
|
||||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
|
||||||
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
|
|
||||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
|
|
||||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
|
||||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
|
||||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54=
|
|
||||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
|
||||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
|
||||||
github.com/prometheus/common v0.3.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
|
||||||
github.com/prometheus/procfs v0.0.0-20180920065004-418d78d0b9a7 h1:NgR6WN8nQ4SmFC1sSUHY8SriLuWCZ6cCIQtH4vDZN3c=
|
|
||||||
github.com/prometheus/procfs v0.0.0-20180920065004-418d78d0b9a7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
|
||||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
|
||||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
|
||||||
github.com/prometheus/procfs v0.0.0-20190503130316-740c07785007/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
|
||||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||||
github.com/rogpeppe/fastuuid v1.0.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=
|
||||||
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 h1:SWV2fHctRpRrp49VXJ6UZja7gU9QLHwRpIPBN89SKEo=
|
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||||
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
|
||||||
github.com/shurcooL/httpfs v0.0.0-20181222201310-74dc9339e414 h1:IYVb70m/qpJGjyZV2S4qbdSDnsMl+w9nsQ2iQedf1HI=
|
|
||||||
github.com/shurcooL/httpfs v0.0.0-20181222201310-74dc9339e414/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
|
||||||
github.com/sirupsen/logrus v1.0.6 h1:hcP1GmhGigz/O7h1WVUM5KklBp1JoNS9FggWKdj/j3s=
|
|
||||||
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
|
||||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
|
||||||
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
|
|
||||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
||||||
|
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
|
||||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
|
||||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
|
||||||
golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b h1:2b9XGzhjiYsYPnKXoEfL7klWZQIt8IfyRCz62gCqqlQ=
|
|
||||||
golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20190424203555-c05e17bb3b2d/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
|
||||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
|
||||||
golang.org/x/exp v0.0.0-20190429183610-475c5042d3f1/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
|
||||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
|
||||||
golang.org/x/image v0.0.0-20190501045829-6d32002ffd75/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
|
||||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
|
||||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
|
||||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
|
||||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
|
||||||
golang.org/x/mobile v0.0.0-20190415191353-3e0bab5405d6/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20180925072008-f04abc6bdfa7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
|
||||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw=
|
|
||||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e h1:LSlw/Dbj0MkNvPYAAkGinYmGliq+aqS7eKPYlE4oWC4=
|
|
||||||
golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
|
||||||
golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190506115046-ca7f33d4116e h1:bq5BY1tGuaK8HxuwN6pT6kWgTVLeJ5KwuyBpsl1CZL4=
|
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||||
golang.org/x/sys v0.0.0-20190506115046-ca7f33d4116e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||||
golang.org/x/text v0.3.1/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
|
||||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
|
||||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
|
||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
|
||||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A=
|
||||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
|
||||||
golang.org/x/tools v0.0.0-20190503185657-3b6f9c0030f7/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
|
||||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
|
||||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
|
||||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
google.golang.org/genproto v0.0.0-20180924164928-221a8d4f7494/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
|
||||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg=
|
|
||||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
|
google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk=
|
||||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
|
||||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
|
||||||
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
|
|
||||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
|
||||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
|
||||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
|
|
|
@ -4,8 +4,9 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/cli/cli/config"
|
||||||
|
clitypes "github.com/docker/cli/cli/config/types"
|
||||||
"github.com/docker/distribution/reference"
|
"github.com/docker/distribution/reference"
|
||||||
"github.com/docker/docker-ce/components/cli/cli/config"
|
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
@ -56,16 +57,16 @@ func GetAuthConfig(username, password, registry string) (types.AuthConfig, error
|
||||||
if registry != "" {
|
if registry != "" {
|
||||||
// try with the user input
|
// try with the user input
|
||||||
if creds, ok := authConfigs[registry]; ok {
|
if creds, ok := authConfigs[registry]; ok {
|
||||||
fixAuthConfig(&creds, registry)
|
c := fixAuthConfig(creds, registry)
|
||||||
return creds, nil
|
return c, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// remove https:// from user input and try again
|
// remove https:// from user input and try again
|
||||||
if strings.HasPrefix(registry, "https://") {
|
if strings.HasPrefix(registry, "https://") {
|
||||||
registryCleaned := strings.TrimPrefix(registry, "https://")
|
registryCleaned := strings.TrimPrefix(registry, "https://")
|
||||||
if creds, ok := authConfigs[registryCleaned]; ok {
|
if creds, ok := authConfigs[registryCleaned]; ok {
|
||||||
fixAuthConfig(&creds, registryCleaned)
|
c := fixAuthConfig(creds, registryCleaned)
|
||||||
return creds, nil
|
return c, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -73,8 +74,8 @@ func GetAuthConfig(username, password, registry string) (types.AuthConfig, error
|
||||||
if strings.HasPrefix(registry, "http://") {
|
if strings.HasPrefix(registry, "http://") {
|
||||||
registryCleaned := strings.TrimPrefix(registry, "http://")
|
registryCleaned := strings.TrimPrefix(registry, "http://")
|
||||||
if creds, ok := authConfigs[registryCleaned]; ok {
|
if creds, ok := authConfigs[registryCleaned]; ok {
|
||||||
fixAuthConfig(&creds, registryCleaned)
|
c := fixAuthConfig(creds, registryCleaned)
|
||||||
return creds, nil
|
return c, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -83,8 +84,8 @@ func GetAuthConfig(username, password, registry string) (types.AuthConfig, error
|
||||||
if !strings.HasPrefix(registry, "https://") && !strings.HasPrefix(registry, "http://") {
|
if !strings.HasPrefix(registry, "https://") && !strings.HasPrefix(registry, "http://") {
|
||||||
registryCleaned := "https://" + registry
|
registryCleaned := "https://" + registry
|
||||||
if creds, ok := authConfigs[registryCleaned]; ok {
|
if creds, ok := authConfigs[registryCleaned]; ok {
|
||||||
fixAuthConfig(&creds, registryCleaned)
|
c := fixAuthConfig(creds, registryCleaned)
|
||||||
return creds, nil
|
return c, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -100,7 +101,8 @@ func GetAuthConfig(username, password, registry string) (types.AuthConfig, error
|
||||||
// found in the auth config.
|
// found in the auth config.
|
||||||
for _, creds := range authConfigs {
|
for _, creds := range authConfigs {
|
||||||
fmt.Printf("No registry passed. Using registry %q\n", creds.ServerAddress)
|
fmt.Printf("No registry passed. Using registry %q\n", creds.ServerAddress)
|
||||||
return creds, nil
|
c := fixAuthConfig(creds, creds.ServerAddress)
|
||||||
|
return c, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Don't use any authentication.
|
// Don't use any authentication.
|
||||||
|
@ -113,10 +115,20 @@ func GetAuthConfig(username, password, registry string) (types.AuthConfig, error
|
||||||
// registry value if ServerAddress is empty. For example, config.Load() will
|
// registry value if ServerAddress is empty. For example, config.Load() will
|
||||||
// return AuthConfigs with empty ServerAddresses if the configuration file
|
// return AuthConfigs with empty ServerAddresses if the configuration file
|
||||||
// contains only an "credsHelper" object.
|
// contains only an "credsHelper" object.
|
||||||
func fixAuthConfig(creds *types.AuthConfig, registry string) {
|
func fixAuthConfig(creds clitypes.AuthConfig, registry string) (c types.AuthConfig) {
|
||||||
|
c.Username = creds.Username
|
||||||
|
c.Password = creds.Password
|
||||||
|
c.Auth = creds.Auth
|
||||||
|
c.Email = creds.Email
|
||||||
|
c.IdentityToken = creds.IdentityToken
|
||||||
|
c.RegistryToken = creds.RegistryToken
|
||||||
|
|
||||||
|
c.ServerAddress = creds.ServerAddress
|
||||||
if creds.ServerAddress == "" {
|
if creds.ServerAddress == "" {
|
||||||
creds.ServerAddress = registry
|
c.ServerAddress = registry
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return c
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetRepoAndRef parses the repo name and reference.
|
// GetRepoAndRef parses the repo name and reference.
|
||||||
|
|
|
@ -7,8 +7,8 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/docker/cli/cli/config"
|
||||||
"github.com/docker/distribution/reference"
|
"github.com/docker/distribution/reference"
|
||||||
"github.com/docker/docker-ce/components/cli/cli/config"
|
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/google/go-cmp/cmp"
|
"github.com/google/go-cmp/cmp"
|
||||||
)
|
)
|
||||||
|
|
18
vendor/github.com/Microsoft/go-winio/file.go
generated
vendored
18
vendor/github.com/Microsoft/go-winio/file.go
generated
vendored
|
@ -16,6 +16,7 @@ import (
|
||||||
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
|
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
|
||||||
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
|
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
|
||||||
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
|
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
|
||||||
|
//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult
|
||||||
|
|
||||||
type atomicBool int32
|
type atomicBool int32
|
||||||
|
|
||||||
|
@ -79,6 +80,7 @@ type win32File struct {
|
||||||
wg sync.WaitGroup
|
wg sync.WaitGroup
|
||||||
wgLock sync.RWMutex
|
wgLock sync.RWMutex
|
||||||
closing atomicBool
|
closing atomicBool
|
||||||
|
socket bool
|
||||||
readDeadline deadlineHandler
|
readDeadline deadlineHandler
|
||||||
writeDeadline deadlineHandler
|
writeDeadline deadlineHandler
|
||||||
}
|
}
|
||||||
|
@ -109,7 +111,13 @@ func makeWin32File(h syscall.Handle) (*win32File, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
|
func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
|
||||||
return makeWin32File(h)
|
// If we return the result of makeWin32File directly, it can result in an
|
||||||
|
// interface-wrapped nil, rather than a nil interface value.
|
||||||
|
f, err := makeWin32File(h)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// closeHandle closes the resources associated with a Win32 handle
|
// closeHandle closes the resources associated with a Win32 handle
|
||||||
|
@ -190,6 +198,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
|
||||||
if f.closing.isSet() {
|
if f.closing.isSet() {
|
||||||
err = ErrFileClosed
|
err = ErrFileClosed
|
||||||
}
|
}
|
||||||
|
} else if err != nil && f.socket {
|
||||||
|
// err is from Win32. Query the overlapped structure to get the winsock error.
|
||||||
|
var bytes, flags uint32
|
||||||
|
err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
|
||||||
}
|
}
|
||||||
case <-timeout:
|
case <-timeout:
|
||||||
cancelIoEx(f.handle, &c.o)
|
cancelIoEx(f.handle, &c.o)
|
||||||
|
@ -265,6 +277,10 @@ func (f *win32File) Flush() error {
|
||||||
return syscall.FlushFileBuffers(f.handle)
|
return syscall.FlushFileBuffers(f.handle)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (f *win32File) Fd() uintptr {
|
||||||
|
return uintptr(f.handle)
|
||||||
|
}
|
||||||
|
|
||||||
func (d *deadlineHandler) set(deadline time.Time) error {
|
func (d *deadlineHandler) set(deadline time.Time) error {
|
||||||
d.setLock.Lock()
|
d.setLock.Lock()
|
||||||
defer d.setLock.Unlock()
|
defer d.setLock.Unlock()
|
||||||
|
|
9
vendor/github.com/Microsoft/go-winio/go.mod
generated
vendored
Normal file
9
vendor/github.com/Microsoft/go-winio/go.mod
generated
vendored
Normal file
|
@ -0,0 +1,9 @@
|
||||||
|
module github.com/Microsoft/go-winio
|
||||||
|
|
||||||
|
go 1.12
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/pkg/errors v0.8.1
|
||||||
|
github.com/sirupsen/logrus v1.4.1
|
||||||
|
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b
|
||||||
|
)
|
16
vendor/github.com/Microsoft/go-winio/go.sum
generated
vendored
Normal file
16
vendor/github.com/Microsoft/go-winio/go.sum
generated
vendored
Normal file
|
@ -0,0 +1,16 @@
|
||||||
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||||
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
|
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||||
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
|
||||||
|
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||||
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||||
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
|
||||||
|
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
305
vendor/github.com/Microsoft/go-winio/hvsock.go
generated
vendored
Normal file
305
vendor/github.com/Microsoft/go-winio/hvsock.go
generated
vendored
Normal file
|
@ -0,0 +1,305 @@
|
||||||
|
package winio
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/Microsoft/go-winio/pkg/guid"
|
||||||
|
)
|
||||||
|
|
||||||
|
//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
|
||||||
|
|
||||||
|
const (
|
||||||
|
afHvSock = 34 // AF_HYPERV
|
||||||
|
|
||||||
|
socketError = ^uintptr(0)
|
||||||
|
)
|
||||||
|
|
||||||
|
// An HvsockAddr is an address for a AF_HYPERV socket.
|
||||||
|
type HvsockAddr struct {
|
||||||
|
VMID guid.GUID
|
||||||
|
ServiceID guid.GUID
|
||||||
|
}
|
||||||
|
|
||||||
|
type rawHvsockAddr struct {
|
||||||
|
Family uint16
|
||||||
|
_ uint16
|
||||||
|
VMID guid.GUID
|
||||||
|
ServiceID guid.GUID
|
||||||
|
}
|
||||||
|
|
||||||
|
// Network returns the address's network name, "hvsock".
|
||||||
|
func (addr *HvsockAddr) Network() string {
|
||||||
|
return "hvsock"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (addr *HvsockAddr) String() string {
|
||||||
|
return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
|
||||||
|
func VsockServiceID(port uint32) guid.GUID {
|
||||||
|
g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
|
||||||
|
g.Data1 = port
|
||||||
|
return g
|
||||||
|
}
|
||||||
|
|
||||||
|
func (addr *HvsockAddr) raw() rawHvsockAddr {
|
||||||
|
return rawHvsockAddr{
|
||||||
|
Family: afHvSock,
|
||||||
|
VMID: addr.VMID,
|
||||||
|
ServiceID: addr.ServiceID,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
|
||||||
|
addr.VMID = raw.VMID
|
||||||
|
addr.ServiceID = raw.ServiceID
|
||||||
|
}
|
||||||
|
|
||||||
|
// HvsockListener is a socket listener for the AF_HYPERV address family.
|
||||||
|
type HvsockListener struct {
|
||||||
|
sock *win32File
|
||||||
|
addr HvsockAddr
|
||||||
|
}
|
||||||
|
|
||||||
|
// HvsockConn is a connected socket of the AF_HYPERV address family.
|
||||||
|
type HvsockConn struct {
|
||||||
|
sock *win32File
|
||||||
|
local, remote HvsockAddr
|
||||||
|
}
|
||||||
|
|
||||||
|
func newHvSocket() (*win32File, error) {
|
||||||
|
fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1)
|
||||||
|
if err != nil {
|
||||||
|
return nil, os.NewSyscallError("socket", err)
|
||||||
|
}
|
||||||
|
f, err := makeWin32File(fd)
|
||||||
|
if err != nil {
|
||||||
|
syscall.Close(fd)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
f.socket = true
|
||||||
|
return f, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListenHvsock listens for connections on the specified hvsock address.
|
||||||
|
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
|
||||||
|
l := &HvsockListener{addr: *addr}
|
||||||
|
sock, err := newHvSocket()
|
||||||
|
if err != nil {
|
||||||
|
return nil, l.opErr("listen", err)
|
||||||
|
}
|
||||||
|
sa := addr.raw()
|
||||||
|
err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa)))
|
||||||
|
if err != nil {
|
||||||
|
return nil, l.opErr("listen", os.NewSyscallError("socket", err))
|
||||||
|
}
|
||||||
|
err = syscall.Listen(sock.handle, 16)
|
||||||
|
if err != nil {
|
||||||
|
return nil, l.opErr("listen", os.NewSyscallError("listen", err))
|
||||||
|
}
|
||||||
|
return &HvsockListener{sock: sock, addr: *addr}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *HvsockListener) opErr(op string, err error) error {
|
||||||
|
return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Addr returns the listener's network address.
|
||||||
|
func (l *HvsockListener) Addr() net.Addr {
|
||||||
|
return &l.addr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Accept waits for the next connection and returns it.
|
||||||
|
func (l *HvsockListener) Accept() (_ net.Conn, err error) {
|
||||||
|
sock, err := newHvSocket()
|
||||||
|
if err != nil {
|
||||||
|
return nil, l.opErr("accept", err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if sock != nil {
|
||||||
|
sock.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
c, err := l.sock.prepareIo()
|
||||||
|
if err != nil {
|
||||||
|
return nil, l.opErr("accept", err)
|
||||||
|
}
|
||||||
|
defer l.sock.wg.Done()
|
||||||
|
|
||||||
|
// AcceptEx, per documentation, requires an extra 16 bytes per address.
|
||||||
|
const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
|
||||||
|
var addrbuf [addrlen * 2]byte
|
||||||
|
|
||||||
|
var bytes uint32
|
||||||
|
err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o)
|
||||||
|
_, err = l.sock.asyncIo(c, nil, bytes, err)
|
||||||
|
if err != nil {
|
||||||
|
return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
|
||||||
|
}
|
||||||
|
conn := &HvsockConn{
|
||||||
|
sock: sock,
|
||||||
|
}
|
||||||
|
conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
|
||||||
|
conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
|
||||||
|
sock = nil
|
||||||
|
return conn, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the listener, causing any pending Accept calls to fail.
|
||||||
|
func (l *HvsockListener) Close() error {
|
||||||
|
return l.sock.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Need to finish ConnectEx handling
|
||||||
|
func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) {
|
||||||
|
sock, err := newHvSocket()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if sock != nil {
|
||||||
|
sock.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
c, err := sock.prepareIo()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer sock.wg.Done()
|
||||||
|
var bytes uint32
|
||||||
|
err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o)
|
||||||
|
_, err = sock.asyncIo(ctx, c, nil, bytes, err)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
conn := &HvsockConn{
|
||||||
|
sock: sock,
|
||||||
|
remote: *addr,
|
||||||
|
}
|
||||||
|
sock = nil
|
||||||
|
return conn, nil
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
|
func (conn *HvsockConn) opErr(op string, err error) error {
|
||||||
|
return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (conn *HvsockConn) Read(b []byte) (int, error) {
|
||||||
|
c, err := conn.sock.prepareIo()
|
||||||
|
if err != nil {
|
||||||
|
return 0, conn.opErr("read", err)
|
||||||
|
}
|
||||||
|
defer conn.sock.wg.Done()
|
||||||
|
buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
|
||||||
|
var flags, bytes uint32
|
||||||
|
err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
|
||||||
|
n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err)
|
||||||
|
if err != nil {
|
||||||
|
if _, ok := err.(syscall.Errno); ok {
|
||||||
|
err = os.NewSyscallError("wsarecv", err)
|
||||||
|
}
|
||||||
|
return 0, conn.opErr("read", err)
|
||||||
|
} else if n == 0 {
|
||||||
|
err = io.EOF
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (conn *HvsockConn) Write(b []byte) (int, error) {
|
||||||
|
t := 0
|
||||||
|
for len(b) != 0 {
|
||||||
|
n, err := conn.write(b)
|
||||||
|
if err != nil {
|
||||||
|
return t + n, err
|
||||||
|
}
|
||||||
|
t += n
|
||||||
|
b = b[n:]
|
||||||
|
}
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (conn *HvsockConn) write(b []byte) (int, error) {
|
||||||
|
c, err := conn.sock.prepareIo()
|
||||||
|
if err != nil {
|
||||||
|
return 0, conn.opErr("write", err)
|
||||||
|
}
|
||||||
|
defer conn.sock.wg.Done()
|
||||||
|
buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
|
||||||
|
var bytes uint32
|
||||||
|
err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
|
||||||
|
n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err)
|
||||||
|
if err != nil {
|
||||||
|
if _, ok := err.(syscall.Errno); ok {
|
||||||
|
err = os.NewSyscallError("wsasend", err)
|
||||||
|
}
|
||||||
|
return 0, conn.opErr("write", err)
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the socket connection, failing any pending read or write calls.
|
||||||
|
func (conn *HvsockConn) Close() error {
|
||||||
|
return conn.sock.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (conn *HvsockConn) shutdown(how int) error {
|
||||||
|
err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD)
|
||||||
|
if err != nil {
|
||||||
|
return os.NewSyscallError("shutdown", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CloseRead shuts down the read end of the socket.
|
||||||
|
func (conn *HvsockConn) CloseRead() error {
|
||||||
|
err := conn.shutdown(syscall.SHUT_RD)
|
||||||
|
if err != nil {
|
||||||
|
return conn.opErr("close", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
|
||||||
|
// no more data will be written.
|
||||||
|
func (conn *HvsockConn) CloseWrite() error {
|
||||||
|
err := conn.shutdown(syscall.SHUT_WR)
|
||||||
|
if err != nil {
|
||||||
|
return conn.opErr("close", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LocalAddr returns the local address of the connection.
|
||||||
|
func (conn *HvsockConn) LocalAddr() net.Addr {
|
||||||
|
return &conn.local
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoteAddr returns the remote address of the connection.
|
||||||
|
func (conn *HvsockConn) RemoteAddr() net.Addr {
|
||||||
|
return &conn.remote
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetDeadline implements the net.Conn SetDeadline method.
|
||||||
|
func (conn *HvsockConn) SetDeadline(t time.Time) error {
|
||||||
|
conn.SetReadDeadline(t)
|
||||||
|
conn.SetWriteDeadline(t)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetReadDeadline implements the net.Conn SetReadDeadline method.
|
||||||
|
func (conn *HvsockConn) SetReadDeadline(t time.Time) error {
|
||||||
|
return conn.sock.SetReadDeadline(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
|
||||||
|
func (conn *HvsockConn) SetWriteDeadline(t time.Time) error {
|
||||||
|
return conn.sock.SetWriteDeadline(t)
|
||||||
|
}
|
247
vendor/github.com/Microsoft/go-winio/pipe.go
generated
vendored
247
vendor/github.com/Microsoft/go-winio/pipe.go
generated
vendored
|
@ -3,10 +3,13 @@
|
||||||
package winio
|
package winio
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
|
"runtime"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
@ -18,6 +21,48 @@ import (
|
||||||
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
|
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
|
||||||
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
|
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
|
||||||
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
|
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
|
||||||
|
//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile
|
||||||
|
//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
|
||||||
|
//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U
|
||||||
|
//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl
|
||||||
|
|
||||||
|
type ioStatusBlock struct {
|
||||||
|
Status, Information uintptr
|
||||||
|
}
|
||||||
|
|
||||||
|
type objectAttributes struct {
|
||||||
|
Length uintptr
|
||||||
|
RootDirectory uintptr
|
||||||
|
ObjectName *unicodeString
|
||||||
|
Attributes uintptr
|
||||||
|
SecurityDescriptor *securityDescriptor
|
||||||
|
SecurityQoS uintptr
|
||||||
|
}
|
||||||
|
|
||||||
|
type unicodeString struct {
|
||||||
|
Length uint16
|
||||||
|
MaximumLength uint16
|
||||||
|
Buffer uintptr
|
||||||
|
}
|
||||||
|
|
||||||
|
type securityDescriptor struct {
|
||||||
|
Revision byte
|
||||||
|
Sbz1 byte
|
||||||
|
Control uint16
|
||||||
|
Owner uintptr
|
||||||
|
Group uintptr
|
||||||
|
Sacl uintptr
|
||||||
|
Dacl uintptr
|
||||||
|
}
|
||||||
|
|
||||||
|
type ntstatus int32
|
||||||
|
|
||||||
|
func (status ntstatus) Err() error {
|
||||||
|
if status >= 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return rtlNtStatusToDosError(status)
|
||||||
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
cERROR_PIPE_BUSY = syscall.Errno(231)
|
cERROR_PIPE_BUSY = syscall.Errno(231)
|
||||||
|
@ -25,21 +70,20 @@ const (
|
||||||
cERROR_PIPE_CONNECTED = syscall.Errno(535)
|
cERROR_PIPE_CONNECTED = syscall.Errno(535)
|
||||||
cERROR_SEM_TIMEOUT = syscall.Errno(121)
|
cERROR_SEM_TIMEOUT = syscall.Errno(121)
|
||||||
|
|
||||||
cPIPE_ACCESS_DUPLEX = 0x3
|
cSECURITY_SQOS_PRESENT = 0x100000
|
||||||
cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000
|
cSECURITY_ANONYMOUS = 0
|
||||||
cSECURITY_SQOS_PRESENT = 0x100000
|
|
||||||
cSECURITY_ANONYMOUS = 0
|
|
||||||
|
|
||||||
cPIPE_REJECT_REMOTE_CLIENTS = 0x8
|
|
||||||
|
|
||||||
cPIPE_UNLIMITED_INSTANCES = 255
|
|
||||||
|
|
||||||
cNMPWAIT_USE_DEFAULT_WAIT = 0
|
|
||||||
cNMPWAIT_NOWAIT = 1
|
|
||||||
|
|
||||||
cPIPE_TYPE_MESSAGE = 4
|
cPIPE_TYPE_MESSAGE = 4
|
||||||
|
|
||||||
cPIPE_READMODE_MESSAGE = 2
|
cPIPE_READMODE_MESSAGE = 2
|
||||||
|
|
||||||
|
cFILE_OPEN = 1
|
||||||
|
cFILE_CREATE = 2
|
||||||
|
|
||||||
|
cFILE_PIPE_MESSAGE_TYPE = 1
|
||||||
|
cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2
|
||||||
|
|
||||||
|
cSE_DACL_PRESENT = 4
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -137,9 +181,30 @@ func (s pipeAddress) String() string {
|
||||||
return string(s)
|
return string(s)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
|
||||||
|
func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return syscall.Handle(0), ctx.Err()
|
||||||
|
default:
|
||||||
|
h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
|
||||||
|
if err == nil {
|
||||||
|
return h, nil
|
||||||
|
}
|
||||||
|
if err != cERROR_PIPE_BUSY {
|
||||||
|
return h, &os.PathError{Err: err, Op: "open", Path: *path}
|
||||||
|
}
|
||||||
|
// Wait 10 msec and try again. This is a rather simplistic
|
||||||
|
// view, as we always try each 10 milliseconds.
|
||||||
|
time.Sleep(time.Millisecond * 10)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// DialPipe connects to a named pipe by path, timing out if the connection
|
// DialPipe connects to a named pipe by path, timing out if the connection
|
||||||
// takes longer than the specified duration. If timeout is nil, then we use
|
// takes longer than the specified duration. If timeout is nil, then we use
|
||||||
// a default timeout of 5 seconds. (We do not use WaitNamedPipe.)
|
// a default timeout of 2 seconds. (We do not use WaitNamedPipe.)
|
||||||
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
|
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
|
||||||
var absTimeout time.Time
|
var absTimeout time.Time
|
||||||
if timeout != nil {
|
if timeout != nil {
|
||||||
|
@ -147,23 +212,22 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
|
||||||
} else {
|
} else {
|
||||||
absTimeout = time.Now().Add(time.Second * 2)
|
absTimeout = time.Now().Add(time.Second * 2)
|
||||||
}
|
}
|
||||||
|
ctx, _ := context.WithDeadline(context.Background(), absTimeout)
|
||||||
|
conn, err := DialPipeContext(ctx, path)
|
||||||
|
if err == context.DeadlineExceeded {
|
||||||
|
return nil, ErrTimeout
|
||||||
|
}
|
||||||
|
return conn, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
|
||||||
|
// cancellation or timeout.
|
||||||
|
func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
|
||||||
var err error
|
var err error
|
||||||
var h syscall.Handle
|
var h syscall.Handle
|
||||||
for {
|
h, err = tryDialPipe(ctx, &path)
|
||||||
h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
|
|
||||||
if err != cERROR_PIPE_BUSY {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if time.Now().After(absTimeout) {
|
|
||||||
return nil, ErrTimeout
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait 10 msec and try again. This is a rather simplistic
|
|
||||||
// view, as we always try each 10 milliseconds.
|
|
||||||
time.Sleep(time.Millisecond * 10)
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, &os.PathError{Op: "open", Path: path, Err: err}
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var flags uint32
|
var flags uint32
|
||||||
|
@ -194,43 +258,87 @@ type acceptResponse struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
type win32PipeListener struct {
|
type win32PipeListener struct {
|
||||||
firstHandle syscall.Handle
|
firstHandle syscall.Handle
|
||||||
path string
|
path string
|
||||||
securityDescriptor []byte
|
config PipeConfig
|
||||||
config PipeConfig
|
acceptCh chan (chan acceptResponse)
|
||||||
acceptCh chan (chan acceptResponse)
|
closeCh chan int
|
||||||
closeCh chan int
|
doneCh chan int
|
||||||
doneCh chan int
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
|
func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
|
||||||
var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED
|
path16, err := syscall.UTF16FromString(path)
|
||||||
if first {
|
|
||||||
flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE
|
|
||||||
}
|
|
||||||
|
|
||||||
var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS
|
|
||||||
if c.MessageMode {
|
|
||||||
mode |= cPIPE_TYPE_MESSAGE
|
|
||||||
}
|
|
||||||
|
|
||||||
sa := &syscall.SecurityAttributes{}
|
|
||||||
sa.Length = uint32(unsafe.Sizeof(*sa))
|
|
||||||
if securityDescriptor != nil {
|
|
||||||
len := uint32(len(securityDescriptor))
|
|
||||||
sa.SecurityDescriptor = localAlloc(0, len)
|
|
||||||
defer localFree(sa.SecurityDescriptor)
|
|
||||||
copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor)
|
|
||||||
}
|
|
||||||
h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, &os.PathError{Op: "open", Path: path, Err: err}
|
return 0, &os.PathError{Op: "open", Path: path, Err: err}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var oa objectAttributes
|
||||||
|
oa.Length = unsafe.Sizeof(oa)
|
||||||
|
|
||||||
|
var ntPath unicodeString
|
||||||
|
if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil {
|
||||||
|
return 0, &os.PathError{Op: "open", Path: path, Err: err}
|
||||||
|
}
|
||||||
|
defer localFree(ntPath.Buffer)
|
||||||
|
oa.ObjectName = &ntPath
|
||||||
|
|
||||||
|
// The security descriptor is only needed for the first pipe.
|
||||||
|
if first {
|
||||||
|
if sd != nil {
|
||||||
|
len := uint32(len(sd))
|
||||||
|
sdb := localAlloc(0, len)
|
||||||
|
defer localFree(sdb)
|
||||||
|
copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
|
||||||
|
oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
|
||||||
|
} else {
|
||||||
|
// Construct the default named pipe security descriptor.
|
||||||
|
var dacl uintptr
|
||||||
|
if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
|
||||||
|
return 0, fmt.Errorf("getting default named pipe ACL: %s", err)
|
||||||
|
}
|
||||||
|
defer localFree(dacl)
|
||||||
|
|
||||||
|
sdb := &securityDescriptor{
|
||||||
|
Revision: 1,
|
||||||
|
Control: cSE_DACL_PRESENT,
|
||||||
|
Dacl: dacl,
|
||||||
|
}
|
||||||
|
oa.SecurityDescriptor = sdb
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS)
|
||||||
|
if c.MessageMode {
|
||||||
|
typ |= cFILE_PIPE_MESSAGE_TYPE
|
||||||
|
}
|
||||||
|
|
||||||
|
disposition := uint32(cFILE_OPEN)
|
||||||
|
access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE)
|
||||||
|
if first {
|
||||||
|
disposition = cFILE_CREATE
|
||||||
|
// By not asking for read or write access, the named pipe file system
|
||||||
|
// will put this pipe into an initially disconnected state, blocking
|
||||||
|
// client connections until the next call with first == false.
|
||||||
|
access = syscall.SYNCHRONIZE
|
||||||
|
}
|
||||||
|
|
||||||
|
timeout := int64(-50 * 10000) // 50ms
|
||||||
|
|
||||||
|
var (
|
||||||
|
h syscall.Handle
|
||||||
|
iosb ioStatusBlock
|
||||||
|
)
|
||||||
|
err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err()
|
||||||
|
if err != nil {
|
||||||
|
return 0, &os.PathError{Op: "open", Path: path, Err: err}
|
||||||
|
}
|
||||||
|
|
||||||
|
runtime.KeepAlive(ntPath)
|
||||||
return h, nil
|
return h, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
|
func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
|
||||||
h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false)
|
h, err := makeServerPipeHandle(l.path, nil, &l.config, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -341,32 +449,13 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// Create a client handle and connect it. This results in the pipe
|
|
||||||
// instance always existing, so that clients see ERROR_PIPE_BUSY
|
|
||||||
// rather than ERROR_FILE_NOT_FOUND. This ties the first instance
|
|
||||||
// up so that no other instances can be used. This would have been
|
|
||||||
// cleaner if the Win32 API matched CreateFile with ConnectNamedPipe
|
|
||||||
// instead of CreateNamedPipe. (Apparently created named pipes are
|
|
||||||
// considered to be in listening state regardless of whether any
|
|
||||||
// active calls to ConnectNamedPipe are outstanding.)
|
|
||||||
h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
|
|
||||||
if err != nil {
|
|
||||||
syscall.Close(h)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// Close the client handle. The server side of the instance will
|
|
||||||
// still be busy, leading to ERROR_PIPE_BUSY instead of
|
|
||||||
// ERROR_NOT_FOUND, as long as we don't close the server handle,
|
|
||||||
// or disconnect the client with DisconnectNamedPipe.
|
|
||||||
syscall.Close(h2)
|
|
||||||
l := &win32PipeListener{
|
l := &win32PipeListener{
|
||||||
firstHandle: h,
|
firstHandle: h,
|
||||||
path: path,
|
path: path,
|
||||||
securityDescriptor: sd,
|
config: *c,
|
||||||
config: *c,
|
acceptCh: make(chan (chan acceptResponse)),
|
||||||
acceptCh: make(chan (chan acceptResponse)),
|
closeCh: make(chan int),
|
||||||
closeCh: make(chan int),
|
doneCh: make(chan int),
|
||||||
doneCh: make(chan int),
|
|
||||||
}
|
}
|
||||||
go l.listenerRoutine()
|
go l.listenerRoutine()
|
||||||
return l, nil
|
return l, nil
|
||||||
|
|
235
vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
generated
vendored
Normal file
235
vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
generated
vendored
Normal file
|
@ -0,0 +1,235 @@
|
||||||
|
// Package guid provides a GUID type. The backing structure for a GUID is
|
||||||
|
// identical to that used by the golang.org/x/sys/windows GUID type.
|
||||||
|
// There are two main binary encodings used for a GUID, the big-endian encoding,
|
||||||
|
// and the Windows (mixed-endian) encoding. See here for details:
|
||||||
|
// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding
|
||||||
|
package guid
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/sha1"
|
||||||
|
"encoding"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Variant specifies which GUID variant (or "type") of the GUID. It determines
|
||||||
|
// how the entirety of the rest of the GUID is interpreted.
|
||||||
|
type Variant uint8
|
||||||
|
|
||||||
|
// The variants specified by RFC 4122.
|
||||||
|
const (
|
||||||
|
// VariantUnknown specifies a GUID variant which does not conform to one of
|
||||||
|
// the variant encodings specified in RFC 4122.
|
||||||
|
VariantUnknown Variant = iota
|
||||||
|
VariantNCS
|
||||||
|
VariantRFC4122
|
||||||
|
VariantMicrosoft
|
||||||
|
VariantFuture
|
||||||
|
)
|
||||||
|
|
||||||
|
// Version specifies how the bits in the GUID were generated. For instance, a
|
||||||
|
// version 4 GUID is randomly generated, and a version 5 is generated from the
|
||||||
|
// hash of an input string.
|
||||||
|
type Version uint8
|
||||||
|
|
||||||
|
var _ = (encoding.TextMarshaler)(GUID{})
|
||||||
|
var _ = (encoding.TextUnmarshaler)(&GUID{})
|
||||||
|
|
||||||
|
// GUID represents a GUID/UUID. It has the same structure as
|
||||||
|
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
|
||||||
|
// that type. It is defined as its own type so that stringification and
|
||||||
|
// marshaling can be supported. The representation matches that used by native
|
||||||
|
// Windows code.
|
||||||
|
type GUID windows.GUID
|
||||||
|
|
||||||
|
// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
|
||||||
|
func NewV4() (GUID, error) {
|
||||||
|
var b [16]byte
|
||||||
|
if _, err := rand.Read(b[:]); err != nil {
|
||||||
|
return GUID{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
g := FromArray(b)
|
||||||
|
g.setVersion(4) // Version 4 means randomly generated.
|
||||||
|
g.setVariant(VariantRFC4122)
|
||||||
|
|
||||||
|
return g, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing)
|
||||||
|
// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name,
|
||||||
|
// and the sample code treats it as a series of bytes, so we do the same here.
|
||||||
|
//
|
||||||
|
// Some implementations, such as those found on Windows, treat the name as a
|
||||||
|
// big-endian UTF16 stream of bytes. If that is desired, the string can be
|
||||||
|
// encoded as such before being passed to this function.
|
||||||
|
func NewV5(namespace GUID, name []byte) (GUID, error) {
|
||||||
|
b := sha1.New()
|
||||||
|
namespaceBytes := namespace.ToArray()
|
||||||
|
b.Write(namespaceBytes[:])
|
||||||
|
b.Write(name)
|
||||||
|
|
||||||
|
a := [16]byte{}
|
||||||
|
copy(a[:], b.Sum(nil))
|
||||||
|
|
||||||
|
g := FromArray(a)
|
||||||
|
g.setVersion(5) // Version 5 means generated from a string.
|
||||||
|
g.setVariant(VariantRFC4122)
|
||||||
|
|
||||||
|
return g, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func fromArray(b [16]byte, order binary.ByteOrder) GUID {
|
||||||
|
var g GUID
|
||||||
|
g.Data1 = order.Uint32(b[0:4])
|
||||||
|
g.Data2 = order.Uint16(b[4:6])
|
||||||
|
g.Data3 = order.Uint16(b[6:8])
|
||||||
|
copy(g.Data4[:], b[8:16])
|
||||||
|
return g
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g GUID) toArray(order binary.ByteOrder) [16]byte {
|
||||||
|
b := [16]byte{}
|
||||||
|
order.PutUint32(b[0:4], g.Data1)
|
||||||
|
order.PutUint16(b[4:6], g.Data2)
|
||||||
|
order.PutUint16(b[6:8], g.Data3)
|
||||||
|
copy(b[8:16], g.Data4[:])
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromArray constructs a GUID from a big-endian encoding array of 16 bytes.
|
||||||
|
func FromArray(b [16]byte) GUID {
|
||||||
|
return fromArray(b, binary.BigEndian)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToArray returns an array of 16 bytes representing the GUID in big-endian
|
||||||
|
// encoding.
|
||||||
|
func (g GUID) ToArray() [16]byte {
|
||||||
|
return g.toArray(binary.BigEndian)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromWindowsArray constructs a GUID from a Windows encoding array of bytes.
|
||||||
|
func FromWindowsArray(b [16]byte) GUID {
|
||||||
|
return fromArray(b, binary.LittleEndian)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows
|
||||||
|
// encoding.
|
||||||
|
func (g GUID) ToWindowsArray() [16]byte {
|
||||||
|
return g.toArray(binary.LittleEndian)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g GUID) String() string {
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"%08x-%04x-%04x-%04x-%012x",
|
||||||
|
g.Data1,
|
||||||
|
g.Data2,
|
||||||
|
g.Data3,
|
||||||
|
g.Data4[:2],
|
||||||
|
g.Data4[2:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromString parses a string containing a GUID and returns the GUID. The only
|
||||||
|
// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
|
||||||
|
// format.
|
||||||
|
func FromString(s string) (GUID, error) {
|
||||||
|
if len(s) != 36 {
|
||||||
|
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||||
|
}
|
||||||
|
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
|
||||||
|
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||||
|
}
|
||||||
|
|
||||||
|
var g GUID
|
||||||
|
|
||||||
|
data1, err := strconv.ParseUint(s[0:8], 16, 32)
|
||||||
|
if err != nil {
|
||||||
|
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||||
|
}
|
||||||
|
g.Data1 = uint32(data1)
|
||||||
|
|
||||||
|
data2, err := strconv.ParseUint(s[9:13], 16, 16)
|
||||||
|
if err != nil {
|
||||||
|
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||||
|
}
|
||||||
|
g.Data2 = uint16(data2)
|
||||||
|
|
||||||
|
data3, err := strconv.ParseUint(s[14:18], 16, 16)
|
||||||
|
if err != nil {
|
||||||
|
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||||
|
}
|
||||||
|
g.Data3 = uint16(data3)
|
||||||
|
|
||||||
|
for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} {
|
||||||
|
v, err := strconv.ParseUint(s[x:x+2], 16, 8)
|
||||||
|
if err != nil {
|
||||||
|
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||||
|
}
|
||||||
|
g.Data4[i] = uint8(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
return g, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *GUID) setVariant(v Variant) {
|
||||||
|
d := g.Data4[0]
|
||||||
|
switch v {
|
||||||
|
case VariantNCS:
|
||||||
|
d = (d & 0x7f)
|
||||||
|
case VariantRFC4122:
|
||||||
|
d = (d & 0x3f) | 0x80
|
||||||
|
case VariantMicrosoft:
|
||||||
|
d = (d & 0x1f) | 0xc0
|
||||||
|
case VariantFuture:
|
||||||
|
d = (d & 0x0f) | 0xe0
|
||||||
|
case VariantUnknown:
|
||||||
|
fallthrough
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("invalid variant: %d", v))
|
||||||
|
}
|
||||||
|
g.Data4[0] = d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Variant returns the GUID variant, as defined in RFC 4122.
|
||||||
|
func (g GUID) Variant() Variant {
|
||||||
|
b := g.Data4[0]
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
return VariantNCS
|
||||||
|
} else if b&0xc0 == 0x80 {
|
||||||
|
return VariantRFC4122
|
||||||
|
} else if b&0xe0 == 0xc0 {
|
||||||
|
return VariantMicrosoft
|
||||||
|
} else if b&0xe0 == 0xe0 {
|
||||||
|
return VariantFuture
|
||||||
|
}
|
||||||
|
return VariantUnknown
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *GUID) setVersion(v Version) {
|
||||||
|
g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Version returns the GUID version, as defined in RFC 4122.
|
||||||
|
func (g GUID) Version() Version {
|
||||||
|
return Version((g.Data3 & 0xF000) >> 12)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalText returns the textual representation of the GUID.
|
||||||
|
func (g GUID) MarshalText() ([]byte, error) {
|
||||||
|
return []byte(g.String()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalText takes the textual representation of a GUID, and unmarhals it
|
||||||
|
// into this GUID.
|
||||||
|
func (g *GUID) UnmarshalText(text []byte) error {
|
||||||
|
g2, err := FromString(string(text))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*g = g2
|
||||||
|
return nil
|
||||||
|
}
|
2
vendor/github.com/Microsoft/go-winio/syscall.go
generated
vendored
2
vendor/github.com/Microsoft/go-winio/syscall.go
generated
vendored
|
@ -1,3 +1,3 @@
|
||||||
package winio
|
package winio
|
||||||
|
|
||||||
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go
|
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
|
||||||
|
|
88
vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
generated
vendored
88
vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
|
// Code generated by 'go generate'; DO NOT EDIT.
|
||||||
|
|
||||||
package winio
|
package winio
|
||||||
|
|
||||||
|
@ -38,19 +38,25 @@ func errnoErr(e syscall.Errno) error {
|
||||||
|
|
||||||
var (
|
var (
|
||||||
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
|
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
|
||||||
|
modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
|
||||||
|
modntdll = windows.NewLazySystemDLL("ntdll.dll")
|
||||||
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
|
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
|
||||||
|
|
||||||
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
|
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
|
||||||
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
|
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
|
||||||
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
|
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
|
||||||
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
|
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
|
||||||
|
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
|
||||||
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
|
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
|
||||||
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
|
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
|
||||||
procCreateFileW = modkernel32.NewProc("CreateFileW")
|
procCreateFileW = modkernel32.NewProc("CreateFileW")
|
||||||
procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW")
|
|
||||||
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
|
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
|
||||||
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
|
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
|
||||||
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
|
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
|
||||||
|
procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
|
||||||
|
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
|
||||||
|
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
|
||||||
|
procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
|
||||||
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
|
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
|
||||||
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
|
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
|
||||||
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
|
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
|
||||||
|
@ -69,6 +75,7 @@ var (
|
||||||
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
|
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
|
||||||
procBackupRead = modkernel32.NewProc("BackupRead")
|
procBackupRead = modkernel32.NewProc("BackupRead")
|
||||||
procBackupWrite = modkernel32.NewProc("BackupWrite")
|
procBackupWrite = modkernel32.NewProc("BackupWrite")
|
||||||
|
procbind = modws2_32.NewProc("bind")
|
||||||
)
|
)
|
||||||
|
|
||||||
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
|
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
|
||||||
|
@ -120,6 +127,24 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
|
||||||
|
var _p0 uint32
|
||||||
|
if wait {
|
||||||
|
_p0 = 1
|
||||||
|
} else {
|
||||||
|
_p0 = 0
|
||||||
|
}
|
||||||
|
r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
|
||||||
|
if r1 == 0 {
|
||||||
|
if e1 != 0 {
|
||||||
|
err = errnoErr(e1)
|
||||||
|
} else {
|
||||||
|
err = syscall.EINVAL
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
|
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
|
||||||
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
|
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
|
||||||
if r1 == 0 {
|
if r1 == 0 {
|
||||||
|
@ -176,27 +201,6 @@ func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityA
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func waitNamedPipe(name string, timeout uint32) (err error) {
|
|
||||||
var _p0 *uint16
|
|
||||||
_p0, err = syscall.UTF16PtrFromString(name)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return _waitNamedPipe(_p0, timeout)
|
|
||||||
}
|
|
||||||
|
|
||||||
func _waitNamedPipe(name *uint16, timeout uint32) (err error) {
|
|
||||||
r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0)
|
|
||||||
if r1 == 0 {
|
|
||||||
if e1 != 0 {
|
|
||||||
err = errnoErr(e1)
|
|
||||||
} else {
|
|
||||||
err = syscall.EINVAL
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
|
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
|
||||||
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
|
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
|
||||||
if r1 == 0 {
|
if r1 == 0 {
|
||||||
|
@ -227,6 +231,32 @@ func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
|
||||||
|
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
|
||||||
|
status = ntstatus(r0)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func rtlNtStatusToDosError(status ntstatus) (winerr error) {
|
||||||
|
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
|
||||||
|
if r0 != 0 {
|
||||||
|
winerr = syscall.Errno(r0)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
|
||||||
|
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
|
||||||
|
status = ntstatus(r0)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
|
||||||
|
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
|
||||||
|
status = ntstatus(r0)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
|
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
|
||||||
var _p0 *uint16
|
var _p0 *uint16
|
||||||
_p0, err = syscall.UTF16PtrFromString(accountName)
|
_p0, err = syscall.UTF16PtrFromString(accountName)
|
||||||
|
@ -518,3 +548,15 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
|
||||||
|
r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
|
||||||
|
if r1 == socketError {
|
||||||
|
if e1 != 0 {
|
||||||
|
err = errnoErr(e1)
|
||||||
|
} else {
|
||||||
|
err = syscall.EINVAL
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
21
vendor/github.com/Microsoft/hcsshim/LICENSE
generated
vendored
Normal file
21
vendor/github.com/Microsoft/hcsshim/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2015 Microsoft
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
51
vendor/github.com/Microsoft/hcsshim/osversion/osversion.go
generated
vendored
Normal file
51
vendor/github.com/Microsoft/hcsshim/osversion/osversion.go
generated
vendored
Normal file
|
@ -0,0 +1,51 @@
|
||||||
|
package osversion
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
|
)
|
||||||
|
|
||||||
|
// OSVersion is a wrapper for Windows version information
|
||||||
|
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
|
||||||
|
type OSVersion struct {
|
||||||
|
Version uint32
|
||||||
|
MajorVersion uint8
|
||||||
|
MinorVersion uint8
|
||||||
|
Build uint16
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
|
||||||
|
type osVersionInfoEx struct {
|
||||||
|
OSVersionInfoSize uint32
|
||||||
|
MajorVersion uint32
|
||||||
|
MinorVersion uint32
|
||||||
|
BuildNumber uint32
|
||||||
|
PlatformID uint32
|
||||||
|
CSDVersion [128]uint16
|
||||||
|
ServicePackMajor uint16
|
||||||
|
ServicePackMinor uint16
|
||||||
|
SuiteMask uint16
|
||||||
|
ProductType byte
|
||||||
|
Reserve byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get gets the operating system version on Windows.
|
||||||
|
// The calling application must be manifested to get the correct version information.
|
||||||
|
func Get() OSVersion {
|
||||||
|
var err error
|
||||||
|
osv := OSVersion{}
|
||||||
|
osv.Version, err = windows.GetVersion()
|
||||||
|
if err != nil {
|
||||||
|
// GetVersion never fails.
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
osv.MajorVersion = uint8(osv.Version & 0xFF)
|
||||||
|
osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
|
||||||
|
osv.Build = uint16(osv.Version >> 16)
|
||||||
|
return osv
|
||||||
|
}
|
||||||
|
|
||||||
|
func (osv OSVersion) ToString() string {
|
||||||
|
return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build)
|
||||||
|
}
|
10
vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
generated
vendored
Normal file
10
vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
generated
vendored
Normal file
|
@ -0,0 +1,10 @@
|
||||||
|
package osversion
|
||||||
|
|
||||||
|
const (
|
||||||
|
|
||||||
|
// RS2 was a client-only release in case you're asking why it's not in the list.
|
||||||
|
RS1 = 14393
|
||||||
|
RS3 = 16299
|
||||||
|
RS4 = 17134
|
||||||
|
RS5 = 17763
|
||||||
|
)
|
26
vendor/github.com/Nvveen/Gotty/LICENSE
generated
vendored
26
vendor/github.com/Nvveen/Gotty/LICENSE
generated
vendored
|
@ -1,26 +0,0 @@
|
||||||
Copyright (c) 2012, Neal van Veen (nealvanveen@gmail.com)
|
|
||||||
All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are met:
|
|
||||||
|
|
||||||
1. Redistributions of source code must retain the above copyright notice, this
|
|
||||||
list of conditions and the following disclaimer.
|
|
||||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
|
||||||
this list of conditions and the following disclaimer in the documentation
|
|
||||||
and/or other materials provided with the distribution.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
|
||||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
|
||||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
|
||||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
|
||||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
|
||||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
|
||||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
|
||||||
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
||||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
The views and conclusions contained in the software and documentation are those
|
|
||||||
of the authors and should not be interpreted as representing official policies,
|
|
||||||
either expressed or implied, of the FreeBSD Project.
|
|
5
vendor/github.com/Nvveen/Gotty/README
generated
vendored
5
vendor/github.com/Nvveen/Gotty/README
generated
vendored
|
@ -1,5 +0,0 @@
|
||||||
Gotty is a library written in Go that determines and reads termcap database
|
|
||||||
files to produce an interface for interacting with the capabilities of a
|
|
||||||
terminal.
|
|
||||||
See the godoc documentation or the source code for more information about
|
|
||||||
function usage.
|
|
3
vendor/github.com/Nvveen/Gotty/TODO
generated
vendored
3
vendor/github.com/Nvveen/Gotty/TODO
generated
vendored
|
@ -1,3 +0,0 @@
|
||||||
gotty.go:// TODO add more concurrency to name lookup, look for more opportunities.
|
|
||||||
all:// TODO add more documentation, with function usage in a doc.go file.
|
|
||||||
all:// TODO add more testing/benchmarking with go test.
|
|
514
vendor/github.com/Nvveen/Gotty/attributes.go
generated
vendored
514
vendor/github.com/Nvveen/Gotty/attributes.go
generated
vendored
|
@ -1,514 +0,0 @@
|
||||||
// Copyright 2012 Neal van Veen. All rights reserved.
|
|
||||||
// Usage of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file.
|
|
||||||
|
|
||||||
package gotty
|
|
||||||
|
|
||||||
// Boolean capabilities
|
|
||||||
var BoolAttr = [...]string{
|
|
||||||
"auto_left_margin", "bw",
|
|
||||||
"auto_right_margin", "am",
|
|
||||||
"no_esc_ctlc", "xsb",
|
|
||||||
"ceol_standout_glitch", "xhp",
|
|
||||||
"eat_newline_glitch", "xenl",
|
|
||||||
"erase_overstrike", "eo",
|
|
||||||
"generic_type", "gn",
|
|
||||||
"hard_copy", "hc",
|
|
||||||
"has_meta_key", "km",
|
|
||||||
"has_status_line", "hs",
|
|
||||||
"insert_null_glitch", "in",
|
|
||||||
"memory_above", "da",
|
|
||||||
"memory_below", "db",
|
|
||||||
"move_insert_mode", "mir",
|
|
||||||
"move_standout_mode", "msgr",
|
|
||||||
"over_strike", "os",
|
|
||||||
"status_line_esc_ok", "eslok",
|
|
||||||
"dest_tabs_magic_smso", "xt",
|
|
||||||
"tilde_glitch", "hz",
|
|
||||||
"transparent_underline", "ul",
|
|
||||||
"xon_xoff", "nxon",
|
|
||||||
"needs_xon_xoff", "nxon",
|
|
||||||
"prtr_silent", "mc5i",
|
|
||||||
"hard_cursor", "chts",
|
|
||||||
"non_rev_rmcup", "nrrmc",
|
|
||||||
"no_pad_char", "npc",
|
|
||||||
"non_dest_scroll_region", "ndscr",
|
|
||||||
"can_change", "ccc",
|
|
||||||
"back_color_erase", "bce",
|
|
||||||
"hue_lightness_saturation", "hls",
|
|
||||||
"col_addr_glitch", "xhpa",
|
|
||||||
"cr_cancels_micro_mode", "crxm",
|
|
||||||
"has_print_wheel", "daisy",
|
|
||||||
"row_addr_glitch", "xvpa",
|
|
||||||
"semi_auto_right_margin", "sam",
|
|
||||||
"cpi_changes_res", "cpix",
|
|
||||||
"lpi_changes_res", "lpix",
|
|
||||||
"backspaces_with_bs", "",
|
|
||||||
"crt_no_scrolling", "",
|
|
||||||
"no_correctly_working_cr", "",
|
|
||||||
"gnu_has_meta_key", "",
|
|
||||||
"linefeed_is_newline", "",
|
|
||||||
"has_hardware_tabs", "",
|
|
||||||
"return_does_clr_eol", "",
|
|
||||||
}
|
|
||||||
|
|
||||||
// Numerical capabilities
|
|
||||||
var NumAttr = [...]string{
|
|
||||||
"columns", "cols",
|
|
||||||
"init_tabs", "it",
|
|
||||||
"lines", "lines",
|
|
||||||
"lines_of_memory", "lm",
|
|
||||||
"magic_cookie_glitch", "xmc",
|
|
||||||
"padding_baud_rate", "pb",
|
|
||||||
"virtual_terminal", "vt",
|
|
||||||
"width_status_line", "wsl",
|
|
||||||
"num_labels", "nlab",
|
|
||||||
"label_height", "lh",
|
|
||||||
"label_width", "lw",
|
|
||||||
"max_attributes", "ma",
|
|
||||||
"maximum_windows", "wnum",
|
|
||||||
"max_colors", "colors",
|
|
||||||
"max_pairs", "pairs",
|
|
||||||
"no_color_video", "ncv",
|
|
||||||
"buffer_capacity", "bufsz",
|
|
||||||
"dot_vert_spacing", "spinv",
|
|
||||||
"dot_horz_spacing", "spinh",
|
|
||||||
"max_micro_address", "maddr",
|
|
||||||
"max_micro_jump", "mjump",
|
|
||||||
"micro_col_size", "mcs",
|
|
||||||
"micro_line_size", "mls",
|
|
||||||
"number_of_pins", "npins",
|
|
||||||
"output_res_char", "orc",
|
|
||||||
"output_res_line", "orl",
|
|
||||||
"output_res_horz_inch", "orhi",
|
|
||||||
"output_res_vert_inch", "orvi",
|
|
||||||
"print_rate", "cps",
|
|
||||||
"wide_char_size", "widcs",
|
|
||||||
"buttons", "btns",
|
|
||||||
"bit_image_entwining", "bitwin",
|
|
||||||
"bit_image_type", "bitype",
|
|
||||||
"magic_cookie_glitch_ul", "",
|
|
||||||
"carriage_return_delay", "",
|
|
||||||
"new_line_delay", "",
|
|
||||||
"backspace_delay", "",
|
|
||||||
"horizontal_tab_delay", "",
|
|
||||||
"number_of_function_keys", "",
|
|
||||||
}
|
|
||||||
|
|
||||||
// String capabilities
|
|
||||||
var StrAttr = [...]string{
|
|
||||||
"back_tab", "cbt",
|
|
||||||
"bell", "bel",
|
|
||||||
"carriage_return", "cr",
|
|
||||||
"change_scroll_region", "csr",
|
|
||||||
"clear_all_tabs", "tbc",
|
|
||||||
"clear_screen", "clear",
|
|
||||||
"clr_eol", "el",
|
|
||||||
"clr_eos", "ed",
|
|
||||||
"column_address", "hpa",
|
|
||||||
"command_character", "cmdch",
|
|
||||||
"cursor_address", "cup",
|
|
||||||
"cursor_down", "cud1",
|
|
||||||
"cursor_home", "home",
|
|
||||||
"cursor_invisible", "civis",
|
|
||||||
"cursor_left", "cub1",
|
|
||||||
"cursor_mem_address", "mrcup",
|
|
||||||
"cursor_normal", "cnorm",
|
|
||||||
"cursor_right", "cuf1",
|
|
||||||
"cursor_to_ll", "ll",
|
|
||||||
"cursor_up", "cuu1",
|
|
||||||
"cursor_visible", "cvvis",
|
|
||||||
"delete_character", "dch1",
|
|
||||||
"delete_line", "dl1",
|
|
||||||
"dis_status_line", "dsl",
|
|
||||||
"down_half_line", "hd",
|
|
||||||
"enter_alt_charset_mode", "smacs",
|
|
||||||
"enter_blink_mode", "blink",
|
|
||||||
"enter_bold_mode", "bold",
|
|
||||||
"enter_ca_mode", "smcup",
|
|
||||||
"enter_delete_mode", "smdc",
|
|
||||||
"enter_dim_mode", "dim",
|
|
||||||
"enter_insert_mode", "smir",
|
|
||||||
"enter_secure_mode", "invis",
|
|
||||||
"enter_protected_mode", "prot",
|
|
||||||
"enter_reverse_mode", "rev",
|
|
||||||
"enter_standout_mode", "smso",
|
|
||||||
"enter_underline_mode", "smul",
|
|
||||||
"erase_chars", "ech",
|
|
||||||
"exit_alt_charset_mode", "rmacs",
|
|
||||||
"exit_attribute_mode", "sgr0",
|
|
||||||
"exit_ca_mode", "rmcup",
|
|
||||||
"exit_delete_mode", "rmdc",
|
|
||||||
"exit_insert_mode", "rmir",
|
|
||||||
"exit_standout_mode", "rmso",
|
|
||||||
"exit_underline_mode", "rmul",
|
|
||||||
"flash_screen", "flash",
|
|
||||||
"form_feed", "ff",
|
|
||||||
"from_status_line", "fsl",
|
|
||||||
"init_1string", "is1",
|
|
||||||
"init_2string", "is2",
|
|
||||||
"init_3string", "is3",
|
|
||||||
"init_file", "if",
|
|
||||||
"insert_character", "ich1",
|
|
||||||
"insert_line", "il1",
|
|
||||||
"insert_padding", "ip",
|
|
||||||
"key_backspace", "kbs",
|
|
||||||
"key_catab", "ktbc",
|
|
||||||
"key_clear", "kclr",
|
|
||||||
"key_ctab", "kctab",
|
|
||||||
"key_dc", "kdch1",
|
|
||||||
"key_dl", "kdl1",
|
|
||||||
"key_down", "kcud1",
|
|
||||||
"key_eic", "krmir",
|
|
||||||
"key_eol", "kel",
|
|
||||||
"key_eos", "ked",
|
|
||||||
"key_f0", "kf0",
|
|
||||||
"key_f1", "kf1",
|
|
||||||
"key_f10", "kf10",
|
|
||||||
"key_f2", "kf2",
|
|
||||||
"key_f3", "kf3",
|
|
||||||
"key_f4", "kf4",
|
|
||||||
"key_f5", "kf5",
|
|
||||||
"key_f6", "kf6",
|
|
||||||
"key_f7", "kf7",
|
|
||||||
"key_f8", "kf8",
|
|
||||||
"key_f9", "kf9",
|
|
||||||
"key_home", "khome",
|
|
||||||
"key_ic", "kich1",
|
|
||||||
"key_il", "kil1",
|
|
||||||
"key_left", "kcub1",
|
|
||||||
"key_ll", "kll",
|
|
||||||
"key_npage", "knp",
|
|
||||||
"key_ppage", "kpp",
|
|
||||||
"key_right", "kcuf1",
|
|
||||||
"key_sf", "kind",
|
|
||||||
"key_sr", "kri",
|
|
||||||
"key_stab", "khts",
|
|
||||||
"key_up", "kcuu1",
|
|
||||||
"keypad_local", "rmkx",
|
|
||||||
"keypad_xmit", "smkx",
|
|
||||||
"lab_f0", "lf0",
|
|
||||||
"lab_f1", "lf1",
|
|
||||||
"lab_f10", "lf10",
|
|
||||||
"lab_f2", "lf2",
|
|
||||||
"lab_f3", "lf3",
|
|
||||||
"lab_f4", "lf4",
|
|
||||||
"lab_f5", "lf5",
|
|
||||||
"lab_f6", "lf6",
|
|
||||||
"lab_f7", "lf7",
|
|
||||||
"lab_f8", "lf8",
|
|
||||||
"lab_f9", "lf9",
|
|
||||||
"meta_off", "rmm",
|
|
||||||
"meta_on", "smm",
|
|
||||||
"newline", "_glitch",
|
|
||||||
"pad_char", "npc",
|
|
||||||
"parm_dch", "dch",
|
|
||||||
"parm_delete_line", "dl",
|
|
||||||
"parm_down_cursor", "cud",
|
|
||||||
"parm_ich", "ich",
|
|
||||||
"parm_index", "indn",
|
|
||||||
"parm_insert_line", "il",
|
|
||||||
"parm_left_cursor", "cub",
|
|
||||||
"parm_right_cursor", "cuf",
|
|
||||||
"parm_rindex", "rin",
|
|
||||||
"parm_up_cursor", "cuu",
|
|
||||||
"pkey_key", "pfkey",
|
|
||||||
"pkey_local", "pfloc",
|
|
||||||
"pkey_xmit", "pfx",
|
|
||||||
"print_screen", "mc0",
|
|
||||||
"prtr_off", "mc4",
|
|
||||||
"prtr_on", "mc5",
|
|
||||||
"repeat_char", "rep",
|
|
||||||
"reset_1string", "rs1",
|
|
||||||
"reset_2string", "rs2",
|
|
||||||
"reset_3string", "rs3",
|
|
||||||
"reset_file", "rf",
|
|
||||||
"restore_cursor", "rc",
|
|
||||||
"row_address", "mvpa",
|
|
||||||
"save_cursor", "row_address",
|
|
||||||
"scroll_forward", "ind",
|
|
||||||
"scroll_reverse", "ri",
|
|
||||||
"set_attributes", "sgr",
|
|
||||||
"set_tab", "hts",
|
|
||||||
"set_window", "wind",
|
|
||||||
"tab", "s_magic_smso",
|
|
||||||
"to_status_line", "tsl",
|
|
||||||
"underline_char", "uc",
|
|
||||||
"up_half_line", "hu",
|
|
||||||
"init_prog", "iprog",
|
|
||||||
"key_a1", "ka1",
|
|
||||||
"key_a3", "ka3",
|
|
||||||
"key_b2", "kb2",
|
|
||||||
"key_c1", "kc1",
|
|
||||||
"key_c3", "kc3",
|
|
||||||
"prtr_non", "mc5p",
|
|
||||||
"char_padding", "rmp",
|
|
||||||
"acs_chars", "acsc",
|
|
||||||
"plab_norm", "pln",
|
|
||||||
"key_btab", "kcbt",
|
|
||||||
"enter_xon_mode", "smxon",
|
|
||||||
"exit_xon_mode", "rmxon",
|
|
||||||
"enter_am_mode", "smam",
|
|
||||||
"exit_am_mode", "rmam",
|
|
||||||
"xon_character", "xonc",
|
|
||||||
"xoff_character", "xoffc",
|
|
||||||
"ena_acs", "enacs",
|
|
||||||
"label_on", "smln",
|
|
||||||
"label_off", "rmln",
|
|
||||||
"key_beg", "kbeg",
|
|
||||||
"key_cancel", "kcan",
|
|
||||||
"key_close", "kclo",
|
|
||||||
"key_command", "kcmd",
|
|
||||||
"key_copy", "kcpy",
|
|
||||||
"key_create", "kcrt",
|
|
||||||
"key_end", "kend",
|
|
||||||
"key_enter", "kent",
|
|
||||||
"key_exit", "kext",
|
|
||||||
"key_find", "kfnd",
|
|
||||||
"key_help", "khlp",
|
|
||||||
"key_mark", "kmrk",
|
|
||||||
"key_message", "kmsg",
|
|
||||||
"key_move", "kmov",
|
|
||||||
"key_next", "knxt",
|
|
||||||
"key_open", "kopn",
|
|
||||||
"key_options", "kopt",
|
|
||||||
"key_previous", "kprv",
|
|
||||||
"key_print", "kprt",
|
|
||||||
"key_redo", "krdo",
|
|
||||||
"key_reference", "kref",
|
|
||||||
"key_refresh", "krfr",
|
|
||||||
"key_replace", "krpl",
|
|
||||||
"key_restart", "krst",
|
|
||||||
"key_resume", "kres",
|
|
||||||
"key_save", "ksav",
|
|
||||||
"key_suspend", "kspd",
|
|
||||||
"key_undo", "kund",
|
|
||||||
"key_sbeg", "kBEG",
|
|
||||||
"key_scancel", "kCAN",
|
|
||||||
"key_scommand", "kCMD",
|
|
||||||
"key_scopy", "kCPY",
|
|
||||||
"key_screate", "kCRT",
|
|
||||||
"key_sdc", "kDC",
|
|
||||||
"key_sdl", "kDL",
|
|
||||||
"key_select", "kslt",
|
|
||||||
"key_send", "kEND",
|
|
||||||
"key_seol", "kEOL",
|
|
||||||
"key_sexit", "kEXT",
|
|
||||||
"key_sfind", "kFND",
|
|
||||||
"key_shelp", "kHLP",
|
|
||||||
"key_shome", "kHOM",
|
|
||||||
"key_sic", "kIC",
|
|
||||||
"key_sleft", "kLFT",
|
|
||||||
"key_smessage", "kMSG",
|
|
||||||
"key_smove", "kMOV",
|
|
||||||
"key_snext", "kNXT",
|
|
||||||
"key_soptions", "kOPT",
|
|
||||||
"key_sprevious", "kPRV",
|
|
||||||
"key_sprint", "kPRT",
|
|
||||||
"key_sredo", "kRDO",
|
|
||||||
"key_sreplace", "kRPL",
|
|
||||||
"key_sright", "kRIT",
|
|
||||||
"key_srsume", "kRES",
|
|
||||||
"key_ssave", "kSAV",
|
|
||||||
"key_ssuspend", "kSPD",
|
|
||||||
"key_sundo", "kUND",
|
|
||||||
"req_for_input", "rfi",
|
|
||||||
"key_f11", "kf11",
|
|
||||||
"key_f12", "kf12",
|
|
||||||
"key_f13", "kf13",
|
|
||||||
"key_f14", "kf14",
|
|
||||||
"key_f15", "kf15",
|
|
||||||
"key_f16", "kf16",
|
|
||||||
"key_f17", "kf17",
|
|
||||||
"key_f18", "kf18",
|
|
||||||
"key_f19", "kf19",
|
|
||||||
"key_f20", "kf20",
|
|
||||||
"key_f21", "kf21",
|
|
||||||
"key_f22", "kf22",
|
|
||||||
"key_f23", "kf23",
|
|
||||||
"key_f24", "kf24",
|
|
||||||
"key_f25", "kf25",
|
|
||||||
"key_f26", "kf26",
|
|
||||||
"key_f27", "kf27",
|
|
||||||
"key_f28", "kf28",
|
|
||||||
"key_f29", "kf29",
|
|
||||||
"key_f30", "kf30",
|
|
||||||
"key_f31", "kf31",
|
|
||||||
"key_f32", "kf32",
|
|
||||||
"key_f33", "kf33",
|
|
||||||
"key_f34", "kf34",
|
|
||||||
"key_f35", "kf35",
|
|
||||||
"key_f36", "kf36",
|
|
||||||
"key_f37", "kf37",
|
|
||||||
"key_f38", "kf38",
|
|
||||||
"key_f39", "kf39",
|
|
||||||
"key_f40", "kf40",
|
|
||||||
"key_f41", "kf41",
|
|
||||||
"key_f42", "kf42",
|
|
||||||
"key_f43", "kf43",
|
|
||||||
"key_f44", "kf44",
|
|
||||||
"key_f45", "kf45",
|
|
||||||
"key_f46", "kf46",
|
|
||||||
"key_f47", "kf47",
|
|
||||||
"key_f48", "kf48",
|
|
||||||
"key_f49", "kf49",
|
|
||||||
"key_f50", "kf50",
|
|
||||||
"key_f51", "kf51",
|
|
||||||
"key_f52", "kf52",
|
|
||||||
"key_f53", "kf53",
|
|
||||||
"key_f54", "kf54",
|
|
||||||
"key_f55", "kf55",
|
|
||||||
"key_f56", "kf56",
|
|
||||||
"key_f57", "kf57",
|
|
||||||
"key_f58", "kf58",
|
|
||||||
"key_f59", "kf59",
|
|
||||||
"key_f60", "kf60",
|
|
||||||
"key_f61", "kf61",
|
|
||||||
"key_f62", "kf62",
|
|
||||||
"key_f63", "kf63",
|
|
||||||
"clr_bol", "el1",
|
|
||||||
"clear_margins", "mgc",
|
|
||||||
"set_left_margin", "smgl",
|
|
||||||
"set_right_margin", "smgr",
|
|
||||||
"label_format", "fln",
|
|
||||||
"set_clock", "sclk",
|
|
||||||
"display_clock", "dclk",
|
|
||||||
"remove_clock", "rmclk",
|
|
||||||
"create_window", "cwin",
|
|
||||||
"goto_window", "wingo",
|
|
||||||
"hangup", "hup",
|
|
||||||
"dial_phone", "dial",
|
|
||||||
"quick_dial", "qdial",
|
|
||||||
"tone", "tone",
|
|
||||||
"pulse", "pulse",
|
|
||||||
"flash_hook", "hook",
|
|
||||||
"fixed_pause", "pause",
|
|
||||||
"wait_tone", "wait",
|
|
||||||
"user0", "u0",
|
|
||||||
"user1", "u1",
|
|
||||||
"user2", "u2",
|
|
||||||
"user3", "u3",
|
|
||||||
"user4", "u4",
|
|
||||||
"user5", "u5",
|
|
||||||
"user6", "u6",
|
|
||||||
"user7", "u7",
|
|
||||||
"user8", "u8",
|
|
||||||
"user9", "u9",
|
|
||||||
"orig_pair", "op",
|
|
||||||
"orig_colors", "oc",
|
|
||||||
"initialize_color", "initc",
|
|
||||||
"initialize_pair", "initp",
|
|
||||||
"set_color_pair", "scp",
|
|
||||||
"set_foreground", "setf",
|
|
||||||
"set_background", "setb",
|
|
||||||
"change_char_pitch", "cpi",
|
|
||||||
"change_line_pitch", "lpi",
|
|
||||||
"change_res_horz", "chr",
|
|
||||||
"change_res_vert", "cvr",
|
|
||||||
"define_char", "defc",
|
|
||||||
"enter_doublewide_mode", "swidm",
|
|
||||||
"enter_draft_quality", "sdrfq",
|
|
||||||
"enter_italics_mode", "sitm",
|
|
||||||
"enter_leftward_mode", "slm",
|
|
||||||
"enter_micro_mode", "smicm",
|
|
||||||
"enter_near_letter_quality", "snlq",
|
|
||||||
"enter_normal_quality", "snrmq",
|
|
||||||
"enter_shadow_mode", "sshm",
|
|
||||||
"enter_subscript_mode", "ssubm",
|
|
||||||
"enter_superscript_mode", "ssupm",
|
|
||||||
"enter_upward_mode", "sum",
|
|
||||||
"exit_doublewide_mode", "rwidm",
|
|
||||||
"exit_italics_mode", "ritm",
|
|
||||||
"exit_leftward_mode", "rlm",
|
|
||||||
"exit_micro_mode", "rmicm",
|
|
||||||
"exit_shadow_mode", "rshm",
|
|
||||||
"exit_subscript_mode", "rsubm",
|
|
||||||
"exit_superscript_mode", "rsupm",
|
|
||||||
"exit_upward_mode", "rum",
|
|
||||||
"micro_column_address", "mhpa",
|
|
||||||
"micro_down", "mcud1",
|
|
||||||
"micro_left", "mcub1",
|
|
||||||
"micro_right", "mcuf1",
|
|
||||||
"micro_row_address", "mvpa",
|
|
||||||
"micro_up", "mcuu1",
|
|
||||||
"order_of_pins", "porder",
|
|
||||||
"parm_down_micro", "mcud",
|
|
||||||
"parm_left_micro", "mcub",
|
|
||||||
"parm_right_micro", "mcuf",
|
|
||||||
"parm_up_micro", "mcuu",
|
|
||||||
"select_char_set", "scs",
|
|
||||||
"set_bottom_margin", "smgb",
|
|
||||||
"set_bottom_margin_parm", "smgbp",
|
|
||||||
"set_left_margin_parm", "smglp",
|
|
||||||
"set_right_margin_parm", "smgrp",
|
|
||||||
"set_top_margin", "smgt",
|
|
||||||
"set_top_margin_parm", "smgtp",
|
|
||||||
"start_bit_image", "sbim",
|
|
||||||
"start_char_set_def", "scsd",
|
|
||||||
"stop_bit_image", "rbim",
|
|
||||||
"stop_char_set_def", "rcsd",
|
|
||||||
"subscript_characters", "subcs",
|
|
||||||
"superscript_characters", "supcs",
|
|
||||||
"these_cause_cr", "docr",
|
|
||||||
"zero_motion", "zerom",
|
|
||||||
"char_set_names", "csnm",
|
|
||||||
"key_mouse", "kmous",
|
|
||||||
"mouse_info", "minfo",
|
|
||||||
"req_mouse_pos", "reqmp",
|
|
||||||
"get_mouse", "getm",
|
|
||||||
"set_a_foreground", "setaf",
|
|
||||||
"set_a_background", "setab",
|
|
||||||
"pkey_plab", "pfxl",
|
|
||||||
"device_type", "devt",
|
|
||||||
"code_set_init", "csin",
|
|
||||||
"set0_des_seq", "s0ds",
|
|
||||||
"set1_des_seq", "s1ds",
|
|
||||||
"set2_des_seq", "s2ds",
|
|
||||||
"set3_des_seq", "s3ds",
|
|
||||||
"set_lr_margin", "smglr",
|
|
||||||
"set_tb_margin", "smgtb",
|
|
||||||
"bit_image_repeat", "birep",
|
|
||||||
"bit_image_newline", "binel",
|
|
||||||
"bit_image_carriage_return", "bicr",
|
|
||||||
"color_names", "colornm",
|
|
||||||
"define_bit_image_region", "defbi",
|
|
||||||
"end_bit_image_region", "endbi",
|
|
||||||
"set_color_band", "setcolor",
|
|
||||||
"set_page_length", "slines",
|
|
||||||
"display_pc_char", "dispc",
|
|
||||||
"enter_pc_charset_mode", "smpch",
|
|
||||||
"exit_pc_charset_mode", "rmpch",
|
|
||||||
"enter_scancode_mode", "smsc",
|
|
||||||
"exit_scancode_mode", "rmsc",
|
|
||||||
"pc_term_options", "pctrm",
|
|
||||||
"scancode_escape", "scesc",
|
|
||||||
"alt_scancode_esc", "scesa",
|
|
||||||
"enter_horizontal_hl_mode", "ehhlm",
|
|
||||||
"enter_left_hl_mode", "elhlm",
|
|
||||||
"enter_low_hl_mode", "elohlm",
|
|
||||||
"enter_right_hl_mode", "erhlm",
|
|
||||||
"enter_top_hl_mode", "ethlm",
|
|
||||||
"enter_vertical_hl_mode", "evhlm",
|
|
||||||
"set_a_attributes", "sgr1",
|
|
||||||
"set_pglen_inch", "slength",
|
|
||||||
"termcap_init2", "",
|
|
||||||
"termcap_reset", "",
|
|
||||||
"linefeed_if_not_lf", "",
|
|
||||||
"backspace_if_not_bs", "",
|
|
||||||
"other_non_function_keys", "",
|
|
||||||
"arrow_key_map", "",
|
|
||||||
"acs_ulcorner", "",
|
|
||||||
"acs_llcorner", "",
|
|
||||||
"acs_urcorner", "",
|
|
||||||
"acs_lrcorner", "",
|
|
||||||
"acs_ltee", "",
|
|
||||||
"acs_rtee", "",
|
|
||||||
"acs_btee", "",
|
|
||||||
"acs_ttee", "",
|
|
||||||
"acs_hline", "",
|
|
||||||
"acs_vline", "",
|
|
||||||
"acs_plus", "",
|
|
||||||
"memory_lock", "",
|
|
||||||
"memory_unlock", "",
|
|
||||||
"box_chars_1", "",
|
|
||||||
}
|
|
238
vendor/github.com/Nvveen/Gotty/gotty.go
generated
vendored
238
vendor/github.com/Nvveen/Gotty/gotty.go
generated
vendored
|
@ -1,238 +0,0 @@
|
||||||
// Copyright 2012 Neal van Veen. All rights reserved.
|
|
||||||
// Usage of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file.
|
|
||||||
|
|
||||||
// Gotty is a Go-package for reading and parsing the terminfo database
|
|
||||||
package gotty
|
|
||||||
|
|
||||||
// TODO add more concurrency to name lookup, look for more opportunities.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Open a terminfo file by the name given and construct a TermInfo object.
|
|
||||||
// If something went wrong reading the terminfo database file, an error is
|
|
||||||
// returned.
|
|
||||||
func OpenTermInfo(termName string) (*TermInfo, error) {
|
|
||||||
var term *TermInfo
|
|
||||||
var err error
|
|
||||||
// Find the environment variables
|
|
||||||
termloc := os.Getenv("TERMINFO")
|
|
||||||
if len(termloc) == 0 {
|
|
||||||
// Search like ncurses
|
|
||||||
locations := []string{os.Getenv("HOME") + "/.terminfo/", "/etc/terminfo/",
|
|
||||||
"/lib/terminfo/", "/usr/share/terminfo/"}
|
|
||||||
var path string
|
|
||||||
for _, str := range locations {
|
|
||||||
// Construct path
|
|
||||||
path = str + string(termName[0]) + "/" + termName
|
|
||||||
// Check if path can be opened
|
|
||||||
file, _ := os.Open(path)
|
|
||||||
if file != nil {
|
|
||||||
// Path can open, fall out and use current path
|
|
||||||
file.Close()
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(path) > 0 {
|
|
||||||
term, err = readTermInfo(path)
|
|
||||||
} else {
|
|
||||||
err = errors.New(fmt.Sprintf("No terminfo file(-location) found"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return term, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open a terminfo file from the environment variable containing the current
|
|
||||||
// terminal name and construct a TermInfo object. If something went wrong
|
|
||||||
// reading the terminfo database file, an error is returned.
|
|
||||||
func OpenTermInfoEnv() (*TermInfo, error) {
|
|
||||||
termenv := os.Getenv("TERM")
|
|
||||||
return OpenTermInfo(termenv)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return an attribute by the name attr provided. If none can be found,
|
|
||||||
// an error is returned.
|
|
||||||
func (term *TermInfo) GetAttribute(attr string) (stacker, error) {
|
|
||||||
// Channel to store the main value in.
|
|
||||||
var value stacker
|
|
||||||
// Add a blocking WaitGroup
|
|
||||||
var block sync.WaitGroup
|
|
||||||
// Keep track of variable being written.
|
|
||||||
written := false
|
|
||||||
// Function to put into goroutine.
|
|
||||||
f := func(ats interface{}) {
|
|
||||||
var ok bool
|
|
||||||
var v stacker
|
|
||||||
// Switch on type of map to use and assign value to it.
|
|
||||||
switch reflect.TypeOf(ats).Elem().Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
v, ok = ats.(map[string]bool)[attr]
|
|
||||||
case reflect.Int16:
|
|
||||||
v, ok = ats.(map[string]int16)[attr]
|
|
||||||
case reflect.String:
|
|
||||||
v, ok = ats.(map[string]string)[attr]
|
|
||||||
}
|
|
||||||
// If ok, a value is found, so we can write.
|
|
||||||
if ok {
|
|
||||||
value = v
|
|
||||||
written = true
|
|
||||||
}
|
|
||||||
// Goroutine is done
|
|
||||||
block.Done()
|
|
||||||
}
|
|
||||||
block.Add(3)
|
|
||||||
// Go for all 3 attribute lists.
|
|
||||||
go f(term.boolAttributes)
|
|
||||||
go f(term.numAttributes)
|
|
||||||
go f(term.strAttributes)
|
|
||||||
// Wait until every goroutine is done.
|
|
||||||
block.Wait()
|
|
||||||
// If a value has been written, return it.
|
|
||||||
if written {
|
|
||||||
return value, nil
|
|
||||||
}
|
|
||||||
// Otherwise, error.
|
|
||||||
return nil, fmt.Errorf("Erorr finding attribute")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return an attribute by the name attr provided. If none can be found,
|
|
||||||
// an error is returned. A name is first converted to its termcap value.
|
|
||||||
func (term *TermInfo) GetAttributeName(name string) (stacker, error) {
|
|
||||||
tc := GetTermcapName(name)
|
|
||||||
return term.GetAttribute(tc)
|
|
||||||
}
|
|
||||||
|
|
||||||
// A utility function that finds and returns the termcap equivalent of a
|
|
||||||
// variable name.
|
|
||||||
func GetTermcapName(name string) string {
|
|
||||||
// Termcap name
|
|
||||||
var tc string
|
|
||||||
// Blocking group
|
|
||||||
var wait sync.WaitGroup
|
|
||||||
// Function to put into a goroutine
|
|
||||||
f := func(attrs []string) {
|
|
||||||
// Find the string corresponding to the name
|
|
||||||
for i, s := range attrs {
|
|
||||||
if s == name {
|
|
||||||
tc = attrs[i+1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Goroutine is finished
|
|
||||||
wait.Done()
|
|
||||||
}
|
|
||||||
wait.Add(3)
|
|
||||||
// Go for all 3 attribute lists
|
|
||||||
go f(BoolAttr[:])
|
|
||||||
go f(NumAttr[:])
|
|
||||||
go f(StrAttr[:])
|
|
||||||
// Wait until every goroutine is done
|
|
||||||
wait.Wait()
|
|
||||||
// Return the termcap name
|
|
||||||
return tc
|
|
||||||
}
|
|
||||||
|
|
||||||
// This function takes a path to a terminfo file and reads it in binary
|
|
||||||
// form to construct the actual TermInfo file.
|
|
||||||
func readTermInfo(path string) (*TermInfo, error) {
|
|
||||||
// Open the terminfo file
|
|
||||||
file, err := os.Open(path)
|
|
||||||
defer file.Close()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// magic, nameSize, boolSize, nrSNum, nrOffsetsStr, strSize
|
|
||||||
// Header is composed of the magic 0432 octal number, size of the name
|
|
||||||
// section, size of the boolean section, the amount of number values,
|
|
||||||
// the number of offsets of strings, and the size of the string section.
|
|
||||||
var header [6]int16
|
|
||||||
// Byte array is used to read in byte values
|
|
||||||
var byteArray []byte
|
|
||||||
// Short array is used to read in short values
|
|
||||||
var shArray []int16
|
|
||||||
// TermInfo object to store values
|
|
||||||
var term TermInfo
|
|
||||||
|
|
||||||
// Read in the header
|
|
||||||
err = binary.Read(file, binary.LittleEndian, &header)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// If magic number isn't there or isn't correct, we have the wrong filetype
|
|
||||||
if header[0] != 0432 {
|
|
||||||
return nil, errors.New(fmt.Sprintf("Wrong filetype"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read in the names
|
|
||||||
byteArray = make([]byte, header[1])
|
|
||||||
err = binary.Read(file, binary.LittleEndian, &byteArray)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
term.Names = strings.Split(string(byteArray), "|")
|
|
||||||
|
|
||||||
// Read in the booleans
|
|
||||||
byteArray = make([]byte, header[2])
|
|
||||||
err = binary.Read(file, binary.LittleEndian, &byteArray)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
term.boolAttributes = make(map[string]bool)
|
|
||||||
for i, b := range byteArray {
|
|
||||||
if b == 1 {
|
|
||||||
term.boolAttributes[BoolAttr[i*2+1]] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// If the number of bytes read is not even, a byte for alignment is added
|
|
||||||
if len(byteArray)%2 != 0 {
|
|
||||||
err = binary.Read(file, binary.LittleEndian, make([]byte, 1))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read in shorts
|
|
||||||
shArray = make([]int16, header[3])
|
|
||||||
err = binary.Read(file, binary.LittleEndian, &shArray)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
term.numAttributes = make(map[string]int16)
|
|
||||||
for i, n := range shArray {
|
|
||||||
if n != 0377 && n > -1 {
|
|
||||||
term.numAttributes[NumAttr[i*2+1]] = n
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read the offsets into the short array
|
|
||||||
shArray = make([]int16, header[4])
|
|
||||||
err = binary.Read(file, binary.LittleEndian, &shArray)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// Read the actual strings in the byte array
|
|
||||||
byteArray = make([]byte, header[5])
|
|
||||||
err = binary.Read(file, binary.LittleEndian, &byteArray)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
term.strAttributes = make(map[string]string)
|
|
||||||
// We get an offset, and then iterate until the string is null-terminated
|
|
||||||
for i, offset := range shArray {
|
|
||||||
if offset > -1 {
|
|
||||||
r := offset
|
|
||||||
for ; byteArray[r] != 0; r++ {
|
|
||||||
}
|
|
||||||
term.strAttributes[StrAttr[i*2+1]] = string(byteArray[offset:r])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &term, nil
|
|
||||||
}
|
|
362
vendor/github.com/Nvveen/Gotty/parser.go
generated
vendored
362
vendor/github.com/Nvveen/Gotty/parser.go
generated
vendored
|
@ -1,362 +0,0 @@
|
||||||
// Copyright 2012 Neal van Veen. All rights reserved.
|
|
||||||
// Usage of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file.
|
|
||||||
|
|
||||||
package gotty
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var exp = [...]string{
|
|
||||||
"%%",
|
|
||||||
"%c",
|
|
||||||
"%s",
|
|
||||||
"%p(\\d)",
|
|
||||||
"%P([A-z])",
|
|
||||||
"%g([A-z])",
|
|
||||||
"%'(.)'",
|
|
||||||
"%{([0-9]+)}",
|
|
||||||
"%l",
|
|
||||||
"%\\+|%-|%\\*|%/|%m",
|
|
||||||
"%&|%\\||%\\^",
|
|
||||||
"%=|%>|%<",
|
|
||||||
"%A|%O",
|
|
||||||
"%!|%~",
|
|
||||||
"%i",
|
|
||||||
"%(:[\\ #\\-\\+]{0,4})?(\\d+\\.\\d+|\\d+)?[doxXs]",
|
|
||||||
"%\\?(.*?);",
|
|
||||||
}
|
|
||||||
|
|
||||||
var regex *regexp.Regexp
|
|
||||||
var staticVar map[byte]stacker
|
|
||||||
|
|
||||||
// Parses the attribute that is received with name attr and parameters params.
|
|
||||||
func (term *TermInfo) Parse(attr string, params ...interface{}) (string, error) {
|
|
||||||
// Get the attribute name first.
|
|
||||||
iface, err := term.GetAttribute(attr)
|
|
||||||
str, ok := iface.(string)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if !ok {
|
|
||||||
return str, errors.New("Only string capabilities can be parsed.")
|
|
||||||
}
|
|
||||||
// Construct the hidden parser struct so we can use a recursive stack based
|
|
||||||
// parser.
|
|
||||||
ps := &parser{}
|
|
||||||
// Dynamic variables only exist in this context.
|
|
||||||
ps.dynamicVar = make(map[byte]stacker, 26)
|
|
||||||
ps.parameters = make([]stacker, len(params))
|
|
||||||
// Convert the parameters to insert them into the parser struct.
|
|
||||||
for i, x := range params {
|
|
||||||
ps.parameters[i] = x
|
|
||||||
}
|
|
||||||
// Recursively walk and return.
|
|
||||||
result, err := ps.walk(str)
|
|
||||||
return result, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parses the attribute that is received with name attr and parameters params.
|
|
||||||
// Only works on full name of a capability that is given, which it uses to
|
|
||||||
// search for the termcap name.
|
|
||||||
func (term *TermInfo) ParseName(attr string, params ...interface{}) (string, error) {
|
|
||||||
tc := GetTermcapName(attr)
|
|
||||||
return term.Parse(tc, params)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Identify each token in a stack based manner and do the actual parsing.
|
|
||||||
func (ps *parser) walk(attr string) (string, error) {
|
|
||||||
// We use a buffer to get the modified string.
|
|
||||||
var buf bytes.Buffer
|
|
||||||
// Next, find and identify all tokens by their indices and strings.
|
|
||||||
tokens := regex.FindAllStringSubmatch(attr, -1)
|
|
||||||
if len(tokens) == 0 {
|
|
||||||
return attr, nil
|
|
||||||
}
|
|
||||||
indices := regex.FindAllStringIndex(attr, -1)
|
|
||||||
q := 0 // q counts the matches of one token
|
|
||||||
// Iterate through the string per character.
|
|
||||||
for i := 0; i < len(attr); i++ {
|
|
||||||
// If the current position is an identified token, execute the following
|
|
||||||
// steps.
|
|
||||||
if q < len(indices) && i >= indices[q][0] && i < indices[q][1] {
|
|
||||||
// Switch on token.
|
|
||||||
switch {
|
|
||||||
case tokens[q][0][:2] == "%%":
|
|
||||||
// Literal percentage character.
|
|
||||||
buf.WriteByte('%')
|
|
||||||
case tokens[q][0][:2] == "%c":
|
|
||||||
// Pop a character.
|
|
||||||
c, err := ps.st.pop()
|
|
||||||
if err != nil {
|
|
||||||
return buf.String(), err
|
|
||||||
}
|
|
||||||
buf.WriteByte(c.(byte))
|
|
||||||
case tokens[q][0][:2] == "%s":
|
|
||||||
// Pop a string.
|
|
||||||
str, err := ps.st.pop()
|
|
||||||
if err != nil {
|
|
||||||
return buf.String(), err
|
|
||||||
}
|
|
||||||
if _, ok := str.(string); !ok {
|
|
||||||
return buf.String(), errors.New("Stack head is not a string")
|
|
||||||
}
|
|
||||||
buf.WriteString(str.(string))
|
|
||||||
case tokens[q][0][:2] == "%p":
|
|
||||||
// Push a parameter on the stack.
|
|
||||||
index, err := strconv.ParseInt(tokens[q][1], 10, 8)
|
|
||||||
index--
|
|
||||||
if err != nil {
|
|
||||||
return buf.String(), err
|
|
||||||
}
|
|
||||||
if int(index) >= len(ps.parameters) {
|
|
||||||
return buf.String(), errors.New("Parameters index out of bound")
|
|
||||||
}
|
|
||||||
ps.st.push(ps.parameters[index])
|
|
||||||
case tokens[q][0][:2] == "%P":
|
|
||||||
// Pop a variable from the stack as a dynamic or static variable.
|
|
||||||
val, err := ps.st.pop()
|
|
||||||
if err != nil {
|
|
||||||
return buf.String(), err
|
|
||||||
}
|
|
||||||
index := tokens[q][2]
|
|
||||||
if len(index) > 1 {
|
|
||||||
errorStr := fmt.Sprintf("%s is not a valid dynamic variables index",
|
|
||||||
index)
|
|
||||||
return buf.String(), errors.New(errorStr)
|
|
||||||
}
|
|
||||||
// Specify either dynamic or static.
|
|
||||||
if index[0] >= 'a' && index[0] <= 'z' {
|
|
||||||
ps.dynamicVar[index[0]] = val
|
|
||||||
} else if index[0] >= 'A' && index[0] <= 'Z' {
|
|
||||||
staticVar[index[0]] = val
|
|
||||||
}
|
|
||||||
case tokens[q][0][:2] == "%g":
|
|
||||||
// Push a variable from the stack as a dynamic or static variable.
|
|
||||||
index := tokens[q][3]
|
|
||||||
if len(index) > 1 {
|
|
||||||
errorStr := fmt.Sprintf("%s is not a valid static variables index",
|
|
||||||
index)
|
|
||||||
return buf.String(), errors.New(errorStr)
|
|
||||||
}
|
|
||||||
var val stacker
|
|
||||||
if index[0] >= 'a' && index[0] <= 'z' {
|
|
||||||
val = ps.dynamicVar[index[0]]
|
|
||||||
} else if index[0] >= 'A' && index[0] <= 'Z' {
|
|
||||||
val = staticVar[index[0]]
|
|
||||||
}
|
|
||||||
ps.st.push(val)
|
|
||||||
case tokens[q][0][:2] == "%'":
|
|
||||||
// Push a character constant.
|
|
||||||
con := tokens[q][4]
|
|
||||||
if len(con) > 1 {
|
|
||||||
errorStr := fmt.Sprintf("%s is not a valid character constant", con)
|
|
||||||
return buf.String(), errors.New(errorStr)
|
|
||||||
}
|
|
||||||
ps.st.push(con[0])
|
|
||||||
case tokens[q][0][:2] == "%{":
|
|
||||||
// Push an integer constant.
|
|
||||||
con, err := strconv.ParseInt(tokens[q][5], 10, 32)
|
|
||||||
if err != nil {
|
|
||||||
return buf.String(), err
|
|
||||||
}
|
|
||||||
ps.st.push(con)
|
|
||||||
case tokens[q][0][:2] == "%l":
|
|
||||||
// Push the length of the string that is popped from the stack.
|
|
||||||
popStr, err := ps.st.pop()
|
|
||||||
if err != nil {
|
|
||||||
return buf.String(), err
|
|
||||||
}
|
|
||||||
if _, ok := popStr.(string); !ok {
|
|
||||||
errStr := fmt.Sprintf("Stack head is not a string")
|
|
||||||
return buf.String(), errors.New(errStr)
|
|
||||||
}
|
|
||||||
ps.st.push(len(popStr.(string)))
|
|
||||||
case tokens[q][0][:2] == "%?":
|
|
||||||
// If-then-else construct. First, the whole string is identified and
|
|
||||||
// then inside this substring, we can specify which parts to switch on.
|
|
||||||
ifReg, _ := regexp.Compile("%\\?(.*)%t(.*)%e(.*);|%\\?(.*)%t(.*);")
|
|
||||||
ifTokens := ifReg.FindStringSubmatch(tokens[q][0])
|
|
||||||
var (
|
|
||||||
ifStr string
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
// Parse the if-part to determine if-else.
|
|
||||||
if len(ifTokens[1]) > 0 {
|
|
||||||
ifStr, err = ps.walk(ifTokens[1])
|
|
||||||
} else { // else
|
|
||||||
ifStr, err = ps.walk(ifTokens[4])
|
|
||||||
}
|
|
||||||
// Return any errors
|
|
||||||
if err != nil {
|
|
||||||
return buf.String(), err
|
|
||||||
} else if len(ifStr) > 0 {
|
|
||||||
// Self-defined limitation, not sure if this is correct, but didn't
|
|
||||||
// seem like it.
|
|
||||||
return buf.String(), errors.New("If-clause cannot print statements")
|
|
||||||
}
|
|
||||||
var thenStr string
|
|
||||||
// Pop the first value that is set by parsing the if-clause.
|
|
||||||
choose, err := ps.st.pop()
|
|
||||||
if err != nil {
|
|
||||||
return buf.String(), err
|
|
||||||
}
|
|
||||||
// Switch to if or else.
|
|
||||||
if choose.(int) == 0 && len(ifTokens[1]) > 0 {
|
|
||||||
thenStr, err = ps.walk(ifTokens[3])
|
|
||||||
} else if choose.(int) != 0 {
|
|
||||||
if len(ifTokens[1]) > 0 {
|
|
||||||
thenStr, err = ps.walk(ifTokens[2])
|
|
||||||
} else {
|
|
||||||
thenStr, err = ps.walk(ifTokens[5])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return buf.String(), err
|
|
||||||
}
|
|
||||||
buf.WriteString(thenStr)
|
|
||||||
case tokens[q][0][len(tokens[q][0])-1] == 'd': // Fallthrough for printing
|
|
||||||
fallthrough
|
|
||||||
case tokens[q][0][len(tokens[q][0])-1] == 'o': // digits.
|
|
||||||
fallthrough
|
|
||||||
case tokens[q][0][len(tokens[q][0])-1] == 'x':
|
|
||||||
fallthrough
|
|
||||||
case tokens[q][0][len(tokens[q][0])-1] == 'X':
|
|
||||||
fallthrough
|
|
||||||
case tokens[q][0][len(tokens[q][0])-1] == 's':
|
|
||||||
token := tokens[q][0]
|
|
||||||
// Remove the : that comes before a flag.
|
|
||||||
if token[1] == ':' {
|
|
||||||
token = token[:1] + token[2:]
|
|
||||||
}
|
|
||||||
digit, err := ps.st.pop()
|
|
||||||
if err != nil {
|
|
||||||
return buf.String(), err
|
|
||||||
}
|
|
||||||
// The rest is determined like the normal formatted prints.
|
|
||||||
digitStr := fmt.Sprintf(token, digit.(int))
|
|
||||||
buf.WriteString(digitStr)
|
|
||||||
case tokens[q][0][:2] == "%i":
|
|
||||||
// Increment the parameters by one.
|
|
||||||
if len(ps.parameters) < 2 {
|
|
||||||
return buf.String(), errors.New("Not enough parameters to increment.")
|
|
||||||
}
|
|
||||||
val1, val2 := ps.parameters[0].(int), ps.parameters[1].(int)
|
|
||||||
val1++
|
|
||||||
val2++
|
|
||||||
ps.parameters[0], ps.parameters[1] = val1, val2
|
|
||||||
default:
|
|
||||||
// The rest of the tokens is a special case, where two values are
|
|
||||||
// popped and then operated on by the token that comes after them.
|
|
||||||
op1, err := ps.st.pop()
|
|
||||||
if err != nil {
|
|
||||||
return buf.String(), err
|
|
||||||
}
|
|
||||||
op2, err := ps.st.pop()
|
|
||||||
if err != nil {
|
|
||||||
return buf.String(), err
|
|
||||||
}
|
|
||||||
var result stacker
|
|
||||||
switch tokens[q][0][:2] {
|
|
||||||
case "%+":
|
|
||||||
// Addition
|
|
||||||
result = op2.(int) + op1.(int)
|
|
||||||
case "%-":
|
|
||||||
// Subtraction
|
|
||||||
result = op2.(int) - op1.(int)
|
|
||||||
case "%*":
|
|
||||||
// Multiplication
|
|
||||||
result = op2.(int) * op1.(int)
|
|
||||||
case "%/":
|
|
||||||
// Division
|
|
||||||
result = op2.(int) / op1.(int)
|
|
||||||
case "%m":
|
|
||||||
// Modulo
|
|
||||||
result = op2.(int) % op1.(int)
|
|
||||||
case "%&":
|
|
||||||
// Bitwise AND
|
|
||||||
result = op2.(int) & op1.(int)
|
|
||||||
case "%|":
|
|
||||||
// Bitwise OR
|
|
||||||
result = op2.(int) | op1.(int)
|
|
||||||
case "%^":
|
|
||||||
// Bitwise XOR
|
|
||||||
result = op2.(int) ^ op1.(int)
|
|
||||||
case "%=":
|
|
||||||
// Equals
|
|
||||||
result = op2 == op1
|
|
||||||
case "%>":
|
|
||||||
// Greater-than
|
|
||||||
result = op2.(int) > op1.(int)
|
|
||||||
case "%<":
|
|
||||||
// Lesser-than
|
|
||||||
result = op2.(int) < op1.(int)
|
|
||||||
case "%A":
|
|
||||||
// Logical AND
|
|
||||||
result = op2.(bool) && op1.(bool)
|
|
||||||
case "%O":
|
|
||||||
// Logical OR
|
|
||||||
result = op2.(bool) || op1.(bool)
|
|
||||||
case "%!":
|
|
||||||
// Logical complement
|
|
||||||
result = !op1.(bool)
|
|
||||||
case "%~":
|
|
||||||
// Bitwise complement
|
|
||||||
result = ^(op1.(int))
|
|
||||||
}
|
|
||||||
ps.st.push(result)
|
|
||||||
}
|
|
||||||
|
|
||||||
i = indices[q][1] - 1
|
|
||||||
q++
|
|
||||||
} else {
|
|
||||||
// We are not "inside" a token, so just skip until the end or the next
|
|
||||||
// token, and add all characters to the buffer.
|
|
||||||
j := i
|
|
||||||
if q != len(indices) {
|
|
||||||
for !(j >= indices[q][0] && j < indices[q][1]) {
|
|
||||||
j++
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
j = len(attr)
|
|
||||||
}
|
|
||||||
buf.WriteString(string(attr[i:j]))
|
|
||||||
i = j
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Return the buffer as a string.
|
|
||||||
return buf.String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Push a stacker-value onto the stack.
|
|
||||||
func (st *stack) push(s stacker) {
|
|
||||||
*st = append(*st, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pop a stacker-value from the stack.
|
|
||||||
func (st *stack) pop() (stacker, error) {
|
|
||||||
if len(*st) == 0 {
|
|
||||||
return nil, errors.New("Stack is empty.")
|
|
||||||
}
|
|
||||||
newStack := make(stack, len(*st)-1)
|
|
||||||
val := (*st)[len(*st)-1]
|
|
||||||
copy(newStack, (*st)[:len(*st)-1])
|
|
||||||
*st = newStack
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialize regexes and the static vars (that don't get changed between
|
|
||||||
// calls.
|
|
||||||
func init() {
|
|
||||||
// Initialize the main regex.
|
|
||||||
expStr := strings.Join(exp[:], "|")
|
|
||||||
regex, _ = regexp.Compile(expStr)
|
|
||||||
// Initialize the static variables.
|
|
||||||
staticVar = make(map[byte]stacker, 26)
|
|
||||||
}
|
|
23
vendor/github.com/Nvveen/Gotty/types.go
generated
vendored
23
vendor/github.com/Nvveen/Gotty/types.go
generated
vendored
|
@ -1,23 +0,0 @@
|
||||||
// Copyright 2012 Neal van Veen. All rights reserved.
|
|
||||||
// Usage of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file.
|
|
||||||
|
|
||||||
package gotty
|
|
||||||
|
|
||||||
type TermInfo struct {
|
|
||||||
boolAttributes map[string]bool
|
|
||||||
numAttributes map[string]int16
|
|
||||||
strAttributes map[string]string
|
|
||||||
// The various names of the TermInfo file.
|
|
||||||
Names []string
|
|
||||||
}
|
|
||||||
|
|
||||||
type stacker interface {
|
|
||||||
}
|
|
||||||
type stack []stacker
|
|
||||||
|
|
||||||
type parser struct {
|
|
||||||
st stack
|
|
||||||
parameters []stacker
|
|
||||||
dynamicVar map[byte]stacker
|
|
||||||
}
|
|
20
vendor/github.com/beorn7/perks/LICENSE
generated
vendored
20
vendor/github.com/beorn7/perks/LICENSE
generated
vendored
|
@ -1,20 +0,0 @@
|
||||||
Copyright (C) 2013 Blake Mizerany
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining
|
|
||||||
a copy of this software and associated documentation files (the
|
|
||||||
"Software"), to deal in the Software without restriction, including
|
|
||||||
without limitation the rights to use, copy, modify, merge, publish,
|
|
||||||
distribute, sublicense, and/or sell copies of the Software, and to
|
|
||||||
permit persons to whom the Software is furnished to do so, subject to
|
|
||||||
the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be
|
|
||||||
included in all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
||||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
||||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
|
||||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
|
||||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
|
||||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt
generated
vendored
2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt
generated
vendored
File diff suppressed because it is too large
Load diff
316
vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
316
vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
|
@ -1,316 +0,0 @@
|
||||||
// Package quantile computes approximate quantiles over an unbounded data
|
|
||||||
// stream within low memory and CPU bounds.
|
|
||||||
//
|
|
||||||
// A small amount of accuracy is traded to achieve the above properties.
|
|
||||||
//
|
|
||||||
// Multiple streams can be merged before calling Query to generate a single set
|
|
||||||
// of results. This is meaningful when the streams represent the same type of
|
|
||||||
// data. See Merge and Samples.
|
|
||||||
//
|
|
||||||
// For more detailed information about the algorithm used, see:
|
|
||||||
//
|
|
||||||
// Effective Computation of Biased Quantiles over Data Streams
|
|
||||||
//
|
|
||||||
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
|
|
||||||
package quantile
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"sort"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Sample holds an observed value and meta information for compression. JSON
|
|
||||||
// tags have been added for convenience.
|
|
||||||
type Sample struct {
|
|
||||||
Value float64 `json:",string"`
|
|
||||||
Width float64 `json:",string"`
|
|
||||||
Delta float64 `json:",string"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Samples represents a slice of samples. It implements sort.Interface.
|
|
||||||
type Samples []Sample
|
|
||||||
|
|
||||||
func (a Samples) Len() int { return len(a) }
|
|
||||||
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
|
|
||||||
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
|
||||||
|
|
||||||
type invariant func(s *stream, r float64) float64
|
|
||||||
|
|
||||||
// NewLowBiased returns an initialized Stream for low-biased quantiles
|
|
||||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
|
||||||
// error guarantees can still be given even for the lower ranks of the data
|
|
||||||
// distribution.
|
|
||||||
//
|
|
||||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
|
||||||
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
|
|
||||||
//
|
|
||||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
|
||||||
// properties.
|
|
||||||
func NewLowBiased(epsilon float64) *Stream {
|
|
||||||
ƒ := func(s *stream, r float64) float64 {
|
|
||||||
return 2 * epsilon * r
|
|
||||||
}
|
|
||||||
return newStream(ƒ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHighBiased returns an initialized Stream for high-biased quantiles
|
|
||||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
|
||||||
// error guarantees can still be given even for the higher ranks of the data
|
|
||||||
// distribution.
|
|
||||||
//
|
|
||||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
|
||||||
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
|
|
||||||
//
|
|
||||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
|
||||||
// properties.
|
|
||||||
func NewHighBiased(epsilon float64) *Stream {
|
|
||||||
ƒ := func(s *stream, r float64) float64 {
|
|
||||||
return 2 * epsilon * (s.n - r)
|
|
||||||
}
|
|
||||||
return newStream(ƒ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTargeted returns an initialized Stream concerned with a particular set of
|
|
||||||
// quantile values that are supplied a priori. Knowing these a priori reduces
|
|
||||||
// space and computation time. The targets map maps the desired quantiles to
|
|
||||||
// their absolute errors, i.e. the true quantile of a value returned by a query
|
|
||||||
// is guaranteed to be within (Quantile±Epsilon).
|
|
||||||
//
|
|
||||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
|
|
||||||
func NewTargeted(targetMap map[float64]float64) *Stream {
|
|
||||||
// Convert map to slice to avoid slow iterations on a map.
|
|
||||||
// ƒ is called on the hot path, so converting the map to a slice
|
|
||||||
// beforehand results in significant CPU savings.
|
|
||||||
targets := targetMapToSlice(targetMap)
|
|
||||||
|
|
||||||
ƒ := func(s *stream, r float64) float64 {
|
|
||||||
var m = math.MaxFloat64
|
|
||||||
var f float64
|
|
||||||
for _, t := range targets {
|
|
||||||
if t.quantile*s.n <= r {
|
|
||||||
f = (2 * t.epsilon * r) / t.quantile
|
|
||||||
} else {
|
|
||||||
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
|
|
||||||
}
|
|
||||||
if f < m {
|
|
||||||
m = f
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
return newStream(ƒ)
|
|
||||||
}
|
|
||||||
|
|
||||||
type target struct {
|
|
||||||
quantile float64
|
|
||||||
epsilon float64
|
|
||||||
}
|
|
||||||
|
|
||||||
func targetMapToSlice(targetMap map[float64]float64) []target {
|
|
||||||
targets := make([]target, 0, len(targetMap))
|
|
||||||
|
|
||||||
for quantile, epsilon := range targetMap {
|
|
||||||
t := target{
|
|
||||||
quantile: quantile,
|
|
||||||
epsilon: epsilon,
|
|
||||||
}
|
|
||||||
targets = append(targets, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
return targets
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
|
|
||||||
// design. Take care when using across multiple goroutines.
|
|
||||||
type Stream struct {
|
|
||||||
*stream
|
|
||||||
b Samples
|
|
||||||
sorted bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func newStream(ƒ invariant) *Stream {
|
|
||||||
x := &stream{ƒ: ƒ}
|
|
||||||
return &Stream{x, make(Samples, 0, 500), true}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Insert inserts v into the stream.
|
|
||||||
func (s *Stream) Insert(v float64) {
|
|
||||||
s.insert(Sample{Value: v, Width: 1})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Stream) insert(sample Sample) {
|
|
||||||
s.b = append(s.b, sample)
|
|
||||||
s.sorted = false
|
|
||||||
if len(s.b) == cap(s.b) {
|
|
||||||
s.flush()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Query returns the computed qth percentiles value. If s was created with
|
|
||||||
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
|
|
||||||
// will return an unspecified result.
|
|
||||||
func (s *Stream) Query(q float64) float64 {
|
|
||||||
if !s.flushed() {
|
|
||||||
// Fast path when there hasn't been enough data for a flush;
|
|
||||||
// this also yields better accuracy for small sets of data.
|
|
||||||
l := len(s.b)
|
|
||||||
if l == 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
i := int(math.Ceil(float64(l) * q))
|
|
||||||
if i > 0 {
|
|
||||||
i -= 1
|
|
||||||
}
|
|
||||||
s.maybeSort()
|
|
||||||
return s.b[i].Value
|
|
||||||
}
|
|
||||||
s.flush()
|
|
||||||
return s.stream.query(q)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge merges samples into the underlying streams samples. This is handy when
|
|
||||||
// merging multiple streams from separate threads, database shards, etc.
|
|
||||||
//
|
|
||||||
// ATTENTION: This method is broken and does not yield correct results. The
|
|
||||||
// underlying algorithm is not capable of merging streams correctly.
|
|
||||||
func (s *Stream) Merge(samples Samples) {
|
|
||||||
sort.Sort(samples)
|
|
||||||
s.stream.merge(samples)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0] // empty the buffer but keep its capacity
}
|
|
||||||
|
|
||||||
// Samples returns stream samples held by s.
|
|
||||||
func (s *Stream) Samples() Samples {
|
|
||||||
if !s.flushed() {
|
|
||||||
return s.b
|
|
||||||
}
|
|
||||||
s.flush()
|
|
||||||
return s.stream.samples()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count returns the total number of samples observed in the stream
// since initialization. This is the buffered count plus the widths
// already merged into the underlying summary.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}
|
|
||||||
|
|
||||||
// flush sorts the buffer if needed, merges it into the underlying
// summary, and empties the buffer while keeping its capacity.
func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}
|
|
||||||
|
|
||||||
func (s *Stream) maybeSort() {
|
|
||||||
if !s.sorted {
|
|
||||||
s.sorted = true
|
|
||||||
sort.Sort(s.b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// flushed reports whether any buffered samples have been merged into
// the underlying summary, i.e. whether the summary is non-empty.
func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}
|
|
||||||
|
|
||||||
// stream is the raw quantile summary underlying Stream.
type stream struct {
	n float64   // total sample width (observation count) merged so far
	l []Sample  // summary samples, kept ordered by Value (see merge)
	ƒ invariant // error-invariant function supplied at construction
}
|
|
||||||
|
|
||||||
// reset empties the summary, retaining the backing array of l.
func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}
|
|
||||||
|
|
||||||
// insert adds a single observation v as a sample of unit width and
// zero delta.
func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}
|
|
||||||
|
|
||||||
// merge folds the (Value-ordered) samples into the summary, keeping s.l
// ordered by Value, then compresses the result.
//
// TODO(beorn7): This tries to merge not only individual samples, but
// whole summaries. The paper doesn't mention merging summaries at
// all. Unittests show that the merging is inaccurate. Find out how to
// do merges properly.
func (s *stream) merge(samples Samples) {
	var r float64 // cumulative width of summary samples preceding index i
	i := 0
	for _, sample := range samples {
		// Because both inputs are sorted, the scan over s.l resumes
		// at i rather than restarting for each incoming sample.
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		// sample is larger than every existing value: append at the end.
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	s.compress()
}
|
|
||||||
|
|
||||||
// count returns the number of observations folded into the summary.
func (s *stream) count() int {
	return int(s.n)
}
|
|
||||||
|
|
||||||
// query walks the summary until the cumulative rank window passes the
// target rank for quantile q and returns the preceding sample's value.
// Callers must ensure s.l is non-empty (Stream.Query guarantees this by
// checking flushed() first); otherwise s.l[0] panics.
func (s *stream) query(q float64) float64 {
	// Target rank, widened by half the allowed error at that rank.
	t := math.Ceil(q * s.n)
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64 // cumulative width up to (but excluding) the current sample
	for _, c := range s.l[1:] {
		r += p.Width
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}
|
|
||||||
|
|
||||||
// compress merges adjacent summary samples whose combined width and
// delta still satisfy the invariant ƒ, scanning right-to-left so that
// removals do not disturb the not-yet-visited prefix of s.l.
func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	x := s.l[len(s.l)-1] // current merge candidate (rightmost survivor)
	xi := len(s.l) - 1   // index of x in s.l
	r := s.n - 1 - x.Width

	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			// c can be absorbed into x without violating the invariant.
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			// c survives and becomes the next merge candidate.
			x = c
			xi = i
		}
		r -= c.Width
	}
}
|
|
||||||
|
|
||||||
func (s *stream) samples() Samples {
|
|
||||||
samples := make(Samples, len(s.l))
|
|
||||||
copy(samples, s.l)
|
|
||||||
return samples
|
|
||||||
}
|
|
|
@ -176,7 +176,7 @@
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
Copyright 2013-2017 Docker, Inc.
|
Copyright The containerd Authors
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
you may not use this file except in compliance with the License.
|
you may not use this file except in compliance with the License.
|
78
vendor/github.com/containerd/containerd/errdefs/errors.go
generated
vendored
Normal file
78
vendor/github.com/containerd/containerd/errdefs/errors.go
generated
vendored
Normal file
|
@ -0,0 +1,78 @@
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package errdefs defines the common errors used throughout containerd
|
||||||
|
// packages.
|
||||||
|
//
|
||||||
|
// Use with errors.Wrap and error.Wrapf to add context to an error.
|
||||||
|
//
|
||||||
|
// To detect an error class, use the IsXXX functions to tell whether an error
|
||||||
|
// is of a certain type.
|
||||||
|
//
|
||||||
|
// The functions ToGRPC and FromGRPC can be used to map server-side and
|
||||||
|
// client-side errors to the correct types.
|
||||||
|
package errdefs
|
||||||
|
|
||||||
|
import "github.com/pkg/errors"
|
||||||
|
|
||||||
|
// Definitions of common error types used throughout containerd. All containerd
// errors returned by most packages will map into one of these errors classes.
// Packages should return errors of these types when they want to instruct a
// client to take a particular action.
//
// For the most part, we just try to provide local grpc errors. Most conditions
// map very well to those defined by grpc.
var (
	ErrUnknown            = errors.New("unknown") // used internally to represent a missed mapping.
	ErrInvalidArgument    = errors.New("invalid argument")
	ErrNotFound           = errors.New("not found")
	ErrAlreadyExists      = errors.New("already exists")
	ErrFailedPrecondition = errors.New("failed precondition")
	ErrUnavailable        = errors.New("unavailable")
	ErrNotImplemented     = errors.New("not implemented") // represents not supported and unimplemented
)
|
||||||
|
|
||||||
|
// IsInvalidArgument returns true if the error is due to an invalid argument.
func IsInvalidArgument(err error) bool {
	return errors.Cause(err) == ErrInvalidArgument
}
|
||||||
|
|
||||||
|
// IsNotFound returns true if the error is due to a missing object.
func IsNotFound(err error) bool {
	return errors.Cause(err) == ErrNotFound
}
|
||||||
|
|
||||||
|
// IsAlreadyExists returns true if the error is due to an already existing
// metadata item.
func IsAlreadyExists(err error) bool {
	return errors.Cause(err) == ErrAlreadyExists
}
|
||||||
|
|
||||||
|
// IsFailedPrecondition returns true if an operation could not proceed due to
// the lack of a particular condition.
func IsFailedPrecondition(err error) bool {
	return errors.Cause(err) == ErrFailedPrecondition
}
|
||||||
|
|
||||||
|
// IsUnavailable returns true if the error is due to a resource being
// unavailable.
func IsUnavailable(err error) bool {
	return errors.Cause(err) == ErrUnavailable
}
|
||||||
|
|
||||||
|
// IsNotImplemented returns true if the error is due to the operation not
// being implemented.
func IsNotImplemented(err error) bool {
	return errors.Cause(err) == ErrNotImplemented
}
|
138
vendor/github.com/containerd/containerd/errdefs/grpc.go
generated
vendored
Normal file
138
vendor/github.com/containerd/containerd/errdefs/grpc.go
generated
vendored
Normal file
|
@ -0,0 +1,138 @@
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package errdefs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ToGRPC will attempt to map the backend containerd error into a grpc error,
|
||||||
|
// using the original error message as a description.
|
||||||
|
//
|
||||||
|
// Further information may be extracted from certain errors depending on their
|
||||||
|
// type.
|
||||||
|
//
|
||||||
|
// If the error is unmapped, the original error will be returned to be handled
|
||||||
|
// by the regular grpc error handling stack.
|
||||||
|
func ToGRPC(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if isGRPCError(err) {
|
||||||
|
// error has already been mapped to grpc
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case IsInvalidArgument(err):
|
||||||
|
return status.Errorf(codes.InvalidArgument, err.Error())
|
||||||
|
case IsNotFound(err):
|
||||||
|
return status.Errorf(codes.NotFound, err.Error())
|
||||||
|
case IsAlreadyExists(err):
|
||||||
|
return status.Errorf(codes.AlreadyExists, err.Error())
|
||||||
|
case IsFailedPrecondition(err):
|
||||||
|
return status.Errorf(codes.FailedPrecondition, err.Error())
|
||||||
|
case IsUnavailable(err):
|
||||||
|
return status.Errorf(codes.Unavailable, err.Error())
|
||||||
|
case IsNotImplemented(err):
|
||||||
|
return status.Errorf(codes.Unimplemented, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToGRPCf maps the error to grpc error codes, assembling the formatting string
// and combining it with the target error string.
//
// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...))
func ToGRPCf(err error, format string, args ...interface{}) error {
	return ToGRPC(errors.Wrapf(err, format, args...))
}
|
||||||
|
|
||||||
|
// FromGRPC returns the underlying error from a grpc service based on the grpc error code
|
||||||
|
func FromGRPC(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var cls error // divide these into error classes, becomes the cause
|
||||||
|
|
||||||
|
switch code(err) {
|
||||||
|
case codes.InvalidArgument:
|
||||||
|
cls = ErrInvalidArgument
|
||||||
|
case codes.AlreadyExists:
|
||||||
|
cls = ErrAlreadyExists
|
||||||
|
case codes.NotFound:
|
||||||
|
cls = ErrNotFound
|
||||||
|
case codes.Unavailable:
|
||||||
|
cls = ErrUnavailable
|
||||||
|
case codes.FailedPrecondition:
|
||||||
|
cls = ErrFailedPrecondition
|
||||||
|
case codes.Unimplemented:
|
||||||
|
cls = ErrNotImplemented
|
||||||
|
default:
|
||||||
|
cls = ErrUnknown
|
||||||
|
}
|
||||||
|
|
||||||
|
msg := rebaseMessage(cls, err)
|
||||||
|
if msg != "" {
|
||||||
|
err = errors.Wrapf(cls, msg)
|
||||||
|
} else {
|
||||||
|
err = errors.WithStack(cls)
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// rebaseMessage removes the repeats for an error at the end of an error
|
||||||
|
// string. This will happen when taking an error over grpc then remapping it.
|
||||||
|
//
|
||||||
|
// Effectively, we just remove the string of cls from the end of err if it
|
||||||
|
// appears there.
|
||||||
|
func rebaseMessage(cls error, err error) string {
|
||||||
|
desc := errDesc(err)
|
||||||
|
clss := cls.Error()
|
||||||
|
if desc == clss {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.TrimSuffix(desc, ": "+clss)
|
||||||
|
}
|
||||||
|
|
||||||
|
// isGRPCError reports whether err already carries a grpc status.
func isGRPCError(err error) bool {
	_, ok := status.FromError(err)
	return ok
}
|
||||||
|
|
||||||
|
// code extracts the grpc status code from err, or codes.Unknown when err
// does not carry a grpc status.
func code(err error) codes.Code {
	if s, ok := status.FromError(err); ok {
		return s.Code()
	}
	return codes.Unknown
}
|
||||||
|
|
||||||
|
// errDesc returns the grpc status message of err, falling back to
// err.Error() when err does not carry a grpc status.
func errDesc(err error) string {
	if s, ok := status.FromError(err); ok {
		return s.Message()
	}
	return err.Error()
}
|
16
vendor/github.com/containerd/continuity/AUTHORS
generated
vendored
16
vendor/github.com/containerd/continuity/AUTHORS
generated
vendored
|
@ -1,16 +0,0 @@
|
||||||
Aaron Lehmann <aaron.lehmann@docker.com>
|
|
||||||
Akash Gupta <akagup@microsoft.com>
|
|
||||||
Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
|
|
||||||
Andrew Pennebaker <apennebaker@datapipe.com>
|
|
||||||
Brandon Philips <brandon.philips@coreos.com>
|
|
||||||
Christopher Jones <tophj@linux.vnet.ibm.com>
|
|
||||||
Daniel, Dao Quang Minh <dqminh89@gmail.com>
|
|
||||||
Derek McGowan <derek@mcgstyle.net>
|
|
||||||
Edward Pilatowicz <edward.pilatowicz@oracle.com>
|
|
||||||
Ian Campbell <ijc@docker.com>
|
|
||||||
Justin Cormack <justin.cormack@docker.com>
|
|
||||||
Justin Cummins <sul3n3t@gmail.com>
|
|
||||||
Phil Estes <estesp@gmail.com>
|
|
||||||
Stephen J Day <stephen.day@docker.com>
|
|
||||||
Tobias Klauser <tklauser@distanz.ch>
|
|
||||||
Tonis Tiigi <tonistiigi@gmail.com>
|
|
202
vendor/github.com/containerd/continuity/LICENSE
generated
vendored
202
vendor/github.com/containerd/continuity/LICENSE
generated
vendored
|
@ -1,202 +0,0 @@
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright {yyyy} {name of copyright owner}
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
|
|
101
vendor/github.com/containerd/continuity/pathdriver/path_driver.go
generated
vendored
101
vendor/github.com/containerd/continuity/pathdriver/path_driver.go
generated
vendored
|
@ -1,101 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package pathdriver
|
|
||||||
|
|
||||||
import (
|
|
||||||
"path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PathDriver provides all of the path manipulation functions in a common
|
|
||||||
// interface. The context should call these and never use the `filepath`
|
|
||||||
// package or any other package to manipulate paths.
|
|
||||||
type PathDriver interface {
|
|
||||||
Join(paths ...string) string
|
|
||||||
IsAbs(path string) bool
|
|
||||||
Rel(base, target string) (string, error)
|
|
||||||
Base(path string) string
|
|
||||||
Dir(path string) string
|
|
||||||
Clean(path string) string
|
|
||||||
Split(path string) (dir, file string)
|
|
||||||
Separator() byte
|
|
||||||
Abs(path string) (string, error)
|
|
||||||
Walk(string, filepath.WalkFunc) error
|
|
||||||
FromSlash(path string) string
|
|
||||||
ToSlash(path string) string
|
|
||||||
Match(pattern, name string) (matched bool, err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// pathDriver is a simple default implementation calls the filepath package.
|
|
||||||
type pathDriver struct{}
|
|
||||||
|
|
||||||
// LocalPathDriver is the exported pathDriver struct for convenience.
|
|
||||||
var LocalPathDriver PathDriver = &pathDriver{}
|
|
||||||
|
|
||||||
func (*pathDriver) Join(paths ...string) string {
|
|
||||||
return filepath.Join(paths...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) IsAbs(path string) bool {
|
|
||||||
return filepath.IsAbs(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) Rel(base, target string) (string, error) {
|
|
||||||
return filepath.Rel(base, target)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) Base(path string) string {
|
|
||||||
return filepath.Base(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) Dir(path string) string {
|
|
||||||
return filepath.Dir(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) Clean(path string) string {
|
|
||||||
return filepath.Clean(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) Split(path string) (dir, file string) {
|
|
||||||
return filepath.Split(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) Separator() byte {
|
|
||||||
return filepath.Separator
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) Abs(path string) (string, error) {
|
|
||||||
return filepath.Abs(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Note that filepath.Walk calls os.Stat, so if the context wants to
|
|
||||||
// to call Driver.Stat() for Walk, they need to create a new struct that
|
|
||||||
// overrides this method.
|
|
||||||
func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error {
|
|
||||||
return filepath.Walk(root, walkFn)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) FromSlash(path string) string {
|
|
||||||
return filepath.FromSlash(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) ToSlash(path string) string {
|
|
||||||
return filepath.ToSlash(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) Match(pattern, name string) (bool, error) {
|
|
||||||
return filepath.Match(pattern, name)
|
|
||||||
}
|
|
516
vendor/github.com/coreos/clair/api/v3/clairpb/clair.pb.go
generated
vendored
516
vendor/github.com/coreos/clair/api/v3/clairpb/clair.pb.go
generated
vendored
|
@ -9,6 +9,8 @@ It is generated from these files:
|
||||||
|
|
||||||
It has these top-level messages:
|
It has these top-level messages:
|
||||||
Vulnerability
|
Vulnerability
|
||||||
|
Detector
|
||||||
|
Namespace
|
||||||
Feature
|
Feature
|
||||||
Layer
|
Layer
|
||||||
ClairStatus
|
ClairStatus
|
||||||
|
@ -48,12 +50,37 @@ var _ = math.Inf
|
||||||
// proto package needs to be updated.
|
// proto package needs to be updated.
|
||||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
|
type Detector_DType int32
|
||||||
|
|
||||||
|
const (
|
||||||
|
Detector_DETECTOR_D_TYPE_INVALID Detector_DType = 0
|
||||||
|
Detector_DETECTOR_D_TYPE_NAMESPACE Detector_DType = 1
|
||||||
|
Detector_DETECTOR_D_TYPE_FEATURE Detector_DType = 2
|
||||||
|
)
|
||||||
|
|
||||||
|
var Detector_DType_name = map[int32]string{
|
||||||
|
0: "DETECTOR_D_TYPE_INVALID",
|
||||||
|
1: "DETECTOR_D_TYPE_NAMESPACE",
|
||||||
|
2: "DETECTOR_D_TYPE_FEATURE",
|
||||||
|
}
|
||||||
|
var Detector_DType_value = map[string]int32{
|
||||||
|
"DETECTOR_D_TYPE_INVALID": 0,
|
||||||
|
"DETECTOR_D_TYPE_NAMESPACE": 1,
|
||||||
|
"DETECTOR_D_TYPE_FEATURE": 2,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x Detector_DType) String() string {
|
||||||
|
return proto.EnumName(Detector_DType_name, int32(x))
|
||||||
|
}
|
||||||
|
func (Detector_DType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} }
|
||||||
|
|
||||||
type Vulnerability struct {
|
type Vulnerability struct {
|
||||||
// The name of the vulnerability.
|
// The name of the vulnerability.
|
||||||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||||
// The name of the namespace in which the vulnerability was detected.
|
// The name of the namespace in which the vulnerability was detected.
|
||||||
NamespaceName string `protobuf:"bytes,2,opt,name=namespace_name,json=namespaceName" json:"namespace_name,omitempty"`
|
NamespaceName string `protobuf:"bytes,2,opt,name=namespace_name,json=namespaceName" json:"namespace_name,omitempty"`
|
||||||
// A description of the vulnerability according to the source for the namespace.
|
// A description of the vulnerability according to the source for the
|
||||||
|
// namespace.
|
||||||
Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
|
Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
|
||||||
// A link to the vulnerability according to the source for the namespace.
|
// A link to the vulnerability according to the source for the namespace.
|
||||||
Link string `protobuf:"bytes,4,opt,name=link" json:"link,omitempty"`
|
Link string `protobuf:"bytes,4,opt,name=link" json:"link,omitempty"`
|
||||||
|
@ -130,23 +157,91 @@ func (m *Vulnerability) GetAffectedVersions() []*Feature {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type Detector struct {
|
||||||
|
// The name of the detector.
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||||
|
// The version of the detector.
|
||||||
|
Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
|
||||||
|
// The type of the detector.
|
||||||
|
Dtype Detector_DType `protobuf:"varint,3,opt,name=dtype,enum=coreos.clair.Detector_DType" json:"dtype,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Detector) Reset() { *m = Detector{} }
|
||||||
|
func (m *Detector) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Detector) ProtoMessage() {}
|
||||||
|
func (*Detector) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||||
|
|
||||||
|
func (m *Detector) GetName() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Name
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Detector) GetVersion() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Version
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Detector) GetDtype() Detector_DType {
|
||||||
|
if m != nil {
|
||||||
|
return m.Dtype
|
||||||
|
}
|
||||||
|
return Detector_DETECTOR_D_TYPE_INVALID
|
||||||
|
}
|
||||||
|
|
||||||
|
type Namespace struct {
|
||||||
|
// The name of the namespace.
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||||
|
// The detector used to detect the namespace. This only exists when present
|
||||||
|
// in an Ancestry Feature.
|
||||||
|
Detector *Detector `protobuf:"bytes,2,opt,name=detector" json:"detector,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Namespace) Reset() { *m = Namespace{} }
|
||||||
|
func (m *Namespace) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Namespace) ProtoMessage() {}
|
||||||
|
func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||||
|
|
||||||
|
func (m *Namespace) GetName() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Name
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Namespace) GetDetector() *Detector {
|
||||||
|
if m != nil {
|
||||||
|
return m.Detector
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
type Feature struct {
|
type Feature struct {
|
||||||
// The name of the feature.
|
// The name of the feature.
|
||||||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||||
// The name of the namespace in which the feature is detected.
|
// The namespace in which the feature is detected.
|
||||||
NamespaceName string `protobuf:"bytes,2,opt,name=namespace_name,json=namespaceName" json:"namespace_name,omitempty"`
|
Namespace *Namespace `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"`
|
||||||
// The specific version of this feature.
|
// The specific version of this feature.
|
||||||
Version string `protobuf:"bytes,3,opt,name=version" json:"version,omitempty"`
|
Version string `protobuf:"bytes,3,opt,name=version" json:"version,omitempty"`
|
||||||
// The format used to parse version numbers for the feature.
|
// The format used to parse version numbers for the feature.
|
||||||
VersionFormat string `protobuf:"bytes,4,opt,name=version_format,json=versionFormat" json:"version_format,omitempty"`
|
VersionFormat string `protobuf:"bytes,4,opt,name=version_format,json=versionFormat" json:"version_format,omitempty"`
|
||||||
|
// The detector used to detect this feature. This only exists when present
|
||||||
|
// in an Ancestry.
|
||||||
|
Detector *Detector `protobuf:"bytes,5,opt,name=detector" json:"detector,omitempty"`
|
||||||
// The list of vulnerabilities that affect the feature.
|
// The list of vulnerabilities that affect the feature.
|
||||||
Vulnerabilities []*Vulnerability `protobuf:"bytes,5,rep,name=vulnerabilities" json:"vulnerabilities,omitempty"`
|
Vulnerabilities []*Vulnerability `protobuf:"bytes,6,rep,name=vulnerabilities" json:"vulnerabilities,omitempty"`
|
||||||
|
// The feature type indicates if the feature represents a source package or
|
||||||
|
// binary package.
|
||||||
|
FeatureType string `protobuf:"bytes,7,opt,name=feature_type,json=featureType" json:"feature_type,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Feature) Reset() { *m = Feature{} }
|
func (m *Feature) Reset() { *m = Feature{} }
|
||||||
func (m *Feature) String() string { return proto.CompactTextString(m) }
|
func (m *Feature) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Feature) ProtoMessage() {}
|
func (*Feature) ProtoMessage() {}
|
||||||
func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
|
||||||
|
|
||||||
func (m *Feature) GetName() string {
|
func (m *Feature) GetName() string {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
|
@ -155,11 +250,11 @@ func (m *Feature) GetName() string {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Feature) GetNamespaceName() string {
|
func (m *Feature) GetNamespace() *Namespace {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
return m.NamespaceName
|
return m.Namespace
|
||||||
}
|
}
|
||||||
return ""
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Feature) GetVersion() string {
|
func (m *Feature) GetVersion() string {
|
||||||
|
@ -176,6 +271,13 @@ func (m *Feature) GetVersionFormat() string {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *Feature) GetDetector() *Detector {
|
||||||
|
if m != nil {
|
||||||
|
return m.Detector
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func (m *Feature) GetVulnerabilities() []*Vulnerability {
|
func (m *Feature) GetVulnerabilities() []*Vulnerability {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
return m.Vulnerabilities
|
return m.Vulnerabilities
|
||||||
|
@ -183,6 +285,13 @@ func (m *Feature) GetVulnerabilities() []*Vulnerability {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *Feature) GetFeatureType() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.FeatureType
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
type Layer struct {
|
type Layer struct {
|
||||||
// The sha256 tarsum for the layer.
|
// The sha256 tarsum for the layer.
|
||||||
Hash string `protobuf:"bytes,1,opt,name=hash" json:"hash,omitempty"`
|
Hash string `protobuf:"bytes,1,opt,name=hash" json:"hash,omitempty"`
|
||||||
|
@ -191,7 +300,7 @@ type Layer struct {
|
||||||
func (m *Layer) Reset() { *m = Layer{} }
|
func (m *Layer) Reset() { *m = Layer{} }
|
||||||
func (m *Layer) String() string { return proto.CompactTextString(m) }
|
func (m *Layer) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Layer) ProtoMessage() {}
|
func (*Layer) ProtoMessage() {}
|
||||||
func (*Layer) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
func (*Layer) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
|
||||||
|
|
||||||
func (m *Layer) GetHash() string {
|
func (m *Layer) GetHash() string {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
|
@ -201,27 +310,18 @@ func (m *Layer) GetHash() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
type ClairStatus struct {
|
type ClairStatus struct {
|
||||||
// The configured list of feature listers used to scan an ancestry.
|
// The implemented detectors in this Clair instance
|
||||||
Listers []string `protobuf:"bytes,1,rep,name=listers" json:"listers,omitempty"`
|
Detectors []*Detector `protobuf:"bytes,1,rep,name=detectors" json:"detectors,omitempty"`
|
||||||
// The configured list of namespace detectors used to scan an ancestry.
|
|
||||||
Detectors []string `protobuf:"bytes,2,rep,name=detectors" json:"detectors,omitempty"`
|
|
||||||
// The time at which the updater last ran.
|
// The time at which the updater last ran.
|
||||||
LastUpdateTime *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=last_update_time,json=lastUpdateTime" json:"last_update_time,omitempty"`
|
LastUpdateTime *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=last_update_time,json=lastUpdateTime" json:"last_update_time,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *ClairStatus) Reset() { *m = ClairStatus{} }
|
func (m *ClairStatus) Reset() { *m = ClairStatus{} }
|
||||||
func (m *ClairStatus) String() string { return proto.CompactTextString(m) }
|
func (m *ClairStatus) String() string { return proto.CompactTextString(m) }
|
||||||
func (*ClairStatus) ProtoMessage() {}
|
func (*ClairStatus) ProtoMessage() {}
|
||||||
func (*ClairStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
|
func (*ClairStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
|
||||||
|
|
||||||
func (m *ClairStatus) GetListers() []string {
|
func (m *ClairStatus) GetDetectors() []*Detector {
|
||||||
if m != nil {
|
|
||||||
return m.Listers
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ClairStatus) GetDetectors() []string {
|
|
||||||
if m != nil {
|
if m != nil {
|
||||||
return m.Detectors
|
return m.Detectors
|
||||||
}
|
}
|
||||||
|
@ -243,7 +343,7 @@ type GetAncestryRequest struct {
|
||||||
func (m *GetAncestryRequest) Reset() { *m = GetAncestryRequest{} }
|
func (m *GetAncestryRequest) Reset() { *m = GetAncestryRequest{} }
|
||||||
func (m *GetAncestryRequest) String() string { return proto.CompactTextString(m) }
|
func (m *GetAncestryRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*GetAncestryRequest) ProtoMessage() {}
|
func (*GetAncestryRequest) ProtoMessage() {}
|
||||||
func (*GetAncestryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
|
func (*GetAncestryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
|
||||||
|
|
||||||
func (m *GetAncestryRequest) GetAncestryName() string {
|
func (m *GetAncestryRequest) GetAncestryName() string {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
|
@ -255,14 +355,14 @@ func (m *GetAncestryRequest) GetAncestryName() string {
|
||||||
type GetAncestryResponse struct {
|
type GetAncestryResponse struct {
|
||||||
// The ancestry requested.
|
// The ancestry requested.
|
||||||
Ancestry *GetAncestryResponse_Ancestry `protobuf:"bytes,1,opt,name=ancestry" json:"ancestry,omitempty"`
|
Ancestry *GetAncestryResponse_Ancestry `protobuf:"bytes,1,opt,name=ancestry" json:"ancestry,omitempty"`
|
||||||
// The status of Clair at the time of the request.
|
// The status of Clair at the time of the request
|
||||||
Status *ClairStatus `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"`
|
Status *ClairStatus `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *GetAncestryResponse) Reset() { *m = GetAncestryResponse{} }
|
func (m *GetAncestryResponse) Reset() { *m = GetAncestryResponse{} }
|
||||||
func (m *GetAncestryResponse) String() string { return proto.CompactTextString(m) }
|
func (m *GetAncestryResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*GetAncestryResponse) ProtoMessage() {}
|
func (*GetAncestryResponse) ProtoMessage() {}
|
||||||
func (*GetAncestryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
|
func (*GetAncestryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
|
||||||
|
|
||||||
func (m *GetAncestryResponse) GetAncestry() *GetAncestryResponse_Ancestry {
|
func (m *GetAncestryResponse) GetAncestry() *GetAncestryResponse_Ancestry {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
|
@ -289,7 +389,7 @@ func (m *GetAncestryResponse_AncestryLayer) Reset() { *m = GetAncestryRe
|
||||||
func (m *GetAncestryResponse_AncestryLayer) String() string { return proto.CompactTextString(m) }
|
func (m *GetAncestryResponse_AncestryLayer) String() string { return proto.CompactTextString(m) }
|
||||||
func (*GetAncestryResponse_AncestryLayer) ProtoMessage() {}
|
func (*GetAncestryResponse_AncestryLayer) ProtoMessage() {}
|
||||||
func (*GetAncestryResponse_AncestryLayer) Descriptor() ([]byte, []int) {
|
func (*GetAncestryResponse_AncestryLayer) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor0, []int{5, 0}
|
return fileDescriptor0, []int{7, 0}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *GetAncestryResponse_AncestryLayer) GetLayer() *Layer {
|
func (m *GetAncestryResponse_AncestryLayer) GetLayer() *Layer {
|
||||||
|
@ -309,18 +409,14 @@ func (m *GetAncestryResponse_AncestryLayer) GetDetectedFeatures() []*Feature {
|
||||||
type GetAncestryResponse_Ancestry struct {
|
type GetAncestryResponse_Ancestry struct {
|
||||||
// The name of the desired ancestry.
|
// The name of the desired ancestry.
|
||||||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||||
// The configured list of feature listers used to scan this ancestry.
|
|
||||||
ScannedListers []string `protobuf:"bytes,4,rep,name=scanned_listers,json=scannedListers" json:"scanned_listers,omitempty"`
|
|
||||||
// The configured list of namespace detectors used to scan an ancestry.
|
|
||||||
ScannedDetectors []string `protobuf:"bytes,5,rep,name=scanned_detectors,json=scannedDetectors" json:"scanned_detectors,omitempty"`
|
|
||||||
// The list of layers along with detected features in each.
|
// The list of layers along with detected features in each.
|
||||||
Layers []*GetAncestryResponse_AncestryLayer `protobuf:"bytes,6,rep,name=layers" json:"layers,omitempty"`
|
Layers []*GetAncestryResponse_AncestryLayer `protobuf:"bytes,3,rep,name=layers" json:"layers,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *GetAncestryResponse_Ancestry) Reset() { *m = GetAncestryResponse_Ancestry{} }
|
func (m *GetAncestryResponse_Ancestry) Reset() { *m = GetAncestryResponse_Ancestry{} }
|
||||||
func (m *GetAncestryResponse_Ancestry) String() string { return proto.CompactTextString(m) }
|
func (m *GetAncestryResponse_Ancestry) String() string { return proto.CompactTextString(m) }
|
||||||
func (*GetAncestryResponse_Ancestry) ProtoMessage() {}
|
func (*GetAncestryResponse_Ancestry) ProtoMessage() {}
|
||||||
func (*GetAncestryResponse_Ancestry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 1} }
|
func (*GetAncestryResponse_Ancestry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 1} }
|
||||||
|
|
||||||
func (m *GetAncestryResponse_Ancestry) GetName() string {
|
func (m *GetAncestryResponse_Ancestry) GetName() string {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
|
@ -329,20 +425,6 @@ func (m *GetAncestryResponse_Ancestry) GetName() string {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *GetAncestryResponse_Ancestry) GetScannedListers() []string {
|
|
||||||
if m != nil {
|
|
||||||
return m.ScannedListers
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *GetAncestryResponse_Ancestry) GetScannedDetectors() []string {
|
|
||||||
if m != nil {
|
|
||||||
return m.ScannedDetectors
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *GetAncestryResponse_Ancestry) GetLayers() []*GetAncestryResponse_AncestryLayer {
|
func (m *GetAncestryResponse_Ancestry) GetLayers() []*GetAncestryResponse_AncestryLayer {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
return m.Layers
|
return m.Layers
|
||||||
|
@ -364,7 +446,7 @@ type PostAncestryRequest struct {
|
||||||
func (m *PostAncestryRequest) Reset() { *m = PostAncestryRequest{} }
|
func (m *PostAncestryRequest) Reset() { *m = PostAncestryRequest{} }
|
||||||
func (m *PostAncestryRequest) String() string { return proto.CompactTextString(m) }
|
func (m *PostAncestryRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*PostAncestryRequest) ProtoMessage() {}
|
func (*PostAncestryRequest) ProtoMessage() {}
|
||||||
func (*PostAncestryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
|
func (*PostAncestryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
|
||||||
|
|
||||||
func (m *PostAncestryRequest) GetAncestryName() string {
|
func (m *PostAncestryRequest) GetAncestryName() string {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
|
@ -390,9 +472,10 @@ func (m *PostAncestryRequest) GetLayers() []*PostAncestryRequest_PostLayer {
|
||||||
type PostAncestryRequest_PostLayer struct {
|
type PostAncestryRequest_PostLayer struct {
|
||||||
// The hash of the layer.
|
// The hash of the layer.
|
||||||
Hash string `protobuf:"bytes,1,opt,name=hash" json:"hash,omitempty"`
|
Hash string `protobuf:"bytes,1,opt,name=hash" json:"hash,omitempty"`
|
||||||
// The location of the layer (URL or filepath).
|
// The location of the layer (URL or file path).
|
||||||
Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"`
|
Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"`
|
||||||
// Any HTTP Headers that need to be used if requesting a layer over HTTP(S).
|
// Any HTTP Headers that need to be used if requesting a layer over
|
||||||
|
// HTTP(S).
|
||||||
Headers map[string]string `protobuf:"bytes,3,rep,name=headers" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
Headers map[string]string `protobuf:"bytes,3,rep,name=headers" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -400,7 +483,7 @@ func (m *PostAncestryRequest_PostLayer) Reset() { *m = PostAncestryReque
|
||||||
func (m *PostAncestryRequest_PostLayer) String() string { return proto.CompactTextString(m) }
|
func (m *PostAncestryRequest_PostLayer) String() string { return proto.CompactTextString(m) }
|
||||||
func (*PostAncestryRequest_PostLayer) ProtoMessage() {}
|
func (*PostAncestryRequest_PostLayer) ProtoMessage() {}
|
||||||
func (*PostAncestryRequest_PostLayer) Descriptor() ([]byte, []int) {
|
func (*PostAncestryRequest_PostLayer) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor0, []int{6, 0}
|
return fileDescriptor0, []int{8, 0}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *PostAncestryRequest_PostLayer) GetHash() string {
|
func (m *PostAncestryRequest_PostLayer) GetHash() string {
|
||||||
|
@ -432,7 +515,7 @@ type PostAncestryResponse struct {
|
||||||
func (m *PostAncestryResponse) Reset() { *m = PostAncestryResponse{} }
|
func (m *PostAncestryResponse) Reset() { *m = PostAncestryResponse{} }
|
||||||
func (m *PostAncestryResponse) String() string { return proto.CompactTextString(m) }
|
func (m *PostAncestryResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*PostAncestryResponse) ProtoMessage() {}
|
func (*PostAncestryResponse) ProtoMessage() {}
|
||||||
func (*PostAncestryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
|
func (*PostAncestryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
|
||||||
|
|
||||||
func (m *PostAncestryResponse) GetStatus() *ClairStatus {
|
func (m *PostAncestryResponse) GetStatus() *ClairStatus {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
|
@ -457,7 +540,7 @@ type GetNotificationRequest struct {
|
||||||
func (m *GetNotificationRequest) Reset() { *m = GetNotificationRequest{} }
|
func (m *GetNotificationRequest) Reset() { *m = GetNotificationRequest{} }
|
||||||
func (m *GetNotificationRequest) String() string { return proto.CompactTextString(m) }
|
func (m *GetNotificationRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*GetNotificationRequest) ProtoMessage() {}
|
func (*GetNotificationRequest) ProtoMessage() {}
|
||||||
func (*GetNotificationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
|
func (*GetNotificationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
|
||||||
|
|
||||||
func (m *GetNotificationRequest) GetOldVulnerabilityPage() string {
|
func (m *GetNotificationRequest) GetOldVulnerabilityPage() string {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
|
@ -495,7 +578,7 @@ type GetNotificationResponse struct {
|
||||||
func (m *GetNotificationResponse) Reset() { *m = GetNotificationResponse{} }
|
func (m *GetNotificationResponse) Reset() { *m = GetNotificationResponse{} }
|
||||||
func (m *GetNotificationResponse) String() string { return proto.CompactTextString(m) }
|
func (m *GetNotificationResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*GetNotificationResponse) ProtoMessage() {}
|
func (*GetNotificationResponse) ProtoMessage() {}
|
||||||
func (*GetNotificationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
|
func (*GetNotificationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
|
||||||
|
|
||||||
func (m *GetNotificationResponse) GetNotification() *GetNotificationResponse_Notification {
|
func (m *GetNotificationResponse) GetNotification() *GetNotificationResponse_Notification {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
|
@ -513,9 +596,11 @@ type GetNotificationResponse_Notification struct {
|
||||||
Notified string `protobuf:"bytes,3,opt,name=notified" json:"notified,omitempty"`
|
Notified string `protobuf:"bytes,3,opt,name=notified" json:"notified,omitempty"`
|
||||||
// The time at which a notification has been deleted.
|
// The time at which a notification has been deleted.
|
||||||
Deleted string `protobuf:"bytes,4,opt,name=deleted" json:"deleted,omitempty"`
|
Deleted string `protobuf:"bytes,4,opt,name=deleted" json:"deleted,omitempty"`
|
||||||
// The previous vulnerability and a paginated view of the ancestries it affects.
|
// The previous vulnerability and a paginated view of the ancestries it
|
||||||
|
// affects.
|
||||||
Old *PagedVulnerableAncestries `protobuf:"bytes,5,opt,name=old" json:"old,omitempty"`
|
Old *PagedVulnerableAncestries `protobuf:"bytes,5,opt,name=old" json:"old,omitempty"`
|
||||||
// The newly updated vulnerability and a paginated view of the ancestries it affects.
|
// The newly updated vulnerability and a paginated view of the
|
||||||
|
// ancestries it affects.
|
||||||
New *PagedVulnerableAncestries `protobuf:"bytes,6,opt,name=new" json:"new,omitempty"`
|
New *PagedVulnerableAncestries `protobuf:"bytes,6,opt,name=new" json:"new,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -523,7 +608,7 @@ func (m *GetNotificationResponse_Notification) Reset() { *m = GetNotific
|
||||||
func (m *GetNotificationResponse_Notification) String() string { return proto.CompactTextString(m) }
|
func (m *GetNotificationResponse_Notification) String() string { return proto.CompactTextString(m) }
|
||||||
func (*GetNotificationResponse_Notification) ProtoMessage() {}
|
func (*GetNotificationResponse_Notification) ProtoMessage() {}
|
||||||
func (*GetNotificationResponse_Notification) Descriptor() ([]byte, []int) {
|
func (*GetNotificationResponse_Notification) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor0, []int{9, 0}
|
return fileDescriptor0, []int{11, 0}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *GetNotificationResponse_Notification) GetName() string {
|
func (m *GetNotificationResponse_Notification) GetName() string {
|
||||||
|
@ -585,7 +670,7 @@ type PagedVulnerableAncestries struct {
|
||||||
func (m *PagedVulnerableAncestries) Reset() { *m = PagedVulnerableAncestries{} }
|
func (m *PagedVulnerableAncestries) Reset() { *m = PagedVulnerableAncestries{} }
|
||||||
func (m *PagedVulnerableAncestries) String() string { return proto.CompactTextString(m) }
|
func (m *PagedVulnerableAncestries) String() string { return proto.CompactTextString(m) }
|
||||||
func (*PagedVulnerableAncestries) ProtoMessage() {}
|
func (*PagedVulnerableAncestries) ProtoMessage() {}
|
||||||
func (*PagedVulnerableAncestries) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
|
func (*PagedVulnerableAncestries) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
|
||||||
|
|
||||||
func (m *PagedVulnerableAncestries) GetCurrentPage() string {
|
func (m *PagedVulnerableAncestries) GetCurrentPage() string {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
|
@ -623,8 +708,9 @@ func (m *PagedVulnerableAncestries) GetAncestries() []*PagedVulnerableAncestries
|
||||||
}
|
}
|
||||||
|
|
||||||
type PagedVulnerableAncestries_IndexedAncestryName struct {
|
type PagedVulnerableAncestries_IndexedAncestryName struct {
|
||||||
// The index is an ever increasing number associated with the particular ancestry.
|
// The index is an ever increasing number associated with the particular
|
||||||
// This is useful if you're processing notifications, and need to keep track of the progress of paginating the results.
|
// ancestry. This is useful if you're processing notifications, and need
|
||||||
|
// to keep track of the progress of paginating the results.
|
||||||
Index int32 `protobuf:"varint,1,opt,name=index" json:"index,omitempty"`
|
Index int32 `protobuf:"varint,1,opt,name=index" json:"index,omitempty"`
|
||||||
// The name of the ancestry.
|
// The name of the ancestry.
|
||||||
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
|
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
|
||||||
|
@ -638,7 +724,7 @@ func (m *PagedVulnerableAncestries_IndexedAncestryName) String() string {
|
||||||
}
|
}
|
||||||
func (*PagedVulnerableAncestries_IndexedAncestryName) ProtoMessage() {}
|
func (*PagedVulnerableAncestries_IndexedAncestryName) ProtoMessage() {}
|
||||||
func (*PagedVulnerableAncestries_IndexedAncestryName) Descriptor() ([]byte, []int) {
|
func (*PagedVulnerableAncestries_IndexedAncestryName) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor0, []int{10, 0}
|
return fileDescriptor0, []int{12, 0}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *PagedVulnerableAncestries_IndexedAncestryName) GetIndex() int32 {
|
func (m *PagedVulnerableAncestries_IndexedAncestryName) GetIndex() int32 {
|
||||||
|
@ -663,7 +749,7 @@ type MarkNotificationAsReadRequest struct {
|
||||||
func (m *MarkNotificationAsReadRequest) Reset() { *m = MarkNotificationAsReadRequest{} }
|
func (m *MarkNotificationAsReadRequest) Reset() { *m = MarkNotificationAsReadRequest{} }
|
||||||
func (m *MarkNotificationAsReadRequest) String() string { return proto.CompactTextString(m) }
|
func (m *MarkNotificationAsReadRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*MarkNotificationAsReadRequest) ProtoMessage() {}
|
func (*MarkNotificationAsReadRequest) ProtoMessage() {}
|
||||||
func (*MarkNotificationAsReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
|
func (*MarkNotificationAsReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
|
||||||
|
|
||||||
func (m *MarkNotificationAsReadRequest) GetName() string {
|
func (m *MarkNotificationAsReadRequest) GetName() string {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
|
@ -678,7 +764,7 @@ type MarkNotificationAsReadResponse struct {
|
||||||
func (m *MarkNotificationAsReadResponse) Reset() { *m = MarkNotificationAsReadResponse{} }
|
func (m *MarkNotificationAsReadResponse) Reset() { *m = MarkNotificationAsReadResponse{} }
|
||||||
func (m *MarkNotificationAsReadResponse) String() string { return proto.CompactTextString(m) }
|
func (m *MarkNotificationAsReadResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*MarkNotificationAsReadResponse) ProtoMessage() {}
|
func (*MarkNotificationAsReadResponse) ProtoMessage() {}
|
||||||
func (*MarkNotificationAsReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
|
func (*MarkNotificationAsReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
|
||||||
|
|
||||||
type GetStatusRequest struct {
|
type GetStatusRequest struct {
|
||||||
}
|
}
|
||||||
|
@ -686,7 +772,7 @@ type GetStatusRequest struct {
|
||||||
func (m *GetStatusRequest) Reset() { *m = GetStatusRequest{} }
|
func (m *GetStatusRequest) Reset() { *m = GetStatusRequest{} }
|
||||||
func (m *GetStatusRequest) String() string { return proto.CompactTextString(m) }
|
func (m *GetStatusRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*GetStatusRequest) ProtoMessage() {}
|
func (*GetStatusRequest) ProtoMessage() {}
|
||||||
func (*GetStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
|
func (*GetStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
|
||||||
|
|
||||||
type GetStatusResponse struct {
|
type GetStatusResponse struct {
|
||||||
// The status of the current Clair instance.
|
// The status of the current Clair instance.
|
||||||
|
@ -696,7 +782,7 @@ type GetStatusResponse struct {
|
||||||
func (m *GetStatusResponse) Reset() { *m = GetStatusResponse{} }
|
func (m *GetStatusResponse) Reset() { *m = GetStatusResponse{} }
|
||||||
func (m *GetStatusResponse) String() string { return proto.CompactTextString(m) }
|
func (m *GetStatusResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*GetStatusResponse) ProtoMessage() {}
|
func (*GetStatusResponse) ProtoMessage() {}
|
||||||
func (*GetStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
|
func (*GetStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
|
||||||
|
|
||||||
func (m *GetStatusResponse) GetStatus() *ClairStatus {
|
func (m *GetStatusResponse) GetStatus() *ClairStatus {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
|
@ -707,6 +793,8 @@ func (m *GetStatusResponse) GetStatus() *ClairStatus {
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterType((*Vulnerability)(nil), "coreos.clair.Vulnerability")
|
proto.RegisterType((*Vulnerability)(nil), "coreos.clair.Vulnerability")
|
||||||
|
proto.RegisterType((*Detector)(nil), "coreos.clair.Detector")
|
||||||
|
proto.RegisterType((*Namespace)(nil), "coreos.clair.Namespace")
|
||||||
proto.RegisterType((*Feature)(nil), "coreos.clair.Feature")
|
proto.RegisterType((*Feature)(nil), "coreos.clair.Feature")
|
||||||
proto.RegisterType((*Layer)(nil), "coreos.clair.Layer")
|
proto.RegisterType((*Layer)(nil), "coreos.clair.Layer")
|
||||||
proto.RegisterType((*ClairStatus)(nil), "coreos.clair.ClairStatus")
|
proto.RegisterType((*ClairStatus)(nil), "coreos.clair.ClairStatus")
|
||||||
|
@ -726,6 +814,7 @@ func init() {
|
||||||
proto.RegisterType((*MarkNotificationAsReadResponse)(nil), "coreos.clair.MarkNotificationAsReadResponse")
|
proto.RegisterType((*MarkNotificationAsReadResponse)(nil), "coreos.clair.MarkNotificationAsReadResponse")
|
||||||
proto.RegisterType((*GetStatusRequest)(nil), "coreos.clair.GetStatusRequest")
|
proto.RegisterType((*GetStatusRequest)(nil), "coreos.clair.GetStatusRequest")
|
||||||
proto.RegisterType((*GetStatusResponse)(nil), "coreos.clair.GetStatusResponse")
|
proto.RegisterType((*GetStatusResponse)(nil), "coreos.clair.GetStatusResponse")
|
||||||
|
proto.RegisterEnum("coreos.clair.Detector_DType", Detector_DType_name, Detector_DType_value)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
@ -837,6 +926,72 @@ var _AncestryService_serviceDesc = grpc.ServiceDesc{
|
||||||
Metadata: "api/v3/clairpb/clair.proto",
|
Metadata: "api/v3/clairpb/clair.proto",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Client API for StatusService service
|
||||||
|
|
||||||
|
type StatusServiceClient interface {
|
||||||
|
// The RPC used to show the internal state of current Clair instance.
|
||||||
|
GetStatus(ctx context.Context, in *GetStatusRequest, opts ...grpc.CallOption) (*GetStatusResponse, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type statusServiceClient struct {
|
||||||
|
cc *grpc.ClientConn
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewStatusServiceClient(cc *grpc.ClientConn) StatusServiceClient {
|
||||||
|
return &statusServiceClient{cc}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *statusServiceClient) GetStatus(ctx context.Context, in *GetStatusRequest, opts ...grpc.CallOption) (*GetStatusResponse, error) {
|
||||||
|
out := new(GetStatusResponse)
|
||||||
|
err := grpc.Invoke(ctx, "/coreos.clair.StatusService/GetStatus", in, out, c.cc, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Server API for StatusService service
|
||||||
|
|
||||||
|
type StatusServiceServer interface {
|
||||||
|
// The RPC used to show the internal state of current Clair instance.
|
||||||
|
GetStatus(context.Context, *GetStatusRequest) (*GetStatusResponse, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func RegisterStatusServiceServer(s *grpc.Server, srv StatusServiceServer) {
|
||||||
|
s.RegisterService(&_StatusService_serviceDesc, srv)
|
||||||
|
}
|
||||||
|
|
||||||
|
func _StatusService_GetStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||||
|
in := new(GetStatusRequest)
|
||||||
|
if err := dec(in); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if interceptor == nil {
|
||||||
|
return srv.(StatusServiceServer).GetStatus(ctx, in)
|
||||||
|
}
|
||||||
|
info := &grpc.UnaryServerInfo{
|
||||||
|
Server: srv,
|
||||||
|
FullMethod: "/coreos.clair.StatusService/GetStatus",
|
||||||
|
}
|
||||||
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
|
return srv.(StatusServiceServer).GetStatus(ctx, req.(*GetStatusRequest))
|
||||||
|
}
|
||||||
|
return interceptor(ctx, in, info, handler)
|
||||||
|
}
|
||||||
|
|
||||||
|
var _StatusService_serviceDesc = grpc.ServiceDesc{
|
||||||
|
ServiceName: "coreos.clair.StatusService",
|
||||||
|
HandlerType: (*StatusServiceServer)(nil),
|
||||||
|
Methods: []grpc.MethodDesc{
|
||||||
|
{
|
||||||
|
MethodName: "GetStatus",
|
||||||
|
Handler: _StatusService_GetStatus_Handler,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Streams: []grpc.StreamDesc{},
|
||||||
|
Metadata: "api/v3/clairpb/clair.proto",
|
||||||
|
}
|
||||||
|
|
||||||
// Client API for NotificationService service
|
// Client API for NotificationService service
|
||||||
|
|
||||||
type NotificationServiceClient interface {
|
type NotificationServiceClient interface {
|
||||||
|
@ -938,152 +1093,93 @@ var _NotificationService_serviceDesc = grpc.ServiceDesc{
|
||||||
Metadata: "api/v3/clairpb/clair.proto",
|
Metadata: "api/v3/clairpb/clair.proto",
|
||||||
}
|
}
|
||||||
|
|
||||||
// Client API for StatusService service
|
|
||||||
|
|
||||||
type StatusServiceClient interface {
|
|
||||||
// The RPC used to show the internal state of current Clair instance.
|
|
||||||
GetStatus(ctx context.Context, in *GetStatusRequest, opts ...grpc.CallOption) (*GetStatusResponse, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type statusServiceClient struct {
|
|
||||||
cc *grpc.ClientConn
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewStatusServiceClient(cc *grpc.ClientConn) StatusServiceClient {
|
|
||||||
return &statusServiceClient{cc}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *statusServiceClient) GetStatus(ctx context.Context, in *GetStatusRequest, opts ...grpc.CallOption) (*GetStatusResponse, error) {
|
|
||||||
out := new(GetStatusResponse)
|
|
||||||
err := grpc.Invoke(ctx, "/coreos.clair.StatusService/GetStatus", in, out, c.cc, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Server API for StatusService service
|
|
||||||
|
|
||||||
type StatusServiceServer interface {
|
|
||||||
// The RPC used to show the internal state of current Clair instance.
|
|
||||||
GetStatus(context.Context, *GetStatusRequest) (*GetStatusResponse, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
func RegisterStatusServiceServer(s *grpc.Server, srv StatusServiceServer) {
|
|
||||||
s.RegisterService(&_StatusService_serviceDesc, srv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func _StatusService_GetStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
|
||||||
in := new(GetStatusRequest)
|
|
||||||
if err := dec(in); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if interceptor == nil {
|
|
||||||
return srv.(StatusServiceServer).GetStatus(ctx, in)
|
|
||||||
}
|
|
||||||
info := &grpc.UnaryServerInfo{
|
|
||||||
Server: srv,
|
|
||||||
FullMethod: "/coreos.clair.StatusService/GetStatus",
|
|
||||||
}
|
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
|
||||||
return srv.(StatusServiceServer).GetStatus(ctx, req.(*GetStatusRequest))
|
|
||||||
}
|
|
||||||
return interceptor(ctx, in, info, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _StatusService_serviceDesc = grpc.ServiceDesc{
|
|
||||||
ServiceName: "coreos.clair.StatusService",
|
|
||||||
HandlerType: (*StatusServiceServer)(nil),
|
|
||||||
Methods: []grpc.MethodDesc{
|
|
||||||
{
|
|
||||||
MethodName: "GetStatus",
|
|
||||||
Handler: _StatusService_GetStatus_Handler,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Streams: []grpc.StreamDesc{},
|
|
||||||
Metadata: "api/v3/clairpb/clair.proto",
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() { proto.RegisterFile("api/v3/clairpb/clair.proto", fileDescriptor0) }
|
func init() { proto.RegisterFile("api/v3/clairpb/clair.proto", fileDescriptor0) }
|
||||||
|
|
||||||
var fileDescriptor0 = []byte{
|
var fileDescriptor0 = []byte{
|
||||||
// 1237 bytes of a gzipped FileDescriptorProto
|
// 1345 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4b, 0x6f, 0x1b, 0xd5,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0x4b, 0x6f, 0xdb, 0xc6,
|
||||||
0x17, 0xd7, 0xd8, 0x71, 0x1c, 0x1f, 0xdb, 0x49, 0x7a, 0x93, 0xa6, 0x93, 0x49, 0x1f, 0xc9, 0xfc,
|
0x16, 0xbe, 0x94, 0x23, 0x4b, 0x3a, 0x92, 0x6c, 0x65, 0xec, 0xd8, 0x32, 0x9d, 0x87, 0xcd, 0x7b,
|
||||||
0xff, 0x55, 0x4b, 0x8b, 0x6c, 0xe1, 0xb2, 0x68, 0xcb, 0x02, 0xa5, 0x8f, 0x84, 0x4a, 0xa5, 0xaa,
|
0x83, 0x9b, 0x9b, 0x5b, 0x48, 0xa8, 0x92, 0x02, 0x49, 0xba, 0x28, 0x14, 0x5b, 0x76, 0x0d, 0x24,
|
||||||
0xa6, 0xd0, 0x05, 0x08, 0x59, 0xd7, 0x33, 0xc7, 0xc9, 0x28, 0xe3, 0x19, 0x33, 0xf7, 0xda, 0x89,
|
0xae, 0x41, 0x3b, 0x06, 0xda, 0xa2, 0x60, 0xc7, 0xe2, 0x91, 0x4d, 0x98, 0x22, 0x59, 0x72, 0x64,
|
||||||
0x55, 0x95, 0x05, 0x5b, 0x76, 0xb0, 0xe0, 0x33, 0xb0, 0xe1, 0x1b, 0xb0, 0x62, 0xcb, 0x02, 0xc1,
|
0x47, 0x08, 0xd2, 0x45, 0x77, 0xdd, 0xb6, 0x8b, 0xfe, 0x86, 0x6e, 0xba, 0xe9, 0x4f, 0xe8, 0xbe,
|
||||||
0x16, 0x76, 0x2c, 0xf8, 0x02, 0xec, 0xd1, 0x7d, 0x4d, 0x66, 0x12, 0xe7, 0xd1, 0xb2, 0xf2, 0x9c,
|
0x8b, 0x76, 0xdb, 0xee, 0xba, 0xe8, 0x1f, 0xe8, 0xa2, 0xbb, 0x62, 0x1e, 0xa4, 0x49, 0x9b, 0x7e,
|
||||||
0xf7, 0xeb, 0x77, 0xcf, 0x49, 0xc0, 0xa1, 0xc3, 0xb0, 0x3d, 0xbe, 0xd3, 0xf6, 0x23, 0x1a, 0xa6,
|
0x24, 0x2b, 0xcd, 0x9c, 0x39, 0xcf, 0x6f, 0xbe, 0x39, 0x87, 0x02, 0x9d, 0x06, 0x4e, 0xfb, 0xe8,
|
||||||
0xc3, 0x9e, 0xfa, 0x6d, 0x0d, 0xd3, 0x84, 0x27, 0xa4, 0xe1, 0x27, 0x29, 0x26, 0xac, 0x25, 0x79,
|
0x41, 0xbb, 0xef, 0x52, 0x27, 0x0c, 0xf6, 0xe4, 0x6f, 0x2b, 0x08, 0x7d, 0xe6, 0x93, 0x5a, 0xdf,
|
||||||
0xce, 0xb5, 0x9d, 0x24, 0xd9, 0x89, 0xb0, 0x2d, 0x65, 0xbd, 0x51, 0xbf, 0xcd, 0xc3, 0x01, 0x32,
|
0x0f, 0xd1, 0x8f, 0x5a, 0x42, 0xa6, 0xdf, 0xd9, 0xf7, 0xfd, 0x7d, 0x17, 0xdb, 0xe2, 0x6c, 0x6f,
|
||||||
0x4e, 0x07, 0x43, 0xa5, 0xee, 0x5c, 0xd6, 0x0a, 0xc2, 0x23, 0x8d, 0xe3, 0x84, 0x53, 0x1e, 0x26,
|
0x34, 0x68, 0x33, 0x67, 0x88, 0x11, 0xa3, 0xc3, 0x40, 0xaa, 0xeb, 0x37, 0x95, 0x02, 0xf7, 0x48,
|
||||||
0x31, 0x53, 0x52, 0xf7, 0xfb, 0x12, 0x34, 0x5f, 0x8e, 0xa2, 0x18, 0x53, 0xda, 0x0b, 0xa3, 0x90,
|
0x3d, 0xcf, 0x67, 0x94, 0x39, 0xbe, 0x17, 0xc9, 0x53, 0xe3, 0xbb, 0x02, 0xd4, 0x77, 0x47, 0xae,
|
||||||
0x4f, 0x08, 0x81, 0x99, 0x98, 0x0e, 0xd0, 0xb6, 0xd6, 0xad, 0x9b, 0x35, 0x4f, 0x7e, 0x93, 0xeb,
|
0x87, 0x21, 0xdd, 0x73, 0x5c, 0x87, 0x8d, 0x09, 0x81, 0x6b, 0x1e, 0x1d, 0x62, 0x53, 0x5b, 0xd2,
|
||||||
0x30, 0x2f, 0x7e, 0xd9, 0x90, 0xfa, 0xd8, 0x95, 0xd2, 0x92, 0x94, 0x36, 0x33, 0xee, 0x33, 0xa1,
|
0xee, 0x55, 0x4c, 0xb1, 0x26, 0x77, 0x61, 0x8a, 0xff, 0x46, 0x01, 0xed, 0xa3, 0x25, 0x4e, 0x0b,
|
||||||
0xb6, 0x0e, 0xf5, 0x00, 0x99, 0x9f, 0x86, 0x43, 0x11, 0xc2, 0x2e, 0x4b, 0x9d, 0x3c, 0x4b, 0x38,
|
0xe2, 0xb4, 0x9e, 0x48, 0x37, 0xb9, 0xda, 0x12, 0x54, 0x6d, 0x8c, 0xfa, 0xa1, 0x13, 0xf0, 0x10,
|
||||||
0x8f, 0xc2, 0x78, 0xcf, 0x9e, 0x51, 0xce, 0xc5, 0x37, 0x71, 0x60, 0x8e, 0xe1, 0x18, 0xd3, 0x90,
|
0xcd, 0x09, 0xa1, 0x93, 0x16, 0x71, 0xe7, 0xae, 0xe3, 0x1d, 0x36, 0xaf, 0x49, 0xe7, 0x7c, 0x4d,
|
||||||
0x4f, 0xec, 0x8a, 0xe4, 0x67, 0xb4, 0x90, 0x0d, 0x90, 0xd3, 0x80, 0x72, 0x6a, 0xcf, 0x2a, 0x99,
|
0x74, 0x28, 0x47, 0x78, 0x84, 0xa1, 0xc3, 0xc6, 0xcd, 0xa2, 0x90, 0x27, 0x7b, 0x7e, 0x36, 0x44,
|
||||||
0xa1, 0xc9, 0x2a, 0xcc, 0xf5, 0xc3, 0x03, 0x0c, 0xba, 0xbd, 0x89, 0x5d, 0x95, 0xb2, 0xaa, 0xa4,
|
0x46, 0x6d, 0xca, 0x68, 0x73, 0x52, 0x9e, 0xc5, 0x7b, 0xb2, 0x00, 0xe5, 0x81, 0xf3, 0x12, 0x6d,
|
||||||
0x1f, 0x4c, 0xc8, 0x03, 0xb8, 0x40, 0xfb, 0x7d, 0xf4, 0x39, 0x06, 0xdd, 0x31, 0xa6, 0x4c, 0x14,
|
0x6b, 0x6f, 0xdc, 0x2c, 0x89, 0xb3, 0x92, 0xd8, 0x3f, 0x1d, 0x93, 0xa7, 0x70, 0x9d, 0x0e, 0x06,
|
||||||
0x6c, 0xcf, 0xad, 0x97, 0x6f, 0xd6, 0x3b, 0x17, 0x5b, 0xf9, 0xf6, 0xb5, 0xb6, 0x90, 0xf2, 0x51,
|
0xd8, 0x67, 0x68, 0x5b, 0x47, 0x18, 0x46, 0xbc, 0xe0, 0x66, 0x79, 0x69, 0xe2, 0x5e, 0xb5, 0x73,
|
||||||
0x8a, 0xde, 0xa2, 0xd1, 0x7f, 0xa9, 0xd5, 0xdd, 0x5f, 0x2c, 0xa8, 0x6a, 0xe9, 0x7f, 0xe9, 0x89,
|
0xa3, 0x95, 0x86, 0xaf, 0xb5, 0x86, 0x94, 0x8d, 0x42, 0x34, 0x1b, 0xb1, 0xfe, 0xae, 0x52, 0x37,
|
||||||
0x0d, 0x55, 0x9d, 0x81, 0xee, 0x87, 0x21, 0x85, 0x03, 0xfd, 0xd9, 0xed, 0x27, 0xe9, 0x80, 0x72,
|
0x7e, 0xd6, 0xa0, 0xbc, 0x8a, 0x0c, 0xfb, 0xcc, 0x0f, 0x73, 0x41, 0x69, 0x42, 0x49, 0xf9, 0x56,
|
||||||
0xdd, 0x95, 0xa6, 0xe6, 0x6e, 0x49, 0x26, 0x79, 0x0c, 0x0b, 0xe3, 0xdc, 0x80, 0x42, 0x64, 0x76,
|
0x68, 0xc4, 0x5b, 0xd2, 0x81, 0xa2, 0xcd, 0xc6, 0x01, 0x0a, 0x04, 0xa6, 0x3a, 0x37, 0xb3, 0x21,
|
||||||
0x45, 0x56, 0xb2, 0x56, 0xac, 0xa4, 0x30, 0x45, 0xef, 0xa8, 0x8d, 0xbb, 0x06, 0x95, 0xa7, 0x74,
|
0x63, 0xa7, 0xad, 0xd5, 0x9d, 0x71, 0x80, 0xa6, 0x54, 0x35, 0x3e, 0x87, 0xa2, 0xd8, 0x93, 0x45,
|
||||||
0x82, 0xa9, 0xa8, 0x65, 0x97, 0xb2, 0x5d, 0x53, 0x8b, 0xf8, 0x76, 0xbf, 0xb1, 0xa0, 0xfe, 0x50,
|
0x98, 0x5f, 0xed, 0xed, 0xf4, 0x56, 0x76, 0x3e, 0x32, 0xad, 0x55, 0x6b, 0xe7, 0xe3, 0xad, 0x9e,
|
||||||
0x78, 0x79, 0xc1, 0x29, 0x1f, 0x31, 0x91, 0x74, 0x14, 0x32, 0x8e, 0x29, 0xb3, 0xad, 0xf5, 0xb2,
|
0xb5, 0xb1, 0xb9, 0xdb, 0x7d, 0xb6, 0xb1, 0xda, 0xf8, 0x17, 0xb9, 0x05, 0x0b, 0xa7, 0x0f, 0x37,
|
||||||
0x48, 0x5a, 0x93, 0xe4, 0x32, 0xd4, 0x02, 0xe4, 0xe8, 0xf3, 0x24, 0x65, 0x76, 0x49, 0xca, 0x0e,
|
0xbb, 0xcf, 0x7b, 0xdb, 0x5b, 0xdd, 0x95, 0x5e, 0x43, 0xcb, 0xb3, 0x5d, 0xeb, 0x75, 0x77, 0x5e,
|
||||||
0x19, 0xe4, 0x11, 0x2c, 0x46, 0x94, 0xf1, 0xee, 0x68, 0x18, 0x50, 0x8e, 0x5d, 0x01, 0x45, 0x59,
|
0x98, 0xbd, 0x46, 0xc1, 0xd8, 0x86, 0xca, 0x66, 0x7c, 0x5d, 0xb9, 0x05, 0x75, 0xa0, 0x6c, 0xab,
|
||||||
0x75, 0xbd, 0xe3, 0xb4, 0x14, 0x0c, 0x5b, 0x06, 0xa7, 0xad, 0x4f, 0x0c, 0x4e, 0xbd, 0x79, 0x61,
|
0xdc, 0x44, 0x45, 0xd5, 0xce, 0x5c, 0x7e, 0xe6, 0x66, 0xa2, 0x67, 0xfc, 0x58, 0x80, 0x92, 0xc2,
|
||||||
0xf3, 0xa9, 0x34, 0x11, 0x4c, 0xf7, 0x1e, 0x90, 0x6d, 0xe4, 0x9b, 0xb1, 0x8f, 0x8c, 0xa7, 0x13,
|
0x30, 0xd7, 0xe7, 0x7b, 0x50, 0x49, 0x38, 0xa2, 0x9c, 0xce, 0x67, 0x9d, 0x26, 0x39, 0x99, 0x27,
|
||||||
0x0f, 0xbf, 0x1c, 0x21, 0xe3, 0xe4, 0x7f, 0xd0, 0xa4, 0x9a, 0xd5, 0xcd, 0x0d, 0xa3, 0x61, 0x98,
|
0x9a, 0x69, 0x6c, 0x27, 0xb2, 0xd8, 0xde, 0x85, 0x29, 0xb5, 0xb4, 0x06, 0x7e, 0x38, 0xa4, 0x4c,
|
||||||
0xa2, 0xdb, 0xee, 0xaf, 0x65, 0x58, 0x2a, 0xd8, 0xb2, 0x61, 0x12, 0x33, 0x24, 0x5b, 0x30, 0x67,
|
0x71, 0xa9, 0xae, 0xa4, 0x6b, 0x42, 0x98, 0xa9, 0xa5, 0x78, 0xb5, 0x5a, 0x48, 0x0f, 0xa6, 0x8f,
|
||||||
0xf4, 0xa4, 0x5d, 0xbd, 0x73, 0xab, 0xd8, 0xbd, 0x29, 0x46, 0xad, 0x8c, 0x91, 0xd9, 0x92, 0xf7,
|
0x52, 0x4f, 0xc1, 0xc1, 0xa8, 0x39, 0x29, 0x38, 0xb3, 0x98, 0x35, 0xcd, 0xbc, 0x17, 0xf3, 0xb4,
|
||||||
0x60, 0x96, 0xc9, 0x16, 0xc9, 0x61, 0xd7, 0x3b, 0xab, 0x45, 0x2f, 0xb9, 0x1e, 0x7a, 0x5a, 0xd1,
|
0x0d, 0x59, 0x86, 0xda, 0x40, 0x22, 0x62, 0x09, 0x12, 0x48, 0x6e, 0x56, 0x95, 0x8c, 0xdf, 0xb1,
|
||||||
0xf9, 0x0a, 0x9a, 0xc6, 0x91, 0x1a, 0xc0, 0x3b, 0x50, 0x89, 0xc4, 0x87, 0x4e, 0x64, 0xa9, 0xe8,
|
0xb1, 0x08, 0xc5, 0x67, 0x74, 0x8c, 0x82, 0x57, 0x07, 0x34, 0x3a, 0x88, 0x21, 0xe3, 0x6b, 0xe3,
|
||||||
0x42, 0xea, 0x78, 0x4a, 0x43, 0xe0, 0x58, 0x35, 0x17, 0x83, 0x6e, 0x5f, 0x61, 0x51, 0x75, 0xfd,
|
0x6b, 0x0d, 0xaa, 0x2b, 0x3c, 0xd0, 0x36, 0xa3, 0x6c, 0x14, 0x91, 0x87, 0x50, 0x89, 0x53, 0x8c,
|
||||||
0x64, 0x1c, 0x1b, 0x7d, 0xcd, 0x60, 0xce, 0x4f, 0x16, 0xcc, 0x99, 0x04, 0xa6, 0x02, 0xf9, 0x06,
|
0x9a, 0x9a, 0x48, 0xe8, 0xbc, 0x5a, 0x4e, 0x14, 0xc9, 0x2a, 0x34, 0x5c, 0x1a, 0x31, 0x6b, 0x14,
|
||||||
0x2c, 0x30, 0x9f, 0xc6, 0x31, 0x06, 0x5d, 0x33, 0xf4, 0x19, 0x39, 0xd8, 0x79, 0xcd, 0x7e, 0xaa,
|
0xd8, 0x94, 0xa1, 0xc5, 0xbb, 0x82, 0xc2, 0x5f, 0x6f, 0xc9, 0x8e, 0xd0, 0x8a, 0x5b, 0x46, 0x6b,
|
||||||
0x67, 0x7f, 0x1b, 0x2e, 0x18, 0xc5, 0x43, 0x0c, 0x54, 0xa4, 0xea, 0xa2, 0x16, 0x3c, 0xca, 0xa0,
|
0x27, 0x6e, 0x19, 0xe6, 0x14, 0xb7, 0x79, 0x21, 0x4c, 0xb8, 0xd0, 0x78, 0x0c, 0x64, 0x1d, 0x59,
|
||||||
0xb0, 0x0d, 0xb3, 0xb2, 0x06, 0x66, 0xcf, 0xca, 0x7c, 0xdb, 0xe7, 0xef, 0xb7, 0x6a, 0x81, 0x36,
|
0xd7, 0xeb, 0x63, 0xc4, 0xc2, 0xb1, 0x89, 0x5f, 0x8c, 0x30, 0x62, 0xe4, 0xdf, 0x50, 0xa7, 0x4a,
|
||||||
0x77, 0xff, 0x2c, 0xc1, 0xd2, 0xf3, 0x84, 0xbd, 0x15, 0x1e, 0xc8, 0x0a, 0xcc, 0xea, 0xb7, 0xa5,
|
0x64, 0xa5, 0x6e, 0xbc, 0x16, 0x0b, 0xf9, 0x95, 0x1a, 0x7f, 0x17, 0x60, 0x26, 0x63, 0x1b, 0x05,
|
||||||
0x1e, 0xa7, 0xa6, 0xc8, 0xc3, 0x2c, 0xbb, 0xb2, 0xcc, 0xee, 0x76, 0x31, 0xbb, 0x29, 0xf1, 0x24,
|
0xbe, 0x17, 0x21, 0x59, 0x83, 0x72, 0xac, 0x27, 0xec, 0xaa, 0x9d, 0xfb, 0xd9, 0x6a, 0x72, 0x8c,
|
||||||
0xaf, 0x90, 0x99, 0xf3, 0xb3, 0x05, 0xb5, 0x8c, 0x3b, 0xed, 0x5d, 0x09, 0xde, 0x90, 0xf2, 0x5d,
|
0x5a, 0x89, 0x20, 0xb1, 0x25, 0xef, 0xc2, 0x64, 0x24, 0x00, 0x52, 0x65, 0x2d, 0x64, 0xbd, 0xa4,
|
||||||
0x1d, 0x5c, 0x7e, 0x13, 0x0f, 0xaa, 0xbb, 0x48, 0x83, 0xc3, 0xd8, 0x77, 0xdf, 0x20, 0x76, 0xeb,
|
0x10, 0x34, 0x95, 0xa2, 0xfe, 0x25, 0xd4, 0x63, 0x47, 0x12, 0xfe, 0xff, 0x41, 0xd1, 0xe5, 0x0b,
|
||||||
0x23, 0x65, 0xfa, 0x38, 0x16, 0x52, 0xe3, 0xc8, 0xb9, 0x0f, 0x8d, 0xbc, 0x80, 0x2c, 0x42, 0x79,
|
0x95, 0xc8, 0x4c, 0xd6, 0x85, 0xd0, 0x31, 0xa5, 0x06, 0x6f, 0x29, 0x12, 0x5c, 0xb4, 0x2d, 0x75,
|
||||||
0x0f, 0x27, 0x3a, 0x15, 0xf1, 0x49, 0x96, 0xa1, 0x32, 0xa6, 0xd1, 0xc8, 0x2c, 0x29, 0x45, 0xdc,
|
0x95, 0x3c, 0xf2, 0x45, 0x2d, 0x25, 0xd6, 0x57, 0x82, 0x48, 0xdf, 0x87, 0x72, 0x1c, 0x3f, 0xf7,
|
||||||
0x2f, 0xdd, 0xb5, 0xdc, 0x27, 0xb0, 0x5c, 0x0c, 0xa9, 0x9f, 0xcc, 0x21, 0xd4, 0xad, 0x73, 0x42,
|
0xb1, 0xac, 0xc3, 0xa4, 0x08, 0x16, 0x35, 0x27, 0x84, 0xe3, 0xf6, 0xd5, 0x81, 0x91, 0xb9, 0x2a,
|
||||||
0xdd, 0xfd, 0xd1, 0x82, 0x95, 0x6d, 0xe4, 0xcf, 0x12, 0x1e, 0xf6, 0x43, 0x5f, 0xde, 0x19, 0x33,
|
0x73, 0xe3, 0xf7, 0x02, 0xcc, 0x6c, 0xf9, 0xd1, 0x5b, 0x5d, 0x1c, 0x99, 0x83, 0x49, 0xf5, 0xb2,
|
||||||
0xad, 0xf7, 0x61, 0x25, 0x89, 0x82, 0x6e, 0x7e, 0x2b, 0x4d, 0xba, 0x43, 0xba, 0x63, 0xc6, 0xb6,
|
0x64, 0x5b, 0x53, 0x3b, 0xb2, 0x72, 0x2a, 0xbb, 0xff, 0x67, 0xb3, 0xcb, 0x89, 0x27, 0x64, 0x99,
|
||||||
0x9c, 0x44, 0x41, 0x61, 0x83, 0x3d, 0xa7, 0x3b, 0x28, 0xac, 0x62, 0xdc, 0x9f, 0x66, 0xa5, 0xca,
|
0xcc, 0xf4, 0x9f, 0x34, 0xa8, 0x24, 0xd2, 0x3c, 0xfa, 0x73, 0x59, 0x40, 0xd9, 0x81, 0x0a, 0x2e,
|
||||||
0x58, 0x8e, 0x71, 0xff, 0xb8, 0xd5, 0x32, 0x54, 0xa2, 0x70, 0x10, 0x72, 0xb9, 0x7a, 0x2a, 0x9e,
|
0xd6, 0xc4, 0x84, 0xd2, 0x01, 0x52, 0xfb, 0x24, 0xf6, 0xa3, 0x37, 0x88, 0xdd, 0xfa, 0x50, 0x9a,
|
||||||
0x22, 0x32, 0xe8, 0xcf, 0x1c, 0x42, 0xdf, 0xfd, 0xa3, 0x04, 0x97, 0x8e, 0x25, 0xac, 0xeb, 0x7f,
|
0xf6, 0x3c, 0x7e, 0x1a, 0x3b, 0xd2, 0x9f, 0x40, 0x2d, 0x7d, 0x40, 0x1a, 0x30, 0x71, 0x88, 0x63,
|
||||||
0x09, 0x8d, 0x38, 0xc7, 0xd7, 0x5d, 0xe8, 0x1c, 0x83, 0xf1, 0x34, 0xe3, 0x56, 0x81, 0x59, 0xf0,
|
0x95, 0x0a, 0x5f, 0x92, 0x59, 0x28, 0x1e, 0x51, 0x77, 0x14, 0x0f, 0x3b, 0xb9, 0x79, 0x52, 0x78,
|
||||||
0xe3, 0xfc, 0x6d, 0x41, 0x23, 0x2f, 0x9e, 0xfa, 0x26, 0x6d, 0xa8, 0xfa, 0x29, 0x52, 0x8e, 0x81,
|
0xa4, 0x19, 0x1b, 0x30, 0x9b, 0x0d, 0xa9, 0xb8, 0x7d, 0xc2, 0x49, 0xed, 0x8a, 0x9c, 0x34, 0x7e,
|
||||||
0xae, 0xd4, 0x90, 0xe2, 0x22, 0x2a, 0x77, 0x18, 0xe8, 0x83, 0x92, 0xd1, 0xc2, 0x2a, 0xc0, 0x08,
|
0xd0, 0x60, 0x6e, 0x1d, 0xd9, 0xa6, 0xcf, 0x9c, 0x81, 0xd3, 0x17, 0xb3, 0x39, 0xbe, 0xad, 0x87,
|
||||||
0x85, 0x95, 0xaa, 0xd2, 0x90, 0xe4, 0x1e, 0x94, 0x93, 0x28, 0x90, 0xe7, 0xb5, 0xde, 0xb9, 0x71,
|
0x30, 0xe7, 0xbb, 0xb6, 0x95, 0xee, 0x2f, 0x63, 0x2b, 0xa0, 0xfb, 0xf1, 0xb5, 0xcd, 0xfa, 0xae,
|
||||||
0x04, 0x70, 0x74, 0x07, 0xb3, 0xde, 0x47, 0xa8, 0x81, 0x10, 0x22, 0xf3, 0x84, 0x8d, 0x30, 0x8d,
|
0x9d, 0xe9, 0x45, 0x5b, 0x74, 0x1f, 0xb9, 0x95, 0x87, 0xc7, 0x79, 0x56, 0xb2, 0x8c, 0x59, 0x0f,
|
||||||
0x71, 0x5f, 0x5e, 0xdf, 0x37, 0x31, 0x8d, 0x71, 0xdf, 0xfd, 0xad, 0x04, 0xab, 0x27, 0xaa, 0x90,
|
0x8f, 0xcf, 0x5a, 0xcd, 0x42, 0xd1, 0x75, 0x86, 0x0e, 0x13, 0xed, 0xb6, 0x68, 0xca, 0x4d, 0x42,
|
||||||
0x0d, 0x68, 0xf8, 0xa3, 0x34, 0xc5, 0x98, 0xe7, 0x81, 0x50, 0xd7, 0x3c, 0x39, 0xc9, 0x35, 0xa8,
|
0xd2, 0x6b, 0x27, 0x24, 0x35, 0x7e, 0x2b, 0xc0, 0xfc, 0x99, 0x84, 0x55, 0xfd, 0xbb, 0x50, 0xf3,
|
||||||
0xc5, 0x78, 0xc0, 0xf3, 0x23, 0x9f, 0x13, 0x8c, 0x53, 0xc6, 0xbc, 0x09, 0xcd, 0x02, 0x5c, 0x64,
|
0x52, 0x72, 0x85, 0x42, 0xe7, 0x0c, 0x8d, 0xf3, 0x8c, 0x5b, 0x19, 0x61, 0xc6, 0x8f, 0xfe, 0xa7,
|
||||||
0x27, 0xce, 0x38, 0x96, 0x45, 0x0b, 0xf2, 0x39, 0x00, 0xcd, 0xd2, 0xd4, 0xc7, 0xf6, 0x83, 0x73,
|
0x06, 0xb5, 0xf4, 0xf1, 0x79, 0xf3, 0xb8, 0x1f, 0x22, 0x65, 0x68, 0xc7, 0xf3, 0x58, 0x6d, 0xf9,
|
||||||
0x16, 0xde, 0x7a, 0x12, 0x07, 0x78, 0x80, 0xc1, 0x66, 0x6e, 0x0b, 0x79, 0x39, 0x77, 0xce, 0x87,
|
0x57, 0x84, 0x74, 0x87, 0xb6, 0x1a, 0x27, 0xc9, 0x9e, 0x5b, 0xd9, 0xe8, 0x22, 0xb7, 0x92, 0x55,
|
||||||
0xb0, 0x34, 0x45, 0x45, 0x14, 0x13, 0x0a, 0xb6, 0xec, 0x42, 0xc5, 0x53, 0x44, 0x06, 0x8d, 0x52,
|
0xc6, 0x5b, 0xf2, 0x18, 0x26, 0x7c, 0xd7, 0x56, 0xd3, 0xe3, 0xbf, 0xa7, 0x08, 0x47, 0xf7, 0x31,
|
||||||
0x0e, 0xb3, 0x77, 0xe0, 0xca, 0xc7, 0x34, 0xdd, 0xcb, 0x43, 0x68, 0x93, 0x79, 0x48, 0x03, 0xf3,
|
0xc1, 0xde, 0x45, 0x45, 0x04, 0x07, 0x23, 0x93, 0xdb, 0x70, 0x53, 0x0f, 0x8f, 0xc5, 0x17, 0xcb,
|
||||||
0xd4, 0xa6, 0xe0, 0xc9, 0x5d, 0x87, 0xab, 0x27, 0x19, 0x29, 0xc4, 0xba, 0x04, 0x16, 0xb7, 0x91,
|
0x9b, 0x98, 0x7a, 0x78, 0x6c, 0xfc, 0x52, 0x80, 0x85, 0x73, 0x55, 0xf8, 0x6c, 0xe9, 0x8f, 0xc2,
|
||||||
0xeb, 0x07, 0xad, 0x3c, 0xb9, 0x5b, 0x70, 0x21, 0xc7, 0x7b, 0xeb, 0xbd, 0xd0, 0xf9, 0xc7, 0x82,
|
0x10, 0x3d, 0x96, 0x26, 0x42, 0x55, 0xc9, 0xc4, 0x4d, 0x2e, 0x42, 0xc5, 0xc3, 0x97, 0x2c, 0x7d,
|
||||||
0x05, 0x53, 0xed, 0x0b, 0x4c, 0xc7, 0xa1, 0x8f, 0x64, 0x04, 0xf5, 0xdc, 0x0d, 0x20, 0xeb, 0xa7,
|
0xe5, 0x65, 0x2e, 0xb8, 0xe0, 0x9a, 0xbb, 0x50, 0xcf, 0xd0, 0x45, 0x20, 0x71, 0xc9, 0xd8, 0xcb,
|
||||||
0x9c, 0x07, 0x99, 0x8c, 0xb3, 0x71, 0xe6, 0x01, 0x71, 0x37, 0xbe, 0xfe, 0xfd, 0xaf, 0xef, 0x4a,
|
0x5a, 0x90, 0x4f, 0x01, 0x68, 0x92, 0x66, 0xb3, 0x28, 0x1e, 0xe9, 0xfb, 0x57, 0x2c, 0xbc, 0xb5,
|
||||||
0x6b, 0x64, 0xb5, 0x6d, 0x8e, 0x40, 0xfb, 0x55, 0xe1, 0x46, 0xbc, 0x26, 0x7b, 0xd0, 0xc8, 0x6f,
|
0xe1, 0xd9, 0xf8, 0x12, 0xed, 0x6e, 0xaa, 0x0b, 0x99, 0x29, 0x77, 0xfa, 0x07, 0x30, 0x93, 0xa3,
|
||||||
0x3b, 0xb2, 0x71, 0xe6, 0xf2, 0x75, 0xdc, 0xd3, 0x54, 0x74, 0xe4, 0x65, 0x19, 0x79, 0xde, 0xad,
|
0xc2, 0x8b, 0x71, 0xb8, 0x58, 0xa0, 0x50, 0x34, 0xe5, 0x26, 0xa1, 0x46, 0x21, 0xc5, 0xd9, 0x07,
|
||||||
0x65, 0x91, 0xef, 0x5b, 0xb7, 0x3a, 0x3f, 0x94, 0x60, 0x29, 0xdf, 0x72, 0x53, 0xfb, 0x6b, 0x58,
|
0x70, 0xeb, 0x39, 0x0d, 0x0f, 0xd3, 0x14, 0xea, 0x46, 0x26, 0x52, 0x3b, 0x7e, 0x6a, 0x39, 0x7c,
|
||||||
0x38, 0xb2, 0x38, 0xc8, 0xff, 0xcf, 0xd8, 0x2b, 0x2a, 0x95, 0xeb, 0xe7, 0xda, 0x3e, 0xee, 0x15,
|
0x32, 0x96, 0xe0, 0xf6, 0x79, 0x46, 0x92, 0xb1, 0x06, 0x81, 0xc6, 0x3a, 0x32, 0xf5, 0xa0, 0xa5,
|
||||||
0x99, 0xcd, 0x25, 0x72, 0xb1, 0x9d, 0xdf, 0x3c, 0xac, 0xfd, 0x4a, 0xf5, 0xe0, 0x5b, 0x0b, 0x56,
|
0x27, 0x63, 0x0d, 0xae, 0xa7, 0x64, 0x6f, 0xdd, 0x17, 0x3a, 0x7f, 0x69, 0x30, 0x1d, 0x57, 0xbb,
|
||||||
0xa6, 0xa3, 0x81, 0x1c, 0xb9, 0x83, 0xa7, 0x02, 0xcd, 0x79, 0xf7, 0x7c, 0xca, 0xc5, 0xa4, 0x6e,
|
0x8d, 0xe1, 0x91, 0xd3, 0x47, 0x32, 0x82, 0x6a, 0x6a, 0x06, 0x90, 0xa5, 0x0b, 0xc6, 0x83, 0x48,
|
||||||
0x4d, 0x4f, 0xaa, 0x13, 0x43, 0x53, 0xa1, 0xc6, 0x34, 0xe9, 0x0b, 0xa8, 0x65, 0xe0, 0x23, 0x57,
|
0x46, 0x5f, 0xbe, 0x74, 0x80, 0x18, 0xcb, 0x5f, 0xfd, 0xfa, 0xc7, 0xb7, 0x85, 0x45, 0xb2, 0xd0,
|
||||||
0x8f, 0x15, 0x5e, 0x40, 0xaa, 0x73, 0xed, 0x44, 0xb9, 0x8e, 0xbe, 0x20, 0xa3, 0xd7, 0x48, 0xb5,
|
0x8e, 0x87, 0x40, 0xfb, 0x55, 0x66, 0x46, 0xbc, 0x26, 0x87, 0x50, 0x4b, 0x77, 0x3b, 0xb2, 0x7c,
|
||||||
0xad, 0x30, 0xf9, 0xe0, 0x2a, 0x2c, 0xf9, 0xc9, 0xa0, 0x68, 0x36, 0xec, 0x7d, 0x56, 0xd5, 0xff,
|
0x69, 0xf3, 0xd5, 0x8d, 0x8b, 0x54, 0x54, 0xe4, 0x59, 0x11, 0x79, 0xca, 0xa8, 0x24, 0x91, 0x9f,
|
||||||
0x71, 0xf5, 0x66, 0xe5, 0x1f, 0xaa, 0x77, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xd2, 0x74,
|
0x68, 0xf7, 0x3b, 0x1e, 0xd4, 0x25, 0x12, 0x71, 0xd1, 0x9f, 0x41, 0x25, 0x01, 0x94, 0xdc, 0x3e,
|
||||||
0xfa, 0x8a, 0x0d, 0x00, 0x00,
|
0x53, 0x50, 0x06, 0x7d, 0xfd, 0xce, 0xb9, 0xe7, 0x2a, 0xe8, 0xb4, 0x08, 0x5a, 0x21, 0xa5, 0xb6,
|
||||||
|
0xc2, 0xf9, 0xfb, 0x02, 0xcc, 0xa4, 0xaf, 0x38, 0x0e, 0xfb, 0x1a, 0xa6, 0x4f, 0x35, 0x2a, 0xf2,
|
||||||
|
0x9f, 0x4b, 0xfa, 0x98, 0x4c, 0xe1, 0xee, 0x95, 0xba, 0x9d, 0x71, 0x4b, 0x24, 0x32, 0x4f, 0x6e,
|
||||||
|
0xb4, 0xd3, 0x9d, 0x2e, 0x6a, 0xbf, 0x92, 0x98, 0x7f, 0xa3, 0xc1, 0x5c, 0x3e, 0xfb, 0xc8, 0xa9,
|
||||||
|
0xb9, 0x7b, 0x21, 0xb1, 0xf5, 0x77, 0xae, 0xa6, 0x9c, 0x4d, 0xea, 0x7e, 0x7e, 0x52, 0x4f, 0x6f,
|
||||||
|
0xc3, 0x4c, 0xdf, 0x1f, 0x66, 0x3d, 0x06, 0x7b, 0x9f, 0x94, 0xd4, 0xbf, 0xd4, 0xbd, 0x49, 0xf1,
|
||||||
|
0x45, 0xf9, 0xe0, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1b, 0x4b, 0xee, 0xfe, 0xbe, 0x0e, 0x00,
|
||||||
|
0x00,
|
||||||
}
|
}
|
||||||
|
|
174
vendor/github.com/coreos/clair/api/v3/clairpb/clair.pb.gw.go
generated
vendored
174
vendor/github.com/coreos/clair/api/v3/clairpb/clair.pb.gw.go
generated
vendored
|
@ -70,6 +70,15 @@ func request_AncestryService_PostAncestry_0(ctx context.Context, marshaler runti
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func request_StatusService_GetStatus_0(ctx context.Context, marshaler runtime.Marshaler, client StatusServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||||
|
var protoReq GetStatusRequest
|
||||||
|
var metadata runtime.ServerMetadata
|
||||||
|
|
||||||
|
msg, err := client.GetStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||||
|
return msg, metadata, err
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
filter_NotificationService_GetNotification_0 = &utilities.DoubleArray{Encoding: map[string]int{"name": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
|
filter_NotificationService_GetNotification_0 = &utilities.DoubleArray{Encoding: map[string]int{"name": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
|
||||||
)
|
)
|
||||||
|
@ -132,15 +141,6 @@ func request_NotificationService_MarkNotificationAsRead_0(ctx context.Context, m
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func request_StatusService_GetStatus_0(ctx context.Context, marshaler runtime.Marshaler, client StatusServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
|
||||||
var protoReq GetStatusRequest
|
|
||||||
var metadata runtime.ServerMetadata
|
|
||||||
|
|
||||||
msg, err := client.GetStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
|
||||||
return msg, metadata, err
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisterAncestryServiceHandlerFromEndpoint is same as RegisterAncestryServiceHandler but
|
// RegisterAncestryServiceHandlerFromEndpoint is same as RegisterAncestryServiceHandler but
|
||||||
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
|
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
|
||||||
func RegisterAncestryServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
|
func RegisterAncestryServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
|
||||||
|
@ -252,6 +252,84 @@ var (
|
||||||
forward_AncestryService_PostAncestry_0 = runtime.ForwardResponseMessage
|
forward_AncestryService_PostAncestry_0 = runtime.ForwardResponseMessage
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// RegisterStatusServiceHandlerFromEndpoint is same as RegisterStatusServiceHandler but
|
||||||
|
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
|
||||||
|
func RegisterStatusServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
|
||||||
|
conn, err := grpc.Dial(endpoint, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err != nil {
|
||||||
|
if cerr := conn.Close(); cerr != nil {
|
||||||
|
grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
go func() {
|
||||||
|
<-ctx.Done()
|
||||||
|
if cerr := conn.Close(); cerr != nil {
|
||||||
|
grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}()
|
||||||
|
|
||||||
|
return RegisterStatusServiceHandler(ctx, mux, conn)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterStatusServiceHandler registers the http handlers for service StatusService to "mux".
|
||||||
|
// The handlers forward requests to the grpc endpoint over "conn".
|
||||||
|
func RegisterStatusServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
|
||||||
|
return RegisterStatusServiceHandlerClient(ctx, mux, NewStatusServiceClient(conn))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterStatusServiceHandler registers the http handlers for service StatusService to "mux".
|
||||||
|
// The handlers forward requests to the grpc endpoint over the given implementation of "StatusServiceClient".
|
||||||
|
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "StatusServiceClient"
|
||||||
|
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
|
||||||
|
// "StatusServiceClient" to call the correct interceptors.
|
||||||
|
func RegisterStatusServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client StatusServiceClient) error {
|
||||||
|
|
||||||
|
mux.Handle("GET", pattern_StatusService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||||
|
ctx, cancel := context.WithCancel(req.Context())
|
||||||
|
defer cancel()
|
||||||
|
if cn, ok := w.(http.CloseNotifier); ok {
|
||||||
|
go func(done <-chan struct{}, closed <-chan bool) {
|
||||||
|
select {
|
||||||
|
case <-done:
|
||||||
|
case <-closed:
|
||||||
|
cancel()
|
||||||
|
}
|
||||||
|
}(ctx.Done(), cn.CloseNotify())
|
||||||
|
}
|
||||||
|
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||||
|
rctx, err := runtime.AnnotateContext(ctx, mux, req)
|
||||||
|
if err != nil {
|
||||||
|
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
resp, md, err := request_StatusService_GetStatus_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||||
|
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||||
|
if err != nil {
|
||||||
|
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
forward_StatusService_GetStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||||
|
|
||||||
|
})
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
pattern_StatusService_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0}, []string{"status"}, ""))
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
forward_StatusService_GetStatus_0 = runtime.ForwardResponseMessage
|
||||||
|
)
|
||||||
|
|
||||||
// RegisterNotificationServiceHandlerFromEndpoint is same as RegisterNotificationServiceHandler but
|
// RegisterNotificationServiceHandlerFromEndpoint is same as RegisterNotificationServiceHandler but
|
||||||
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
|
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
|
||||||
func RegisterNotificationServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
|
func RegisterNotificationServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
|
||||||
|
@ -362,81 +440,3 @@ var (
|
||||||
|
|
||||||
forward_NotificationService_MarkNotificationAsRead_0 = runtime.ForwardResponseMessage
|
forward_NotificationService_MarkNotificationAsRead_0 = runtime.ForwardResponseMessage
|
||||||
)
|
)
|
||||||
|
|
||||||
// RegisterStatusServiceHandlerFromEndpoint is same as RegisterStatusServiceHandler but
|
|
||||||
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
|
|
||||||
func RegisterStatusServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
|
|
||||||
conn, err := grpc.Dial(endpoint, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
if cerr := conn.Close(); cerr != nil {
|
|
||||||
grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
go func() {
|
|
||||||
<-ctx.Done()
|
|
||||||
if cerr := conn.Close(); cerr != nil {
|
|
||||||
grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}()
|
|
||||||
|
|
||||||
return RegisterStatusServiceHandler(ctx, mux, conn)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisterStatusServiceHandler registers the http handlers for service StatusService to "mux".
|
|
||||||
// The handlers forward requests to the grpc endpoint over "conn".
|
|
||||||
func RegisterStatusServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
|
|
||||||
return RegisterStatusServiceHandlerClient(ctx, mux, NewStatusServiceClient(conn))
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisterStatusServiceHandler registers the http handlers for service StatusService to "mux".
|
|
||||||
// The handlers forward requests to the grpc endpoint over the given implementation of "StatusServiceClient".
|
|
||||||
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "StatusServiceClient"
|
|
||||||
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
|
|
||||||
// "StatusServiceClient" to call the correct interceptors.
|
|
||||||
func RegisterStatusServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client StatusServiceClient) error {
|
|
||||||
|
|
||||||
mux.Handle("GET", pattern_StatusService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
|
||||||
ctx, cancel := context.WithCancel(req.Context())
|
|
||||||
defer cancel()
|
|
||||||
if cn, ok := w.(http.CloseNotifier); ok {
|
|
||||||
go func(done <-chan struct{}, closed <-chan bool) {
|
|
||||||
select {
|
|
||||||
case <-done:
|
|
||||||
case <-closed:
|
|
||||||
cancel()
|
|
||||||
}
|
|
||||||
}(ctx.Done(), cn.CloseNotify())
|
|
||||||
}
|
|
||||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
|
||||||
rctx, err := runtime.AnnotateContext(ctx, mux, req)
|
|
||||||
if err != nil {
|
|
||||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
resp, md, err := request_StatusService_GetStatus_0(rctx, inboundMarshaler, client, req, pathParams)
|
|
||||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
|
||||||
if err != nil {
|
|
||||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
forward_StatusService_GetStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
|
||||||
|
|
||||||
})
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
pattern_StatusService_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0}, []string{"status"}, ""))
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
forward_StatusService_GetStatus_0 = runtime.ForwardResponseMessage
|
|
||||||
)
|
|
||||||
|
|
143
vendor/github.com/coreos/clair/api/v3/clairpb/clair.proto
generated
vendored
143
vendor/github.com/coreos/clair/api/v3/clairpb/clair.proto
generated
vendored
|
@ -23,12 +23,55 @@ package coreos.clair;
|
||||||
option go_package = "clairpb";
|
option go_package = "clairpb";
|
||||||
option java_package = "com.coreos.clair.pb";
|
option java_package = "com.coreos.clair.pb";
|
||||||
|
|
||||||
|
service AncestryService {
|
||||||
|
// The RPC used to read the results of scanning for a particular ancestry.
|
||||||
|
rpc GetAncestry(GetAncestryRequest) returns (GetAncestryResponse) {
|
||||||
|
option (google.api.http) = {
|
||||||
|
get: "/ancestry/{ancestry_name}"
|
||||||
|
};
|
||||||
|
}
|
||||||
|
// The RPC used to create a new scan of an ancestry.
|
||||||
|
rpc PostAncestry(PostAncestryRequest) returns (PostAncestryResponse) {
|
||||||
|
option (google.api.http) = {
|
||||||
|
post: "/ancestry"
|
||||||
|
body: "*"
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
service StatusService {
|
||||||
|
// The RPC used to show the internal state of current Clair instance.
|
||||||
|
rpc GetStatus(GetStatusRequest) returns (GetStatusResponse) {
|
||||||
|
option (google.api.http) = {
|
||||||
|
get: "/status"
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
service NotificationService {
|
||||||
|
// The RPC used to get a particularly Notification.
|
||||||
|
rpc GetNotification(GetNotificationRequest)
|
||||||
|
returns (GetNotificationResponse) {
|
||||||
|
option (google.api.http) = {
|
||||||
|
get: "/notifications/{name}"
|
||||||
|
};
|
||||||
|
}
|
||||||
|
// The RPC used to mark a Notification as read after it has been processed.
|
||||||
|
rpc MarkNotificationAsRead(MarkNotificationAsReadRequest)
|
||||||
|
returns (MarkNotificationAsReadResponse) {
|
||||||
|
option (google.api.http) = {
|
||||||
|
delete: "/notifications/{name}"
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
message Vulnerability {
|
message Vulnerability {
|
||||||
// The name of the vulnerability.
|
// The name of the vulnerability.
|
||||||
string name = 1;
|
string name = 1;
|
||||||
// The name of the namespace in which the vulnerability was detected.
|
// The name of the namespace in which the vulnerability was detected.
|
||||||
string namespace_name = 2;
|
string namespace_name = 2;
|
||||||
// A description of the vulnerability according to the source for the namespace.
|
// A description of the vulnerability according to the source for the
|
||||||
|
// namespace.
|
||||||
string description = 3;
|
string description = 3;
|
||||||
// A link to the vulnerability according to the source for the namespace.
|
// A link to the vulnerability according to the source for the namespace.
|
||||||
string link = 4;
|
string link = 4;
|
||||||
|
@ -44,17 +87,45 @@ message Vulnerability {
|
||||||
repeated Feature affected_versions = 8;
|
repeated Feature affected_versions = 8;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
message Detector {
|
||||||
|
enum DType {
|
||||||
|
DETECTOR_D_TYPE_INVALID = 0;
|
||||||
|
DETECTOR_D_TYPE_NAMESPACE = 1;
|
||||||
|
DETECTOR_D_TYPE_FEATURE = 2;
|
||||||
|
}
|
||||||
|
// The name of the detector.
|
||||||
|
string name = 1;
|
||||||
|
// The version of the detector.
|
||||||
|
string version = 2;
|
||||||
|
// The type of the detector.
|
||||||
|
DType dtype = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message Namespace {
|
||||||
|
// The name of the namespace.
|
||||||
|
string name = 1;
|
||||||
|
// The detector used to detect the namespace. This only exists when present
|
||||||
|
// in an Ancestry Feature.
|
||||||
|
Detector detector = 2;
|
||||||
|
}
|
||||||
|
|
||||||
message Feature {
|
message Feature {
|
||||||
// The name of the feature.
|
// The name of the feature.
|
||||||
string name = 1;
|
string name = 1;
|
||||||
// The name of the namespace in which the feature is detected.
|
// The namespace in which the feature is detected.
|
||||||
string namespace_name = 2;
|
Namespace namespace = 2;
|
||||||
// The specific version of this feature.
|
// The specific version of this feature.
|
||||||
string version = 3;
|
string version = 3;
|
||||||
// The format used to parse version numbers for the feature.
|
// The format used to parse version numbers for the feature.
|
||||||
string version_format = 4;
|
string version_format = 4;
|
||||||
|
// The detector used to detect this feature. This only exists when present
|
||||||
|
// in an Ancestry.
|
||||||
|
Detector detector = 5;
|
||||||
// The list of vulnerabilities that affect the feature.
|
// The list of vulnerabilities that affect the feature.
|
||||||
repeated Vulnerability vulnerabilities = 5;
|
repeated Vulnerability vulnerabilities = 6;
|
||||||
|
// The feature type indicates if the feature represents a source package or
|
||||||
|
// binary package.
|
||||||
|
string feature_type = 7;
|
||||||
}
|
}
|
||||||
|
|
||||||
message Layer {
|
message Layer {
|
||||||
|
@ -62,27 +133,11 @@ message Layer {
|
||||||
string hash = 1;
|
string hash = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
service AncestryService {
|
|
||||||
// The RPC used to read the results of scanning for a particular ancestry.
|
|
||||||
rpc GetAncestry(GetAncestryRequest) returns (GetAncestryResponse) {
|
|
||||||
option (google.api.http) = { get: "/ancestry/{ancestry_name}" };
|
|
||||||
}
|
|
||||||
// The RPC used to create a new scan of an ancestry.
|
|
||||||
rpc PostAncestry(PostAncestryRequest) returns (PostAncestryResponse) {
|
|
||||||
option (google.api.http) = {
|
|
||||||
post: "/ancestry"
|
|
||||||
body: "*"
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
message ClairStatus {
|
message ClairStatus {
|
||||||
// The configured list of feature listers used to scan an ancestry.
|
// The implemented detectors in this Clair instance
|
||||||
repeated string listers = 1;
|
repeated Detector detectors = 1;
|
||||||
// The configured list of namespace detectors used to scan an ancestry.
|
|
||||||
repeated string detectors = 2;
|
|
||||||
// The time at which the updater last ran.
|
// The time at which the updater last ran.
|
||||||
google.protobuf.Timestamp last_update_time = 3;
|
google.protobuf.Timestamp last_update_time = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
message GetAncestryRequest {
|
message GetAncestryRequest {
|
||||||
|
@ -100,16 +155,12 @@ message GetAncestryResponse {
|
||||||
message Ancestry {
|
message Ancestry {
|
||||||
// The name of the desired ancestry.
|
// The name of the desired ancestry.
|
||||||
string name = 1;
|
string name = 1;
|
||||||
// The configured list of feature listers used to scan this ancestry.
|
|
||||||
repeated string scanned_listers = 4;
|
|
||||||
// The configured list of namespace detectors used to scan an ancestry.
|
|
||||||
repeated string scanned_detectors = 5;
|
|
||||||
// The list of layers along with detected features in each.
|
// The list of layers along with detected features in each.
|
||||||
repeated AncestryLayer layers = 6;
|
repeated AncestryLayer layers = 3;
|
||||||
}
|
}
|
||||||
// The ancestry requested.
|
// The ancestry requested.
|
||||||
Ancestry ancestry = 1;
|
Ancestry ancestry = 1;
|
||||||
// The status of Clair at the time of the request.
|
// The status of Clair at the time of the request
|
||||||
ClairStatus status = 2;
|
ClairStatus status = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -117,9 +168,10 @@ message PostAncestryRequest {
|
||||||
message PostLayer {
|
message PostLayer {
|
||||||
// The hash of the layer.
|
// The hash of the layer.
|
||||||
string hash = 1;
|
string hash = 1;
|
||||||
// The location of the layer (URL or filepath).
|
// The location of the layer (URL or file path).
|
||||||
string path = 2;
|
string path = 2;
|
||||||
// Any HTTP Headers that need to be used if requesting a layer over HTTP(S).
|
// Any HTTP Headers that need to be used if requesting a layer over
|
||||||
|
// HTTP(S).
|
||||||
map<string, string> headers = 3;
|
map<string, string> headers = 3;
|
||||||
}
|
}
|
||||||
// The name of the ancestry being scanned.
|
// The name of the ancestry being scanned.
|
||||||
|
@ -137,17 +189,6 @@ message PostAncestryResponse {
|
||||||
ClairStatus status = 1;
|
ClairStatus status = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
service NotificationService {
|
|
||||||
// The RPC used to get a particularly Notification.
|
|
||||||
rpc GetNotification(GetNotificationRequest) returns (GetNotificationResponse) {
|
|
||||||
option (google.api.http) = { get: "/notifications/{name}" };
|
|
||||||
}
|
|
||||||
// The RPC used to mark a Notification as read after it has been processed.
|
|
||||||
rpc MarkNotificationAsRead(MarkNotificationAsReadRequest) returns (MarkNotificationAsReadResponse) {
|
|
||||||
option (google.api.http) = { delete: "/notifications/{name}" };
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
message GetNotificationRequest {
|
message GetNotificationRequest {
|
||||||
// The current page of previous vulnerabilities for the ancestry.
|
// The current page of previous vulnerabilities for the ancestry.
|
||||||
// This will be empty when it is the first page.
|
// This will be empty when it is the first page.
|
||||||
|
@ -171,9 +212,11 @@ message GetNotificationResponse {
|
||||||
string notified = 3;
|
string notified = 3;
|
||||||
// The time at which a notification has been deleted.
|
// The time at which a notification has been deleted.
|
||||||
string deleted = 4;
|
string deleted = 4;
|
||||||
// The previous vulnerability and a paginated view of the ancestries it affects.
|
// The previous vulnerability and a paginated view of the ancestries it
|
||||||
|
// affects.
|
||||||
PagedVulnerableAncestries old = 5;
|
PagedVulnerableAncestries old = 5;
|
||||||
// The newly updated vulnerability and a paginated view of the ancestries it affects.
|
// The newly updated vulnerability and a paginated view of the
|
||||||
|
// ancestries it affects.
|
||||||
PagedVulnerableAncestries new = 6;
|
PagedVulnerableAncestries new = 6;
|
||||||
}
|
}
|
||||||
// The notification as requested.
|
// The notification as requested.
|
||||||
|
@ -182,8 +225,9 @@ message GetNotificationResponse {
|
||||||
|
|
||||||
message PagedVulnerableAncestries {
|
message PagedVulnerableAncestries {
|
||||||
message IndexedAncestryName {
|
message IndexedAncestryName {
|
||||||
// The index is an ever increasing number associated with the particular ancestry.
|
// The index is an ever increasing number associated with the particular
|
||||||
// This is useful if you're processing notifications, and need to keep track of the progress of paginating the results.
|
// ancestry. This is useful if you're processing notifications, and need
|
||||||
|
// to keep track of the progress of paginating the results.
|
||||||
int32 index = 1;
|
int32 index = 1;
|
||||||
// The name of the ancestry.
|
// The name of the ancestry.
|
||||||
string name = 2;
|
string name = 2;
|
||||||
|
@ -214,10 +258,3 @@ message GetStatusResponse {
|
||||||
// The status of the current Clair instance.
|
// The status of the current Clair instance.
|
||||||
ClairStatus status = 1;
|
ClairStatus status = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
service StatusService {
|
|
||||||
// The RPC used to show the internal state of current Clair instance.
|
|
||||||
rpc GetStatus(GetStatusRequest) returns (GetStatusResponse) {
|
|
||||||
option (google.api.http) = { get: "/status" };
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
92
vendor/github.com/coreos/clair/api/v3/clairpb/clair.swagger.json
generated
vendored
92
vendor/github.com/coreos/clair/api/v3/clairpb/clair.swagger.json
generated
vendored
|
@ -156,6 +156,15 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"definitions": {
|
"definitions": {
|
||||||
|
"DetectorDType": {
|
||||||
|
"type": "string",
|
||||||
|
"enum": [
|
||||||
|
"DETECTOR_D_TYPE_INVALID",
|
||||||
|
"DETECTOR_D_TYPE_NAMESPACE",
|
||||||
|
"DETECTOR_D_TYPE_FEATURE"
|
||||||
|
],
|
||||||
|
"default": "DETECTOR_D_TYPE_INVALID"
|
||||||
|
},
|
||||||
"GetAncestryResponseAncestry": {
|
"GetAncestryResponseAncestry": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
|
@ -163,20 +172,6 @@
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"description": "The name of the desired ancestry."
|
"description": "The name of the desired ancestry."
|
||||||
},
|
},
|
||||||
"scanned_listers": {
|
|
||||||
"type": "array",
|
|
||||||
"items": {
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
"description": "The configured list of feature listers used to scan this ancestry."
|
|
||||||
},
|
|
||||||
"scanned_detectors": {
|
|
||||||
"type": "array",
|
|
||||||
"items": {
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
"description": "The configured list of namespace detectors used to scan an ancestry."
|
|
||||||
},
|
|
||||||
"layers": {
|
"layers": {
|
||||||
"type": "array",
|
"type": "array",
|
||||||
"items": {
|
"items": {
|
||||||
|
@ -223,11 +218,11 @@
|
||||||
},
|
},
|
||||||
"old": {
|
"old": {
|
||||||
"$ref": "#/definitions/clairPagedVulnerableAncestries",
|
"$ref": "#/definitions/clairPagedVulnerableAncestries",
|
||||||
"description": "The previous vulnerability and a paginated view of the ancestries it affects."
|
"description": "The previous vulnerability and a paginated view of the ancestries it\naffects."
|
||||||
},
|
},
|
||||||
"new": {
|
"new": {
|
||||||
"$ref": "#/definitions/clairPagedVulnerableAncestries",
|
"$ref": "#/definitions/clairPagedVulnerableAncestries",
|
||||||
"description": "The newly updated vulnerability and a paginated view of the ancestries it affects."
|
"description": "The newly updated vulnerability and a paginated view of the\nancestries it affects."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
@ -237,7 +232,7 @@
|
||||||
"index": {
|
"index": {
|
||||||
"type": "integer",
|
"type": "integer",
|
||||||
"format": "int32",
|
"format": "int32",
|
||||||
"description": "The index is an ever increasing number associated with the particular ancestry.\nThis is useful if you're processing notifications, and need to keep track of the progress of paginating the results."
|
"description": "The index is an ever increasing number associated with the particular\nancestry. This is useful if you're processing notifications, and need\nto keep track of the progress of paginating the results."
|
||||||
},
|
},
|
||||||
"name": {
|
"name": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
|
@ -254,33 +249,26 @@
|
||||||
},
|
},
|
||||||
"path": {
|
"path": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"description": "The location of the layer (URL or filepath)."
|
"description": "The location of the layer (URL or file path)."
|
||||||
},
|
},
|
||||||
"headers": {
|
"headers": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"additionalProperties": {
|
"additionalProperties": {
|
||||||
"type": "string"
|
"type": "string"
|
||||||
},
|
},
|
||||||
"description": "Any HTTP Headers that need to be used if requesting a layer over HTTP(S)."
|
"description": "Any HTTP Headers that need to be used if requesting a layer over\nHTTP(S)."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"clairClairStatus": {
|
"clairClairStatus": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"listers": {
|
|
||||||
"type": "array",
|
|
||||||
"items": {
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
"description": "The configured list of feature listers used to scan an ancestry."
|
|
||||||
},
|
|
||||||
"detectors": {
|
"detectors": {
|
||||||
"type": "array",
|
"type": "array",
|
||||||
"items": {
|
"items": {
|
||||||
"type": "string"
|
"$ref": "#/definitions/clairDetector"
|
||||||
},
|
},
|
||||||
"description": "The configured list of namespace detectors used to scan an ancestry."
|
"title": "The implemented detectors in this Clair instance"
|
||||||
},
|
},
|
||||||
"last_update_time": {
|
"last_update_time": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
|
@ -289,6 +277,23 @@
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"clairDetector": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"name": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "The name of the detector."
|
||||||
|
},
|
||||||
|
"version": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "The version of the detector."
|
||||||
|
},
|
||||||
|
"dtype": {
|
||||||
|
"$ref": "#/definitions/DetectorDType",
|
||||||
|
"description": "The type of the detector."
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"clairFeature": {
|
"clairFeature": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
|
@ -296,9 +301,9 @@
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"description": "The name of the feature."
|
"description": "The name of the feature."
|
||||||
},
|
},
|
||||||
"namespace_name": {
|
"namespace": {
|
||||||
"type": "string",
|
"$ref": "#/definitions/clairNamespace",
|
||||||
"description": "The name of the namespace in which the feature is detected."
|
"description": "The namespace in which the feature is detected."
|
||||||
},
|
},
|
||||||
"version": {
|
"version": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
|
@ -308,12 +313,20 @@
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"description": "The format used to parse version numbers for the feature."
|
"description": "The format used to parse version numbers for the feature."
|
||||||
},
|
},
|
||||||
|
"detector": {
|
||||||
|
"$ref": "#/definitions/clairDetector",
|
||||||
|
"description": "The detector used to detect this feature. This only exists when present\nin an Ancestry."
|
||||||
|
},
|
||||||
"vulnerabilities": {
|
"vulnerabilities": {
|
||||||
"type": "array",
|
"type": "array",
|
||||||
"items": {
|
"items": {
|
||||||
"$ref": "#/definitions/clairVulnerability"
|
"$ref": "#/definitions/clairVulnerability"
|
||||||
},
|
},
|
||||||
"description": "The list of vulnerabilities that affect the feature."
|
"description": "The list of vulnerabilities that affect the feature."
|
||||||
|
},
|
||||||
|
"feature_type": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "The feature type indicates if the feature represents a source package or\nbinary package."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
@ -326,7 +339,7 @@
|
||||||
},
|
},
|
||||||
"status": {
|
"status": {
|
||||||
"$ref": "#/definitions/clairClairStatus",
|
"$ref": "#/definitions/clairClairStatus",
|
||||||
"description": "The status of Clair at the time of the request."
|
"title": "The status of Clair at the time of the request"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
@ -360,6 +373,19 @@
|
||||||
"clairMarkNotificationAsReadResponse": {
|
"clairMarkNotificationAsReadResponse": {
|
||||||
"type": "object"
|
"type": "object"
|
||||||
},
|
},
|
||||||
|
"clairNamespace": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"name": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "The name of the namespace."
|
||||||
|
},
|
||||||
|
"detector": {
|
||||||
|
"$ref": "#/definitions/clairDetector",
|
||||||
|
"description": "The detector used to detect the namespace. This only exists when present\nin an Ancestry Feature."
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"clairPagedVulnerableAncestries": {
|
"clairPagedVulnerableAncestries": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
|
@ -431,7 +457,7 @@
|
||||||
},
|
},
|
||||||
"description": {
|
"description": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"description": "A description of the vulnerability according to the source for the namespace."
|
"description": "A description of the vulnerability according to the source for the\nnamespace."
|
||||||
},
|
},
|
||||||
"link": {
|
"link": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
|
|
48
vendor/github.com/coreos/clair/api/v3/clairpb/convert.go
generated
vendored
48
vendor/github.com/coreos/clair/api/v3/clairpb/convert.go
generated
vendored
|
@ -22,6 +22,13 @@ import (
|
||||||
"github.com/coreos/clair/ext/versionfmt"
|
"github.com/coreos/clair/ext/versionfmt"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// DatabaseDetectorTypeMapping maps the database detector type to the integer
|
||||||
|
// enum proto.
|
||||||
|
var DatabaseDetectorTypeMapping = map[database.DetectorType]Detector_DType{
|
||||||
|
database.NamespaceDetectorType: Detector_DType(1),
|
||||||
|
database.FeatureDetectorType: Detector_DType(2),
|
||||||
|
}
|
||||||
|
|
||||||
// PagedVulnerableAncestriesFromDatabaseModel converts database
|
// PagedVulnerableAncestriesFromDatabaseModel converts database
|
||||||
// PagedVulnerableAncestries to api PagedVulnerableAncestries and assigns
|
// PagedVulnerableAncestries to api PagedVulnerableAncestries and assigns
|
||||||
// indexes to ancestries.
|
// indexes to ancestries.
|
||||||
|
@ -92,6 +99,7 @@ func NotificationFromDatabaseModel(dbNotification database.VulnerabilityNotifica
|
||||||
return ¬i, nil
|
return ¬i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// VulnerabilityFromDatabaseModel converts database Vulnerability to api Vulnerability.
|
||||||
func VulnerabilityFromDatabaseModel(dbVuln database.Vulnerability) (*Vulnerability, error) {
|
func VulnerabilityFromDatabaseModel(dbVuln database.Vulnerability) (*Vulnerability, error) {
|
||||||
metaString := ""
|
metaString := ""
|
||||||
if dbVuln.Metadata != nil {
|
if dbVuln.Metadata != nil {
|
||||||
|
@ -112,6 +120,7 @@ func VulnerabilityFromDatabaseModel(dbVuln database.Vulnerability) (*Vulnerabili
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// VulnerabilityWithFixedInFromDatabaseModel converts database VulnerabilityWithFixedIn to api Vulnerability.
|
||||||
func VulnerabilityWithFixedInFromDatabaseModel(dbVuln database.VulnerabilityWithFixedIn) (*Vulnerability, error) {
|
func VulnerabilityWithFixedInFromDatabaseModel(dbVuln database.VulnerabilityWithFixedIn) (*Vulnerability, error) {
|
||||||
vuln, err := VulnerabilityFromDatabaseModel(dbVuln.Vulnerability)
|
vuln, err := VulnerabilityFromDatabaseModel(dbVuln.Vulnerability)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -122,23 +131,44 @@ func VulnerabilityWithFixedInFromDatabaseModel(dbVuln database.VulnerabilityWith
|
||||||
return vuln, nil
|
return vuln, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// LayerFromDatabaseModel converts database layer to api layer.
|
|
||||||
func LayerFromDatabaseModel(dbLayer database.LayerMetadata) *Layer {
|
|
||||||
layer := Layer{Hash: dbLayer.Hash}
|
|
||||||
return &layer
|
|
||||||
}
|
|
||||||
|
|
||||||
// NamespacedFeatureFromDatabaseModel converts database namespacedFeature to api Feature.
|
// NamespacedFeatureFromDatabaseModel converts database namespacedFeature to api Feature.
|
||||||
func NamespacedFeatureFromDatabaseModel(feature database.NamespacedFeature) *Feature {
|
func NamespacedFeatureFromDatabaseModel(feature database.AncestryFeature) *Feature {
|
||||||
version := feature.Feature.Version
|
version := feature.Feature.Version
|
||||||
if version == versionfmt.MaxVersion {
|
if version == versionfmt.MaxVersion {
|
||||||
version = "None"
|
version = "None"
|
||||||
}
|
}
|
||||||
|
|
||||||
return &Feature{
|
return &Feature{
|
||||||
Name: feature.Feature.Name,
|
Name: feature.Feature.Name,
|
||||||
NamespaceName: feature.Namespace.Name,
|
Namespace: &Namespace{
|
||||||
|
Name: feature.Namespace.Name,
|
||||||
|
Detector: DetectorFromDatabaseModel(feature.NamespaceBy),
|
||||||
|
},
|
||||||
VersionFormat: feature.Namespace.VersionFormat,
|
VersionFormat: feature.Namespace.VersionFormat,
|
||||||
Version: version,
|
Version: version,
|
||||||
|
Detector: DetectorFromDatabaseModel(feature.FeatureBy),
|
||||||
|
FeatureType: string(feature.Type),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DetectorFromDatabaseModel converts database detector to api detector.
|
||||||
|
func DetectorFromDatabaseModel(detector database.Detector) *Detector {
|
||||||
|
if !detector.Valid() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &Detector{
|
||||||
|
Name: detector.Name,
|
||||||
|
Version: detector.Version,
|
||||||
|
Dtype: DatabaseDetectorTypeMapping[detector.DType],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DetectorsFromDatabaseModel converts database detectors to api detectors.
|
||||||
|
func DetectorsFromDatabaseModel(dbDetectors []database.Detector) []*Detector {
|
||||||
|
detectors := make([]*Detector, 0, len(dbDetectors))
|
||||||
|
for _, d := range dbDetectors {
|
||||||
|
detectors = append(detectors, DetectorFromDatabaseModel(d))
|
||||||
|
}
|
||||||
|
|
||||||
|
return detectors
|
||||||
|
}
|
||||||
|
|
96
vendor/github.com/coreos/clair/database/ancestry.go
generated
vendored
Normal file
96
vendor/github.com/coreos/clair/database/ancestry.go
generated
vendored
Normal file
|
@ -0,0 +1,96 @@
|
||||||
|
// Copyright 2019 clair authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package database
|
||||||
|
|
||||||
|
// Ancestry is a manifest that keeps all layers in an image in order.
|
||||||
|
type Ancestry struct {
|
||||||
|
// Name is a globally unique value for a set of layers. This is often the
|
||||||
|
// sha256 digest of an OCI/Docker manifest.
|
||||||
|
Name string `json:"name"`
|
||||||
|
// By contains the processors that are used when computing the
|
||||||
|
// content of this ancestry.
|
||||||
|
By []Detector `json:"by"`
|
||||||
|
// Layers should be ordered and i_th layer is the parent of i+1_th layer in
|
||||||
|
// the slice.
|
||||||
|
Layers []AncestryLayer `json:"layers"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Valid checks if the ancestry is compliant to spec.
|
||||||
|
func (a *Ancestry) Valid() bool {
|
||||||
|
if a == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Name == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, d := range a.By {
|
||||||
|
if !d.Valid() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, l := range a.Layers {
|
||||||
|
if !l.Valid() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// AncestryLayer is a layer with all detected namespaced features.
|
||||||
|
type AncestryLayer struct {
|
||||||
|
// Hash is the sha-256 tarsum on the layer's blob content.
|
||||||
|
Hash string `json:"hash"`
|
||||||
|
// Features are the features introduced by this layer when it was
|
||||||
|
// processed.
|
||||||
|
Features []AncestryFeature `json:"features"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Valid checks if the Ancestry Layer is compliant to the spec.
|
||||||
|
func (l *AncestryLayer) Valid() bool {
|
||||||
|
if l == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if l.Hash == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFeatures returns the Ancestry's features.
|
||||||
|
func (l *AncestryLayer) GetFeatures() []NamespacedFeature {
|
||||||
|
nsf := make([]NamespacedFeature, 0, len(l.Features))
|
||||||
|
for _, f := range l.Features {
|
||||||
|
nsf = append(nsf, f.NamespacedFeature)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nsf
|
||||||
|
}
|
||||||
|
|
||||||
|
// AncestryFeature is a namespaced feature with the detectors used to
// find this feature.
type AncestryFeature struct {
	NamespacedFeature `json:"namespacedFeature"`

	// FeatureBy is the detector that detected the feature.
	FeatureBy Detector `json:"featureBy"`
	// NamespaceBy is the detector that detected the namespace.
	NamespaceBy Detector `json:"namespaceBy"`
}
|
57
vendor/github.com/coreos/clair/database/database.go
generated
vendored
57
vendor/github.com/coreos/clair/database/database.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2017 clair authors
|
// Copyright 2019 clair authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
@ -17,7 +17,6 @@
|
||||||
package database
|
package database
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
@ -27,12 +26,20 @@ import (
|
||||||
var (
|
var (
|
||||||
// ErrBackendException is an error that occurs when the database backend
|
// ErrBackendException is an error that occurs when the database backend
|
||||||
// does not work properly (ie. unreachable).
|
// does not work properly (ie. unreachable).
|
||||||
ErrBackendException = errors.New("database: an error occurred when querying the backend")
|
ErrBackendException = NewStorageError("an error occurred when querying the backend")
|
||||||
|
|
||||||
// ErrInconsistent is an error that occurs when a database consistency check
|
// ErrInconsistent is an error that occurs when a database consistency check
|
||||||
// fails (i.e. when an entity which is supposed to be unique is detected
|
// fails (i.e. when an entity which is supposed to be unique is detected
|
||||||
// twice)
|
// twice)
|
||||||
ErrInconsistent = errors.New("database: inconsistent database")
|
ErrInconsistent = NewStorageError("inconsistent database")
|
||||||
|
|
||||||
|
// ErrInvalidParameters is an error that occurs when the parameters are not valid.
|
||||||
|
ErrInvalidParameters = NewStorageError("parameters are not valid")
|
||||||
|
|
||||||
|
// ErrMissingEntities is an error that occurs when an associated immutable
|
||||||
|
// entity doesn't exist in the database. This error can indicate a wrong
|
||||||
|
// implementation or corrupted database.
|
||||||
|
ErrMissingEntities = NewStorageError("associated immutable entities are missing in the database")
|
||||||
)
|
)
|
||||||
|
|
||||||
// RegistrableComponentConfig is a configuration block that can be used to
|
// RegistrableComponentConfig is a configuration block that can be used to
|
||||||
|
@ -99,6 +106,9 @@ type Session interface {
|
||||||
// namespaced features. If the ancestry is not found, return false.
|
// namespaced features. If the ancestry is not found, return false.
|
||||||
FindAncestry(name string) (ancestry Ancestry, found bool, err error)
|
FindAncestry(name string) (ancestry Ancestry, found bool, err error)
|
||||||
|
|
||||||
|
// PersistDetector inserts a slice of detectors if not in the database.
|
||||||
|
PersistDetectors(detectors []Detector) error
|
||||||
|
|
||||||
// PersistFeatures inserts a set of features if not in the database.
|
// PersistFeatures inserts a set of features if not in the database.
|
||||||
PersistFeatures(features []Feature) error
|
PersistFeatures(features []Feature) error
|
||||||
|
|
||||||
|
@ -120,12 +130,10 @@ type Session interface {
|
||||||
// PersistNamespaces inserts a set of namespaces if not in the database.
|
// PersistNamespaces inserts a set of namespaces if not in the database.
|
||||||
PersistNamespaces([]Namespace) error
|
PersistNamespaces([]Namespace) error
|
||||||
|
|
||||||
// PersistLayer persists a layer's content in the database. The given
|
// PersistLayer appends a layer's content in the database.
|
||||||
// namespaces and features can be partial content of this layer.
|
|
||||||
//
|
//
|
||||||
// The layer, namespaces and features are expected to be already existing
|
// If any feature, namespace, or detector is not in the database, it returns not found error.
|
||||||
// in the database.
|
PersistLayer(hash string, features []LayerFeature, namespaces []LayerNamespace, detectedBy []Detector) error
|
||||||
PersistLayer(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error
|
|
||||||
|
|
||||||
// FindLayer returns a layer with all detected features and
|
// FindLayer returns a layer with all detected features and
|
||||||
// namespaces.
|
// namespaces.
|
||||||
|
@ -157,8 +165,8 @@ type Session interface {
|
||||||
// affected ancestries affected by old or new vulnerability.
|
// affected ancestries affected by old or new vulnerability.
|
||||||
//
|
//
|
||||||
// Because the number of affected ancestries maybe large, they are paginated
|
// Because the number of affected ancestries maybe large, they are paginated
|
||||||
// and their pages are specified by the paination token, which, if empty, are
|
// and their pages are specified by the pagination token, which should be
|
||||||
// always considered first page.
|
// considered first page when it's empty.
|
||||||
FindVulnerabilityNotification(name string, limit int, oldVulnerabilityPage pagination.Token, newVulnerabilityPage pagination.Token) (noti VulnerabilityNotificationWithVulnerable, found bool, err error)
|
FindVulnerabilityNotification(name string, limit int, oldVulnerabilityPage pagination.Token, newVulnerabilityPage pagination.Token) (noti VulnerabilityNotificationWithVulnerable, found bool, err error)
|
||||||
|
|
||||||
// MarkNotificationAsRead marks a Notification as notified now, assuming
|
// MarkNotificationAsRead marks a Notification as notified now, assuming
|
||||||
|
@ -174,23 +182,22 @@ type Session interface {
|
||||||
// FindKeyValue retrieves a value from the given key.
|
// FindKeyValue retrieves a value from the given key.
|
||||||
FindKeyValue(key string) (value string, found bool, err error)
|
FindKeyValue(key string) (value string, found bool, err error)
|
||||||
|
|
||||||
// Lock creates or renew a Lock in the database with the given name, owner
|
// AcquireLock acquires a brand new lock in the database with a given name
|
||||||
// and duration.
|
// for the given duration.
|
||||||
//
|
//
|
||||||
// After the specified duration, the Lock expires by itself if it hasn't been
|
// A lock can only have one owner.
|
||||||
// unlocked, and thus, let other users create a Lock with the same name.
|
// This method should NOT block until a lock is acquired.
|
||||||
// However, the owner can renew its Lock by setting renew to true.
|
AcquireLock(name, owner string, duration time.Duration) (acquired bool, expiration time.Time, err error)
|
||||||
// Lock should not block, it should instead returns whether the Lock has been
|
|
||||||
// successfully acquired/renewed. If it's the case, the expiration time of
|
|
||||||
// that Lock is returned as well.
|
|
||||||
Lock(name string, owner string, duration time.Duration, renew bool) (success bool, expiration time.Time, err error)
|
|
||||||
|
|
||||||
// Unlock releases an existing Lock.
|
// ExtendLock extends an existing lock such that the lock will expire at the
|
||||||
Unlock(name, owner string) error
|
// current time plus the provided duration.
|
||||||
|
//
|
||||||
|
// This method should return immediately with an error if the lock does not
|
||||||
|
// exist.
|
||||||
|
ExtendLock(name, owner string, duration time.Duration) (extended bool, expiration time.Time, err error)
|
||||||
|
|
||||||
// FindLock returns the owner of a Lock specified by the name, and its
|
// ReleaseLock releases an existing lock.
|
||||||
// expiration time if it exists.
|
ReleaseLock(name, owner string) error
|
||||||
FindLock(name string) (owner string, expiration time.Time, found bool, err error)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Datastore represents a persistent data store
|
// Datastore represents a persistent data store
|
||||||
|
|
538
vendor/github.com/coreos/clair/database/dbutil.go
generated
vendored
Normal file
538
vendor/github.com/coreos/clair/database/dbutil.go
generated
vendored
Normal file
|
@ -0,0 +1,538 @@
|
||||||
|
// Copyright 2018 clair authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package database
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
|
||||||
|
"github.com/coreos/clair/pkg/commonerr"
|
||||||
|
"github.com/coreos/clair/pkg/pagination"
|
||||||
|
"github.com/deckarep/golang-set"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DeduplicateNamespaces deduplicates a list of namespaces.
|
||||||
|
func DeduplicateNamespaces(namespaces ...Namespace) []Namespace {
|
||||||
|
nsSet := mapset.NewSet()
|
||||||
|
for _, ns := range namespaces {
|
||||||
|
nsSet.Add(ns)
|
||||||
|
}
|
||||||
|
|
||||||
|
uniqueNamespaces := make([]Namespace, 0, nsSet.Cardinality())
|
||||||
|
for ns := range nsSet.Iter() {
|
||||||
|
uniqueNamespaces = append(uniqueNamespaces, ns.(Namespace))
|
||||||
|
}
|
||||||
|
|
||||||
|
return uniqueNamespaces
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeduplicateFeatures deduplicates a list of list of features.
|
||||||
|
func DeduplicateFeatures(features ...Feature) []Feature {
|
||||||
|
fSet := mapset.NewSet()
|
||||||
|
for _, f := range features {
|
||||||
|
fSet.Add(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ConvertFeatureSetToFeatures(fSet)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConvertFeatureSetToFeatures converts a feature set to an array of features
|
||||||
|
func ConvertFeatureSetToFeatures(features mapset.Set) []Feature {
|
||||||
|
uniqueFeatures := make([]Feature, 0, features.Cardinality())
|
||||||
|
for f := range features.Iter() {
|
||||||
|
uniqueFeatures = append(uniqueFeatures, f.(Feature))
|
||||||
|
}
|
||||||
|
|
||||||
|
return uniqueFeatures
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConvertFeatureSetToLayerFeatures wraps each Feature in the set in a
// LayerFeature (all other LayerFeature fields are left at their zero values)
// and returns the resulting slice.
func ConvertFeatureSetToLayerFeatures(features mapset.Set) []LayerFeature {
	uniqueLayerFeatures := make([]LayerFeature, 0, features.Cardinality())
	for f := range features.Iter() {
		feature := f.(Feature)
		layerFeature := LayerFeature{
			Feature: feature,
		}
		uniqueLayerFeatures = append(uniqueLayerFeatures, layerFeature)
	}

	return uniqueLayerFeatures
}
||||||
|
|
||||||
|
// FindKeyValueAndRollback wraps session FindKeyValue function with begin and
|
||||||
|
// roll back.
|
||||||
|
func FindKeyValueAndRollback(datastore Datastore, key string) (value string, ok bool, err error) {
|
||||||
|
var tx Session
|
||||||
|
tx, err = datastore.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
|
||||||
|
value, ok, err = tx.FindKeyValue(key)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// PersistPartialLayerAndCommit wraps session PersistLayer function with begin and
|
||||||
|
// commit.
|
||||||
|
func PersistPartialLayerAndCommit(datastore Datastore, layer *Layer) error {
|
||||||
|
tx, err := datastore.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
|
||||||
|
if err := tx.PersistLayer(layer.Hash, layer.Features, layer.Namespaces, layer.By); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// PersistFeaturesAndCommit wraps session PersistFeaturesAndCommit function with begin and commit.
|
||||||
|
func PersistFeaturesAndCommit(datastore Datastore, features []Feature) error {
|
||||||
|
tx, err := datastore.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
|
||||||
|
if err := tx.PersistFeatures(features); err != nil {
|
||||||
|
serialized, _ := json.Marshal(features)
|
||||||
|
log.WithError(err).WithField("feature", string(serialized)).Error("failed to store features")
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// PersistNamespacesAndCommit wraps session PersistNamespaces function with
|
||||||
|
// begin and commit.
|
||||||
|
func PersistNamespacesAndCommit(datastore Datastore, namespaces []Namespace) error {
|
||||||
|
tx, err := datastore.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
|
||||||
|
if err := tx.PersistNamespaces(namespaces); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindAncestryAndRollback wraps session FindAncestry function with begin and
|
||||||
|
// rollback.
|
||||||
|
func FindAncestryAndRollback(datastore Datastore, name string) (Ancestry, bool, error) {
|
||||||
|
tx, err := datastore.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return Ancestry{}, false, err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
|
||||||
|
return tx.FindAncestry(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindLayerAndRollback wraps session FindLayer function with begin and rollback.
|
||||||
|
func FindLayerAndRollback(datastore Datastore, hash string) (layer *Layer, ok bool, err error) {
|
||||||
|
var tx Session
|
||||||
|
if tx, err = datastore.Begin(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
defer tx.Rollback()
|
||||||
|
// TODO(sidac): In order to make the session interface more idiomatic, we'll
|
||||||
|
// return the pointer value in the future.
|
||||||
|
var dereferencedLayer Layer
|
||||||
|
dereferencedLayer, ok, err = tx.FindLayer(hash)
|
||||||
|
layer = &dereferencedLayer
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeduplicateNamespacedFeatures returns a copy of all unique features in the
|
||||||
|
// input.
|
||||||
|
func DeduplicateNamespacedFeatures(features []NamespacedFeature) []NamespacedFeature {
|
||||||
|
nsSet := mapset.NewSet()
|
||||||
|
for _, ns := range features {
|
||||||
|
nsSet.Add(ns)
|
||||||
|
}
|
||||||
|
|
||||||
|
uniqueFeatures := make([]NamespacedFeature, 0, nsSet.Cardinality())
|
||||||
|
for ns := range nsSet.Iter() {
|
||||||
|
uniqueFeatures = append(uniqueFeatures, ns.(NamespacedFeature))
|
||||||
|
}
|
||||||
|
|
||||||
|
return uniqueFeatures
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAncestryFeatures returns a list of unique namespaced features in the
|
||||||
|
// ancestry.
|
||||||
|
func GetAncestryFeatures(ancestry Ancestry) []NamespacedFeature {
|
||||||
|
features := []NamespacedFeature{}
|
||||||
|
for _, layer := range ancestry.Layers {
|
||||||
|
features = append(features, layer.GetFeatures()...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return DeduplicateNamespacedFeatures(features)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpsertAncestryAndCommit wraps session UpsertAncestry function with begin and commit.
|
||||||
|
func UpsertAncestryAndCommit(datastore Datastore, ancestry *Ancestry) error {
|
||||||
|
tx, err := datastore.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = tx.UpsertAncestry(*ancestry); err != nil {
|
||||||
|
log.WithError(err).Error("failed to upsert the ancestry")
|
||||||
|
serialized, _ := json.Marshal(ancestry)
|
||||||
|
log.Debug(string(serialized))
|
||||||
|
|
||||||
|
tx.Rollback()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = tx.Commit(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PersistNamespacedFeaturesAndCommit wraps session PersistNamespacedFeatures function
|
||||||
|
// with begin and commit.
|
||||||
|
func PersistNamespacedFeaturesAndCommit(datastore Datastore, features []NamespacedFeature) error {
|
||||||
|
tx, err := datastore.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := tx.PersistNamespacedFeatures(features); err != nil {
|
||||||
|
tx.Rollback()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := tx.Commit(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CacheRelatedVulnerabilityAndCommit wraps session CacheAffectedNamespacedFeatures
|
||||||
|
// function with begin and commit.
|
||||||
|
func CacheRelatedVulnerabilityAndCommit(datastore Datastore, features []NamespacedFeature) error {
|
||||||
|
tx, err := datastore.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := tx.CacheAffectedNamespacedFeatures(features); err != nil {
|
||||||
|
tx.Rollback()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// IntersectDetectors returns the detectors in both d1 and d2.
|
||||||
|
func IntersectDetectors(d1 []Detector, d2 []Detector) []Detector {
|
||||||
|
d1Set := mapset.NewSet()
|
||||||
|
for _, d := range d1 {
|
||||||
|
d1Set.Add(d)
|
||||||
|
}
|
||||||
|
|
||||||
|
d2Set := mapset.NewSet()
|
||||||
|
for _, d := range d2 {
|
||||||
|
d2Set.Add(d)
|
||||||
|
}
|
||||||
|
|
||||||
|
inter := d1Set.Intersect(d2Set)
|
||||||
|
detectors := make([]Detector, 0, inter.Cardinality())
|
||||||
|
for d := range inter.Iter() {
|
||||||
|
detectors = append(detectors, d.(Detector))
|
||||||
|
}
|
||||||
|
|
||||||
|
return detectors
|
||||||
|
}
|
||||||
|
|
||||||
|
// DiffDetectors returns the detectors belongs to d1 but not d2
|
||||||
|
func DiffDetectors(d1 []Detector, d2 []Detector) []Detector {
|
||||||
|
d1Set := mapset.NewSet()
|
||||||
|
for _, d := range d1 {
|
||||||
|
d1Set.Add(d)
|
||||||
|
}
|
||||||
|
|
||||||
|
d2Set := mapset.NewSet()
|
||||||
|
for _, d := range d2 {
|
||||||
|
d2Set.Add(d)
|
||||||
|
}
|
||||||
|
|
||||||
|
diff := d1Set.Difference(d2Set)
|
||||||
|
detectors := make([]Detector, 0, diff.Cardinality())
|
||||||
|
for d := range diff.Iter() {
|
||||||
|
detectors = append(detectors, d.(Detector))
|
||||||
|
}
|
||||||
|
|
||||||
|
return detectors
|
||||||
|
}
|
||||||
|
|
||||||
|
// MergeLayers merges all content in new layer to l, where the content is
// updated.
//
// Elements already present in l are kept untouched; elements from the new
// layer that l lacks are appended in their original relative order. l is
// mutated in place and also returned for convenience.
//
// NOTE(review): the parameter name `new` shadows the Go builtin; renaming it
// would be a cosmetic follow-up.
func MergeLayers(l *Layer, new *Layer) *Layer {
	// Membership sets seeded with l's existing content so duplicates from
	// the new layer can be skipped in O(1) per element.
	featureSet := mapset.NewSet()
	namespaceSet := mapset.NewSet()
	bySet := mapset.NewSet()

	for _, f := range l.Features {
		featureSet.Add(f)
	}

	for _, ns := range l.Namespaces {
		namespaceSet.Add(ns)
	}

	for _, d := range l.By {
		bySet.Add(d)
	}

	// Append only the unseen elements; adding each to its set also guards
	// against duplicates within the new layer itself.
	for _, feature := range new.Features {
		if !featureSet.Contains(feature) {
			l.Features = append(l.Features, feature)
			featureSet.Add(feature)
		}
	}

	for _, namespace := range new.Namespaces {
		if !namespaceSet.Contains(namespace) {
			l.Namespaces = append(l.Namespaces, namespace)
			namespaceSet.Add(namespace)
		}
	}

	for _, detector := range new.By {
		if !bySet.Contains(detector) {
			l.By = append(l.By, detector)
			bySet.Add(detector)
		}
	}

	return l
}
|
||||||
|
|
||||||
|
// AcquireLock acquires a named global lock for a duration.
|
||||||
|
func AcquireLock(datastore Datastore, name, owner string, duration time.Duration) (acquired bool, expiration time.Time) {
|
||||||
|
tx, err := datastore.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return false, time.Time{}
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
|
||||||
|
locked, t, err := tx.AcquireLock(name, owner, duration)
|
||||||
|
if err != nil {
|
||||||
|
return false, time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
if locked {
|
||||||
|
if err := tx.Commit(); err != nil {
|
||||||
|
return false, time.Time{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return locked, t
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExtendLock extends the duration of an existing global lock for the given
|
||||||
|
// duration.
|
||||||
|
func ExtendLock(ds Datastore, name, whoami string, desiredLockDuration time.Duration) (bool, time.Time) {
|
||||||
|
tx, err := ds.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return false, time.Time{}
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
|
||||||
|
locked, expiration, err := tx.ExtendLock(name, whoami, desiredLockDuration)
|
||||||
|
if err != nil {
|
||||||
|
return false, time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
if locked {
|
||||||
|
if err := tx.Commit(); err == nil {
|
||||||
|
return locked, expiration
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReleaseLock releases a named global lock.
|
||||||
|
func ReleaseLock(datastore Datastore, name, owner string) {
|
||||||
|
tx, err := datastore.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
|
||||||
|
if err := tx.ReleaseLock(name, owner); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := tx.Commit(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PersistDetectorsAndCommit stores the detectors in the data store.
|
||||||
|
func PersistDetectorsAndCommit(store Datastore, detectors []Detector) error {
|
||||||
|
tx, err := store.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer tx.Rollback()
|
||||||
|
if err := tx.PersistDetectors(detectors); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := tx.Commit(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarkNotificationAsReadAndCommit marks a notification as read.
|
||||||
|
func MarkNotificationAsReadAndCommit(store Datastore, name string) (bool, error) {
|
||||||
|
tx, err := store.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer tx.Rollback()
|
||||||
|
err = tx.DeleteNotification(name)
|
||||||
|
if err == commonerr.ErrNotFound {
|
||||||
|
return false, nil
|
||||||
|
} else if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := tx.Commit(); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindAffectedNamespacedFeaturesAndRollback finds the vulnerabilities on each
|
||||||
|
// feature.
|
||||||
|
func FindAffectedNamespacedFeaturesAndRollback(store Datastore, features []NamespacedFeature) ([]NullableAffectedNamespacedFeature, error) {
|
||||||
|
tx, err := store.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer tx.Rollback()
|
||||||
|
nullableFeatures, err := tx.FindAffectedNamespacedFeatures(features)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nullableFeatures, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindVulnerabilityNotificationAndRollback finds the vulnerability notification
|
||||||
|
// and rollback.
|
||||||
|
func FindVulnerabilityNotificationAndRollback(store Datastore, name string, limit int, oldVulnerabilityPage pagination.Token, newVulnerabilityPage pagination.Token) (VulnerabilityNotificationWithVulnerable, bool, error) {
|
||||||
|
tx, err := store.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return VulnerabilityNotificationWithVulnerable{}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer tx.Rollback()
|
||||||
|
return tx.FindVulnerabilityNotification(name, limit, oldVulnerabilityPage, newVulnerabilityPage)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindNewNotification finds notifications either never notified or notified
|
||||||
|
// before the given time.
|
||||||
|
func FindNewNotification(store Datastore, notifiedBefore time.Time) (NotificationHook, bool, error) {
|
||||||
|
tx, err := store.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return NotificationHook{}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer tx.Rollback()
|
||||||
|
return tx.FindNewNotification(notifiedBefore)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateKeyValueAndCommit stores the key value to storage.
|
||||||
|
func UpdateKeyValueAndCommit(store Datastore, key, value string) error {
|
||||||
|
tx, err := store.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer tx.Rollback()
|
||||||
|
if err = tx.UpdateKeyValue(key, value); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertVulnerabilityNotificationsAndCommit inserts the notifications into db
|
||||||
|
// and commit.
|
||||||
|
func InsertVulnerabilityNotificationsAndCommit(store Datastore, notifications []VulnerabilityNotification) error {
|
||||||
|
tx, err := store.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
|
||||||
|
if err := tx.InsertVulnerabilityNotifications(notifications); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindVulnerabilitiesAndRollback finds the vulnerabilities based on given ids.
|
||||||
|
func FindVulnerabilitiesAndRollback(store Datastore, ids []VulnerabilityID) ([]NullableVulnerability, error) {
|
||||||
|
tx, err := store.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer tx.Rollback()
|
||||||
|
return tx.FindVulnerabilities(ids)
|
||||||
|
}
|
||||||
|
|
||||||
|
func UpdateVulnerabilitiesAndCommit(store Datastore, toRemove []VulnerabilityID, toAdd []VulnerabilityWithAffected) error {
|
||||||
|
tx, err := store.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := tx.DeleteVulnerabilities(toRemove); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := tx.InsertVulnerabilities(toAdd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
144
vendor/github.com/coreos/clair/database/detector.go
generated
vendored
Normal file
144
vendor/github.com/coreos/clair/database/detector.go
generated
vendored
Normal file
|
@ -0,0 +1,144 @@
|
||||||
|
// Copyright 2018 clair authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package database
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql/driver"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// NamespaceDetectorType is a type of detector that extracts the namespaces.
	NamespaceDetectorType DetectorType = "namespace"
	// FeatureDetectorType is a type of detector that extracts the features.
	FeatureDetectorType DetectorType = "feature"
)

// DetectorTypes contains all detector types.
var (
	// DetectorTypes lists every recognized detector type; NewDetectorType
	// and DetectorType.Valid both validate against this slice.
	DetectorTypes = []DetectorType{
		NamespaceDetectorType,
		FeatureDetectorType,
	}
	// ErrFailedToParseDetectorType is the error returned when a detector type could
	// not be parsed from a string.
	ErrFailedToParseDetectorType = errors.New("failed to parse DetectorType from input")
	// ErrInvalidDetector is the error returned when a detector from database has
	// invalid name or version or type.
	ErrInvalidDetector = errors.New("the detector has invalid metadata")
)

// DetectorType is the type of a detector.
type DetectorType string
||||||
|
|
||||||
|
// Value implements the database/sql/driver.Valuer interface.
//
// A DetectorType is stored as its plain string form; this conversion cannot
// fail.
func (s DetectorType) Value() (driver.Value, error) {
	return string(s), nil
}
||||||
|
|
||||||
|
// Scan implements the database/sql.Scanner interface.
|
||||||
|
func (s *DetectorType) Scan(value interface{}) error {
|
||||||
|
val, ok := value.([]byte)
|
||||||
|
if !ok {
|
||||||
|
return errors.New("could not scan a Severity from a non-string input")
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
*s, err = NewDetectorType(string(val))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDetectorType attempts to parse a string into a standard DetectorType
|
||||||
|
// value.
|
||||||
|
func NewDetectorType(s string) (DetectorType, error) {
|
||||||
|
for _, ss := range DetectorTypes {
|
||||||
|
if strings.EqualFold(s, string(ss)) {
|
||||||
|
return ss, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", ErrFailedToParseDetectorType
|
||||||
|
}
|
||||||
|
|
||||||
|
// Valid checks if a detector type is defined.
|
||||||
|
func (s DetectorType) Valid() bool {
|
||||||
|
for _, t := range DetectorTypes {
|
||||||
|
if s == t {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Detector is a versioned Clair extension.
type Detector struct {
	// Name of an extension should be non-empty and uniquely identifies the
	// extension.
	Name string `json:"name"`
	// Version of an extension should be non-empty.
	Version string `json:"version"`
	// DType is the type of the extension and should be one of the types in
	// DetectorTypes.
	DType DetectorType `json:"dtype"`
}
||||||
|
|
||||||
|
// Valid checks if all fields in the detector satisfies the spec.
|
||||||
|
func (d Detector) Valid() bool {
|
||||||
|
if d.Name == "" || d.Version == "" || !d.DType.Valid() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a unique string representation of the detector.
|
||||||
|
func (d Detector) String() string {
|
||||||
|
return fmt.Sprintf("%s:%s", d.Name, d.Version)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewNamespaceDetector returns a new namespace detector.
|
||||||
|
func NewNamespaceDetector(name, version string) Detector {
|
||||||
|
return Detector{
|
||||||
|
Name: name,
|
||||||
|
Version: version,
|
||||||
|
DType: NamespaceDetectorType,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFeatureDetector returns a new feature detector.
|
||||||
|
func NewFeatureDetector(name, version string) Detector {
|
||||||
|
return Detector{
|
||||||
|
Name: name,
|
||||||
|
Version: version,
|
||||||
|
DType: FeatureDetectorType,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SerializeDetectors returns the string representation of given detectors.
|
||||||
|
func SerializeDetectors(detectors []Detector) []string {
|
||||||
|
strDetectors := []string{}
|
||||||
|
for _, d := range detectors {
|
||||||
|
strDetectors = append(strDetectors, d.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
return strDetectors
|
||||||
|
}
|
35
vendor/github.com/coreos/clair/database/error.go
generated
vendored
Normal file
35
vendor/github.com/coreos/clair/database/error.go
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
||||||
|
// Copyright 2019 clair authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package database
|
||||||
|
|
||||||
|
// StorageError is database error
|
||||||
|
type StorageError struct {
|
||||||
|
reason string
|
||||||
|
original error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *StorageError) Error() string {
|
||||||
|
return e.reason
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewStorageErrorWithInternalError creates a new database error
|
||||||
|
func NewStorageErrorWithInternalError(reason string, originalError error) *StorageError {
|
||||||
|
return &StorageError{reason, originalError}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewStorageError creates a new database error
|
||||||
|
func NewStorageError(reason string) *StorageError {
|
||||||
|
return &StorageError{reason, nil}
|
||||||
|
}
|
96
vendor/github.com/coreos/clair/database/feature.go
generated
vendored
Normal file
96
vendor/github.com/coreos/clair/database/feature.go
generated
vendored
Normal file
|
@ -0,0 +1,96 @@
|
||||||
|
// Copyright 2019 clair authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package database
|
||||||
|
|
||||||
|
// Feature represents a package detected in a layer but the namespace is not
|
||||||
|
// determined.
|
||||||
|
//
|
||||||
|
// e.g. Name: Libssl1.0, Version: 1.0, VersionFormat: dpkg, Type: binary
|
||||||
|
// dpkg is the version format of the installer package manager, which in this
|
||||||
|
// case could be dpkg or apk.
|
||||||
|
type Feature struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Version string `json:"version"`
|
||||||
|
VersionFormat string `json:"versionFormat"`
|
||||||
|
Type FeatureType `json:"type"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// NamespacedFeature is a feature with determined namespace and can be affected
|
||||||
|
// by vulnerabilities.
|
||||||
|
//
|
||||||
|
// e.g. OpenSSL 1.0 dpkg Debian:7.
|
||||||
|
type NamespacedFeature struct {
|
||||||
|
Feature `json:"feature"`
|
||||||
|
|
||||||
|
Namespace Namespace `json:"namespace"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// AffectedNamespacedFeature is a namespaced feature affected by the
|
||||||
|
// vulnerabilities with fixed-in versions for this feature.
|
||||||
|
type AffectedNamespacedFeature struct {
|
||||||
|
NamespacedFeature
|
||||||
|
|
||||||
|
AffectedBy []VulnerabilityWithFixedIn
|
||||||
|
}
|
||||||
|
|
||||||
|
// VulnerabilityWithFixedIn is used for AffectedNamespacedFeature to retrieve
|
||||||
|
// the affecting vulnerabilities and the fixed-in versions for the feature.
|
||||||
|
type VulnerabilityWithFixedIn struct {
|
||||||
|
Vulnerability
|
||||||
|
|
||||||
|
FixedInVersion string
|
||||||
|
}
|
||||||
|
|
||||||
|
// AffectedFeature is used to determine whether a namespaced feature is affected
|
||||||
|
// by a Vulnerability. Namespace and Feature Name is unique. Affected Feature is
|
||||||
|
// bound to vulnerability.
|
||||||
|
type AffectedFeature struct {
|
||||||
|
// FeatureType determines which type of package it affects.
|
||||||
|
FeatureType FeatureType
|
||||||
|
Namespace Namespace
|
||||||
|
FeatureName string
|
||||||
|
// FixedInVersion is known next feature version that's not affected by the
|
||||||
|
// vulnerability. Empty FixedInVersion means the unaffected version is
|
||||||
|
// unknown.
|
||||||
|
FixedInVersion string
|
||||||
|
// AffectedVersion contains the version range to determine whether or not a
|
||||||
|
// feature is affected.
|
||||||
|
AffectedVersion string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NullableAffectedNamespacedFeature is an affectednamespacedfeature with
|
||||||
|
// whether it's found in datastore.
|
||||||
|
type NullableAffectedNamespacedFeature struct {
|
||||||
|
AffectedNamespacedFeature
|
||||||
|
|
||||||
|
Valid bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewFeature(name string, version string, versionFormat string, featureType FeatureType) *Feature {
|
||||||
|
return &Feature{name, version, versionFormat, featureType}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewBinaryPackage(name string, version string, versionFormat string) *Feature {
|
||||||
|
return &Feature{name, version, versionFormat, BinaryPackage}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewSourcePackage(name string, version string, versionFormat string) *Feature {
|
||||||
|
return &Feature{name, version, versionFormat, SourcePackage}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewNamespacedFeature(namespace *Namespace, feature *Feature) *NamespacedFeature {
|
||||||
|
// TODO: namespaced feature should use pointer values
|
||||||
|
return &NamespacedFeature{*feature, *namespace}
|
||||||
|
}
|
52
vendor/github.com/coreos/clair/database/feature_type.go
generated
vendored
Normal file
52
vendor/github.com/coreos/clair/database/feature_type.go
generated
vendored
Normal file
|
@ -0,0 +1,52 @@
|
||||||
|
// Copyright 2019 clair authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package database
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql/driver"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FeatureType indicates the type of feature that a vulnerability
|
||||||
|
// affects.
|
||||||
|
type FeatureType string
|
||||||
|
|
||||||
|
const (
|
||||||
|
SourcePackage FeatureType = "source"
|
||||||
|
BinaryPackage FeatureType = "binary"
|
||||||
|
)
|
||||||
|
|
||||||
|
var featureTypes = []FeatureType{
|
||||||
|
SourcePackage,
|
||||||
|
BinaryPackage,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan implements the database/sql.Scanner interface.
|
||||||
|
func (t *FeatureType) Scan(value interface{}) error {
|
||||||
|
val := value.(string)
|
||||||
|
for _, ft := range featureTypes {
|
||||||
|
if string(ft) == val {
|
||||||
|
*t = ft
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
panic(fmt.Sprintf("invalid feature type received from database: '%s'", val))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value implements the database/sql/driver.Valuer interface.
|
||||||
|
func (t *FeatureType) Value() (driver.Value, error) {
|
||||||
|
return string(*t), nil
|
||||||
|
}
|
65
vendor/github.com/coreos/clair/database/layer.go
generated
vendored
Normal file
65
vendor/github.com/coreos/clair/database/layer.go
generated
vendored
Normal file
|
@ -0,0 +1,65 @@
|
||||||
|
// Copyright 2019 clair authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package database
|
||||||
|
|
||||||
|
// Layer is a layer with all the detected features and namespaces.
|
||||||
|
type Layer struct {
|
||||||
|
// Hash is the sha-256 tarsum on the layer's blob content.
|
||||||
|
Hash string `json:"hash"`
|
||||||
|
// By contains a list of detectors scanned this Layer.
|
||||||
|
By []Detector `json:"by"`
|
||||||
|
Namespaces []LayerNamespace `json:"namespaces"`
|
||||||
|
Features []LayerFeature `json:"features"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Layer) GetFeatures() []Feature {
|
||||||
|
features := make([]Feature, 0, len(l.Features))
|
||||||
|
for _, f := range l.Features {
|
||||||
|
features = append(features, f.Feature)
|
||||||
|
}
|
||||||
|
|
||||||
|
return features
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Layer) GetNamespaces() []Namespace {
|
||||||
|
namespaces := make([]Namespace, 0, len(l.Namespaces)+len(l.Features))
|
||||||
|
for _, ns := range l.Namespaces {
|
||||||
|
namespaces = append(namespaces, ns.Namespace)
|
||||||
|
}
|
||||||
|
for _, f := range l.Features {
|
||||||
|
if f.PotentialNamespace.Valid() {
|
||||||
|
namespaces = append(namespaces, f.PotentialNamespace)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return namespaces
|
||||||
|
}
|
||||||
|
|
||||||
|
// LayerNamespace is a namespace with detection information.
|
||||||
|
type LayerNamespace struct {
|
||||||
|
Namespace `json:"namespace"`
|
||||||
|
|
||||||
|
// By is the detector found the namespace.
|
||||||
|
By Detector `json:"by"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// LayerFeature is a feature with detection information.
|
||||||
|
type LayerFeature struct {
|
||||||
|
Feature `json:"feature"`
|
||||||
|
|
||||||
|
// By is the detector found the feature.
|
||||||
|
By Detector `json:"by"`
|
||||||
|
PotentialNamespace Namespace `json:"potentialNamespace"`
|
||||||
|
}
|
41
vendor/github.com/coreos/clair/database/metadata.go
generated
vendored
Normal file
41
vendor/github.com/coreos/clair/database/metadata.go
generated
vendored
Normal file
|
@ -0,0 +1,41 @@
|
||||||
|
// Copyright 2019 clair authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package database
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql/driver"
|
||||||
|
"encoding/json"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MetadataMap is for storing the metadata returned by vulnerability database.
|
||||||
|
type MetadataMap map[string]interface{}
|
||||||
|
|
||||||
|
func (mm *MetadataMap) Scan(value interface{}) error {
|
||||||
|
if value == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// github.com/lib/pq decodes TEXT/VARCHAR fields into strings.
|
||||||
|
val, ok := value.(string)
|
||||||
|
if !ok {
|
||||||
|
panic("got type other than []byte from database")
|
||||||
|
}
|
||||||
|
return json.Unmarshal([]byte(val), mm)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mm *MetadataMap) Value() (driver.Value, error) {
|
||||||
|
json, err := json.Marshal(*mm)
|
||||||
|
return string(json), err
|
||||||
|
}
|
38
vendor/github.com/coreos/clair/database/mock.go
generated
vendored
38
vendor/github.com/coreos/clair/database/mock.go
generated
vendored
|
@ -30,9 +30,10 @@ type MockSession struct {
|
||||||
FctFindAffectedNamespacedFeatures func(features []NamespacedFeature) ([]NullableAffectedNamespacedFeature, error)
|
FctFindAffectedNamespacedFeatures func(features []NamespacedFeature) ([]NullableAffectedNamespacedFeature, error)
|
||||||
FctPersistNamespaces func([]Namespace) error
|
FctPersistNamespaces func([]Namespace) error
|
||||||
FctPersistFeatures func([]Feature) error
|
FctPersistFeatures func([]Feature) error
|
||||||
|
FctPersistDetectors func(detectors []Detector) error
|
||||||
FctPersistNamespacedFeatures func([]NamespacedFeature) error
|
FctPersistNamespacedFeatures func([]NamespacedFeature) error
|
||||||
FctCacheAffectedNamespacedFeatures func([]NamespacedFeature) error
|
FctCacheAffectedNamespacedFeatures func([]NamespacedFeature) error
|
||||||
FctPersistLayer func(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error
|
FctPersistLayer func(hash string, features []LayerFeature, namespaces []LayerNamespace, by []Detector) error
|
||||||
FctFindLayer func(name string) (Layer, bool, error)
|
FctFindLayer func(name string) (Layer, bool, error)
|
||||||
FctInsertVulnerabilities func([]VulnerabilityWithAffected) error
|
FctInsertVulnerabilities func([]VulnerabilityWithAffected) error
|
||||||
FctFindVulnerabilities func([]VulnerabilityID) ([]NullableVulnerability, error)
|
FctFindVulnerabilities func([]VulnerabilityID) ([]NullableVulnerability, error)
|
||||||
|
@ -45,9 +46,9 @@ type MockSession struct {
|
||||||
FctDeleteNotification func(name string) error
|
FctDeleteNotification func(name string) error
|
||||||
FctUpdateKeyValue func(key, value string) error
|
FctUpdateKeyValue func(key, value string) error
|
||||||
FctFindKeyValue func(key string) (string, bool, error)
|
FctFindKeyValue func(key string) (string, bool, error)
|
||||||
FctLock func(name string, owner string, duration time.Duration, renew bool) (bool, time.Time, error)
|
FctAcquireLock func(name, owner string, duration time.Duration) (bool, time.Time, error)
|
||||||
FctUnlock func(name, owner string) error
|
FctExtendLock func(name, owner string, duration time.Duration) (bool, time.Time, error)
|
||||||
FctFindLock func(name string) (string, time.Time, bool, error)
|
FctReleaseLock func(name, owner string) error
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ms *MockSession) Commit() error {
|
func (ms *MockSession) Commit() error {
|
||||||
|
@ -85,6 +86,13 @@ func (ms *MockSession) FindAffectedNamespacedFeatures(features []NamespacedFeatu
|
||||||
panic("required mock function not implemented")
|
panic("required mock function not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ms *MockSession) PersistDetectors(detectors []Detector) error {
|
||||||
|
if ms.FctPersistDetectors != nil {
|
||||||
|
return ms.FctPersistDetectors(detectors)
|
||||||
|
}
|
||||||
|
panic("required mock function not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
func (ms *MockSession) PersistNamespaces(namespaces []Namespace) error {
|
func (ms *MockSession) PersistNamespaces(namespaces []Namespace) error {
|
||||||
if ms.FctPersistNamespaces != nil {
|
if ms.FctPersistNamespaces != nil {
|
||||||
return ms.FctPersistNamespaces(namespaces)
|
return ms.FctPersistNamespaces(namespaces)
|
||||||
|
@ -113,9 +121,9 @@ func (ms *MockSession) CacheAffectedNamespacedFeatures(namespacedFeatures []Name
|
||||||
panic("required mock function not implemented")
|
panic("required mock function not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ms *MockSession) PersistLayer(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error {
|
func (ms *MockSession) PersistLayer(hash string, features []LayerFeature, namespaces []LayerNamespace, detectors []Detector) error {
|
||||||
if ms.FctPersistLayer != nil {
|
if ms.FctPersistLayer != nil {
|
||||||
return ms.FctPersistLayer(hash, namespaces, features, processedBy)
|
return ms.FctPersistLayer(hash, features, namespaces, detectors)
|
||||||
}
|
}
|
||||||
panic("required mock function not implemented")
|
panic("required mock function not implemented")
|
||||||
}
|
}
|
||||||
|
@ -198,23 +206,23 @@ func (ms *MockSession) FindKeyValue(key string) (string, bool, error) {
|
||||||
panic("required mock function not implemented")
|
panic("required mock function not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ms *MockSession) Lock(name string, owner string, duration time.Duration, renew bool) (bool, time.Time, error) {
|
func (ms *MockSession) AcquireLock(name, owner string, duration time.Duration) (bool, time.Time, error) {
|
||||||
if ms.FctLock != nil {
|
if ms.FctAcquireLock != nil {
|
||||||
return ms.FctLock(name, owner, duration, renew)
|
return ms.FctAcquireLock(name, owner, duration)
|
||||||
}
|
}
|
||||||
panic("required mock function not implemented")
|
panic("required mock function not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ms *MockSession) Unlock(name, owner string) error {
|
func (ms *MockSession) ExtendLock(name, owner string, duration time.Duration) (bool, time.Time, error) {
|
||||||
if ms.FctUnlock != nil {
|
if ms.FctExtendLock != nil {
|
||||||
return ms.FctUnlock(name, owner)
|
return ms.FctExtendLock(name, owner, duration)
|
||||||
}
|
}
|
||||||
panic("required mock function not implemented")
|
panic("required mock function not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ms *MockSession) FindLock(name string) (string, time.Time, bool, error) {
|
func (ms *MockSession) ReleaseLock(name, owner string) error {
|
||||||
if ms.FctFindLock != nil {
|
if ms.FctReleaseLock != nil {
|
||||||
return ms.FctFindLock(name)
|
return ms.FctReleaseLock(name, owner)
|
||||||
}
|
}
|
||||||
panic("required mock function not implemented")
|
panic("required mock function not implemented")
|
||||||
}
|
}
|
||||||
|
|
239
vendor/github.com/coreos/clair/database/models.go
generated
vendored
239
vendor/github.com/coreos/clair/database/models.go
generated
vendored
|
@ -1,239 +0,0 @@
|
||||||
// Copyright 2017 clair authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package database
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql/driver"
|
|
||||||
"encoding/json"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/coreos/clair/pkg/pagination"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Processors are extentions to scan a layer's content.
|
|
||||||
type Processors struct {
|
|
||||||
Listers []string
|
|
||||||
Detectors []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ancestry is a manifest that keeps all layers in an image in order.
|
|
||||||
type Ancestry struct {
|
|
||||||
Name string
|
|
||||||
// ProcessedBy contains the processors that are used when computing the
|
|
||||||
// content of this ancestry.
|
|
||||||
ProcessedBy Processors
|
|
||||||
// Layers should be ordered and i_th layer is the parent of i+1_th layer in
|
|
||||||
// the slice.
|
|
||||||
Layers []AncestryLayer
|
|
||||||
}
|
|
||||||
|
|
||||||
// AncestryLayer is a layer with all detected namespaced features.
|
|
||||||
type AncestryLayer struct {
|
|
||||||
LayerMetadata
|
|
||||||
|
|
||||||
// DetectedFeatures are the features introduced by this layer when it was
|
|
||||||
// processed.
|
|
||||||
DetectedFeatures []NamespacedFeature
|
|
||||||
}
|
|
||||||
|
|
||||||
// LayerMetadata contains the metadata of a layer.
|
|
||||||
type LayerMetadata struct {
|
|
||||||
// Hash is content hash of the layer.
|
|
||||||
Hash string
|
|
||||||
// ProcessedBy contains the processors that processed this layer.
|
|
||||||
ProcessedBy Processors
|
|
||||||
}
|
|
||||||
|
|
||||||
// Layer is a layer with its detected namespaces and features by
|
|
||||||
// ProcessedBy.
|
|
||||||
type Layer struct {
|
|
||||||
LayerMetadata
|
|
||||||
|
|
||||||
Namespaces []Namespace
|
|
||||||
Features []Feature
|
|
||||||
}
|
|
||||||
|
|
||||||
// Namespace is the contextual information around features.
|
|
||||||
//
|
|
||||||
// e.g. Debian:7, NodeJS.
|
|
||||||
type Namespace struct {
|
|
||||||
Name string
|
|
||||||
VersionFormat string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Feature represents a package detected in a layer but the namespace is not
|
|
||||||
// determined.
|
|
||||||
//
|
|
||||||
// e.g. Name: OpenSSL, Version: 1.0, VersionFormat: dpkg.
|
|
||||||
// dpkg is the version format of the installer package manager, which in this
|
|
||||||
// case could be dpkg or apk.
|
|
||||||
type Feature struct {
|
|
||||||
Name string
|
|
||||||
Version string
|
|
||||||
VersionFormat string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NamespacedFeature is a feature with determined namespace and can be affected
|
|
||||||
// by vulnerabilities.
|
|
||||||
//
|
|
||||||
// e.g. OpenSSL 1.0 dpkg Debian:7.
|
|
||||||
type NamespacedFeature struct {
|
|
||||||
Feature
|
|
||||||
|
|
||||||
Namespace Namespace
|
|
||||||
}
|
|
||||||
|
|
||||||
// AffectedNamespacedFeature is a namespaced feature affected by the
|
|
||||||
// vulnerabilities with fixed-in versions for this feature.
|
|
||||||
type AffectedNamespacedFeature struct {
|
|
||||||
NamespacedFeature
|
|
||||||
|
|
||||||
AffectedBy []VulnerabilityWithFixedIn
|
|
||||||
}
|
|
||||||
|
|
||||||
// VulnerabilityWithFixedIn is used for AffectedNamespacedFeature to retrieve
|
|
||||||
// the affecting vulnerabilities and the fixed-in versions for the feature.
|
|
||||||
type VulnerabilityWithFixedIn struct {
|
|
||||||
Vulnerability
|
|
||||||
|
|
||||||
FixedInVersion string
|
|
||||||
}
|
|
||||||
|
|
||||||
// AffectedFeature is used to determine whether a namespaced feature is affected
|
|
||||||
// by a Vulnerability. Namespace and Feature Name is unique. Affected Feature is
|
|
||||||
// bound to vulnerability.
|
|
||||||
type AffectedFeature struct {
|
|
||||||
Namespace Namespace
|
|
||||||
FeatureName string
|
|
||||||
// FixedInVersion is known next feature version that's not affected by the
|
|
||||||
// vulnerability. Empty FixedInVersion means the unaffected version is
|
|
||||||
// unknown.
|
|
||||||
FixedInVersion string
|
|
||||||
// AffectedVersion contains the version range to determine whether or not a
|
|
||||||
// feature is affected.
|
|
||||||
AffectedVersion string
|
|
||||||
}
|
|
||||||
|
|
||||||
// VulnerabilityID is an identifier for every vulnerability. Every vulnerability
|
|
||||||
// has unique namespace and name.
|
|
||||||
type VulnerabilityID struct {
|
|
||||||
Name string
|
|
||||||
Namespace string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Vulnerability represents CVE or similar vulnerability reports.
|
|
||||||
type Vulnerability struct {
|
|
||||||
Name string
|
|
||||||
Namespace Namespace
|
|
||||||
|
|
||||||
Description string
|
|
||||||
Link string
|
|
||||||
Severity Severity
|
|
||||||
|
|
||||||
Metadata MetadataMap
|
|
||||||
}
|
|
||||||
|
|
||||||
// VulnerabilityWithAffected is an vulnerability with all known affected
|
|
||||||
// features.
|
|
||||||
type VulnerabilityWithAffected struct {
|
|
||||||
Vulnerability
|
|
||||||
|
|
||||||
Affected []AffectedFeature
|
|
||||||
}
|
|
||||||
|
|
||||||
// PagedVulnerableAncestries is a vulnerability with a page of affected
|
|
||||||
// ancestries each with a special index attached for streaming purpose. The
|
|
||||||
// current page number and next page number are for navigate.
|
|
||||||
type PagedVulnerableAncestries struct {
|
|
||||||
Vulnerability
|
|
||||||
|
|
||||||
// Affected is a map of special indexes to Ancestries, which the pair
|
|
||||||
// should be unique in a stream. Every indexes in the map should be larger
|
|
||||||
// than previous page.
|
|
||||||
Affected map[int]string
|
|
||||||
|
|
||||||
Limit int
|
|
||||||
Current pagination.Token
|
|
||||||
Next pagination.Token
|
|
||||||
|
|
||||||
// End signals the end of the pages.
|
|
||||||
End bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NotificationHook is a message sent to another service to inform of a change
|
|
||||||
// to a Vulnerability or the Ancestries affected by a Vulnerability. It contains
|
|
||||||
// the name of a notification that should be read and marked as read via the
|
|
||||||
// API.
|
|
||||||
type NotificationHook struct {
|
|
||||||
Name string
|
|
||||||
|
|
||||||
Created time.Time
|
|
||||||
Notified time.Time
|
|
||||||
Deleted time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// VulnerabilityNotification is a notification for vulnerability changes.
|
|
||||||
type VulnerabilityNotification struct {
|
|
||||||
NotificationHook
|
|
||||||
|
|
||||||
Old *Vulnerability
|
|
||||||
New *Vulnerability
|
|
||||||
}
|
|
||||||
|
|
||||||
// VulnerabilityNotificationWithVulnerable is a notification for vulnerability
|
|
||||||
// changes with vulnerable ancestries.
|
|
||||||
type VulnerabilityNotificationWithVulnerable struct {
|
|
||||||
NotificationHook
|
|
||||||
|
|
||||||
Old *PagedVulnerableAncestries
|
|
||||||
New *PagedVulnerableAncestries
|
|
||||||
}
|
|
||||||
|
|
||||||
// MetadataMap is for storing the metadata returned by vulnerability database.
|
|
||||||
type MetadataMap map[string]interface{}
|
|
||||||
|
|
||||||
// NullableAffectedNamespacedFeature is an affectednamespacedfeature with
|
|
||||||
// whether it's found in datastore.
|
|
||||||
type NullableAffectedNamespacedFeature struct {
|
|
||||||
AffectedNamespacedFeature
|
|
||||||
|
|
||||||
Valid bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NullableVulnerability is a vulnerability with whether the vulnerability is
|
|
||||||
// found in datastore.
|
|
||||||
type NullableVulnerability struct {
|
|
||||||
VulnerabilityWithAffected
|
|
||||||
|
|
||||||
Valid bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mm *MetadataMap) Scan(value interface{}) error {
|
|
||||||
if value == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// github.com/lib/pq decodes TEXT/VARCHAR fields into strings.
|
|
||||||
val, ok := value.(string)
|
|
||||||
if !ok {
|
|
||||||
panic("got type other than []byte from database")
|
|
||||||
}
|
|
||||||
return json.Unmarshal([]byte(val), mm)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mm *MetadataMap) Value() (driver.Value, error) {
|
|
||||||
json, err := json.Marshal(*mm)
|
|
||||||
return string(json), err
|
|
||||||
}
|
|
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2013 Matt T. Proud
|
// Copyright 2019 clair authors
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
@ -12,5 +12,23 @@
|
||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
// Package pbutil provides record length-delimited Protocol Buffer streaming.
|
package database
|
||||||
package pbutil
|
|
||||||
|
// Namespace is the contextual information around features.
|
||||||
|
//
|
||||||
|
// e.g. Debian:7, NodeJS.
|
||||||
|
type Namespace struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
VersionFormat string `json:"versionFormat"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewNamespace(name string, versionFormat string) *Namespace {
|
||||||
|
return &Namespace{name, versionFormat}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ns *Namespace) Valid() bool {
|
||||||
|
if ns.Name == "" || ns.VersionFormat == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
23
vendor/github.com/coreos/clair/database/namespace_mapping.go
generated
vendored
23
vendor/github.com/coreos/clair/database/namespace_mapping.go
generated
vendored
|
@ -17,18 +17,19 @@ package database
|
||||||
// DebianReleasesMapping translates Debian code names and class names to version numbers
|
// DebianReleasesMapping translates Debian code names and class names to version numbers
|
||||||
var DebianReleasesMapping = map[string]string{
|
var DebianReleasesMapping = map[string]string{
|
||||||
// Code names
|
// Code names
|
||||||
"squeeze": "6",
|
"squeeze": "6",
|
||||||
"wheezy": "7",
|
"wheezy": "7",
|
||||||
"jessie": "8",
|
"jessie": "8",
|
||||||
"stretch": "9",
|
"stretch": "9",
|
||||||
"buster": "10",
|
"buster": "10",
|
||||||
"sid": "unstable",
|
"bullseye": "11",
|
||||||
|
"sid": "unstable",
|
||||||
|
|
||||||
// Class names
|
// Class names
|
||||||
"oldoldstable": "7",
|
"oldoldstable": "8",
|
||||||
"oldstable": "8",
|
"oldstable": "9",
|
||||||
"stable": "9",
|
"stable": "10",
|
||||||
"testing": "10",
|
"testing": "11",
|
||||||
"unstable": "unstable",
|
"unstable": "unstable",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -46,4 +47,6 @@ var UbuntuReleasesMapping = map[string]string{
|
||||||
"zesty": "17.04",
|
"zesty": "17.04",
|
||||||
"artful": "17.10",
|
"artful": "17.10",
|
||||||
"bionic": "18.04",
|
"bionic": "18.04",
|
||||||
|
"cosmic": "18.10",
|
||||||
|
"disco": "19.04",
|
||||||
}
|
}
|
||||||
|
|
69
vendor/github.com/coreos/clair/database/notification.go
generated
vendored
Normal file
69
vendor/github.com/coreos/clair/database/notification.go
generated
vendored
Normal file
|
@ -0,0 +1,69 @@
|
||||||
|
// Copyright 2019 clair authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package database
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/coreos/clair/pkg/pagination"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NotificationHook is a message sent to another service to inform of a change
|
||||||
|
// to a Vulnerability or the Ancestries affected by a Vulnerability. It contains
|
||||||
|
// the name of a notification that should be read and marked as read via the
|
||||||
|
// API.
|
||||||
|
type NotificationHook struct {
|
||||||
|
Name string
|
||||||
|
|
||||||
|
Created time.Time
|
||||||
|
Notified time.Time
|
||||||
|
Deleted time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// VulnerabilityNotification is a notification for vulnerability changes.
|
||||||
|
type VulnerabilityNotification struct {
|
||||||
|
NotificationHook
|
||||||
|
|
||||||
|
Old *Vulnerability
|
||||||
|
New *Vulnerability
|
||||||
|
}
|
||||||
|
|
||||||
|
// VulnerabilityNotificationWithVulnerable is a notification for vulnerability
|
||||||
|
// changes with vulnerable ancestries.
|
||||||
|
type VulnerabilityNotificationWithVulnerable struct {
|
||||||
|
NotificationHook
|
||||||
|
|
||||||
|
Old *PagedVulnerableAncestries
|
||||||
|
New *PagedVulnerableAncestries
|
||||||
|
}
|
||||||
|
|
||||||
|
// PagedVulnerableAncestries is a vulnerability with a page of affected
|
||||||
|
// ancestries each with a special index attached for streaming purpose. The
|
||||||
|
// current page number and next page number are for navigate.
|
||||||
|
type PagedVulnerableAncestries struct {
|
||||||
|
Vulnerability
|
||||||
|
|
||||||
|
// Affected is a map of special indexes to Ancestries, which the pair
|
||||||
|
// should be unique in a stream. Every indexes in the map should be larger
|
||||||
|
// than previous page.
|
||||||
|
Affected map[int]string
|
||||||
|
|
||||||
|
Limit int
|
||||||
|
Current pagination.Token
|
||||||
|
Next pagination.Token
|
||||||
|
|
||||||
|
// End signals the end of the pages.
|
||||||
|
End bool
|
||||||
|
}
|
297
vendor/github.com/coreos/clair/database/testutil.go
generated
vendored
Normal file
297
vendor/github.com/coreos/clair/database/testutil.go
generated
vendored
Normal file
|
@ -0,0 +1,297 @@
|
||||||
|
// Copyright 2018 clair authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package database
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"sort"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/deckarep/golang-set"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AssertDetectorsEqual asserts actual detectors are content wise equal to
|
||||||
|
// expected detectors regardless of the ordering.
|
||||||
|
func AssertDetectorsEqual(t *testing.T, expected, actual []Detector) bool {
|
||||||
|
if len(expected) != len(actual) {
|
||||||
|
return assert.Fail(t, "detectors are not equal", "expected: '%v', actual: '%v'", expected, actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Slice(expected, func(i, j int) bool {
|
||||||
|
return expected[i].String() < expected[j].String()
|
||||||
|
})
|
||||||
|
|
||||||
|
sort.Slice(actual, func(i, j int) bool {
|
||||||
|
return actual[i].String() < actual[j].String()
|
||||||
|
})
|
||||||
|
|
||||||
|
for i := range expected {
|
||||||
|
if expected[i] != actual[i] {
|
||||||
|
return assert.Fail(t, "detectors are not equal", "expected: '%v', actual: '%v'", expected, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertAncestryEqual asserts actual ancestry equals to expected ancestry
|
||||||
|
// content wise.
|
||||||
|
func AssertAncestryEqual(t *testing.T, expected, actual *Ancestry) bool {
|
||||||
|
if expected == actual {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
if actual == nil || expected == nil {
|
||||||
|
return assert.Equal(t, expected, actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !assert.Equal(t, expected.Name, actual.Name) || !AssertDetectorsEqual(t, expected.By, actual.By) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if assert.Equal(t, len(expected.Layers), len(actual.Layers)) {
|
||||||
|
for index := range expected.Layers {
|
||||||
|
if !AssertAncestryLayerEqual(t, &expected.Layers[index], &actual.Layers[index]) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertAncestryLayerEqual asserts actual ancestry layer equals to expected
|
||||||
|
// ancestry layer content wise.
|
||||||
|
func AssertAncestryLayerEqual(t *testing.T, expected, actual *AncestryLayer) bool {
|
||||||
|
if !assert.Equal(t, expected.Hash, actual.Hash) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if !assert.Equal(t, len(expected.Features), len(actual.Features),
|
||||||
|
"layer: %s\nExpected: %v\n Actual: %v",
|
||||||
|
expected.Hash, expected.Features, actual.Features,
|
||||||
|
) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// feature -> is in actual layer
|
||||||
|
hitCounter := map[AncestryFeature]bool{}
|
||||||
|
for _, f := range expected.Features {
|
||||||
|
hitCounter[f] = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// if there's no extra features and no duplicated features, since expected
|
||||||
|
// and actual have the same length, their result must equal.
|
||||||
|
for _, f := range actual.Features {
|
||||||
|
v, ok := hitCounter[f]
|
||||||
|
assert.True(t, ok, "unexpected feature %s", f)
|
||||||
|
assert.False(t, v, "duplicated feature %s", f)
|
||||||
|
hitCounter[f] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for f, visited := range hitCounter {
|
||||||
|
assert.True(t, visited, "missing feature %s", f)
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertElementsEqual asserts that content in actual equals to content in
|
||||||
|
// expected array regardless of ordering.
|
||||||
|
//
|
||||||
|
// Note: This function uses interface wise comparison.
|
||||||
|
func AssertElementsEqual(t *testing.T, expected, actual []interface{}) bool {
|
||||||
|
counter := map[interface{}]bool{}
|
||||||
|
for _, f := range expected {
|
||||||
|
counter[f] = false
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, f := range actual {
|
||||||
|
v, ok := counter[f]
|
||||||
|
if !assert.True(t, ok, "unexpected element %v\nExpected: %v\n Actual: %v\n", f, expected, actual) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if !assert.False(t, v, "duplicated element %v\nExpected: %v\n Actual: %v\n", f, expected, actual) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
counter[f] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for f, visited := range counter {
|
||||||
|
if !assert.True(t, visited, "missing feature %v\nExpected: %v\n Actual: %v\n", f, expected, actual) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertFeaturesEqual asserts content in actual equals content in expected
|
||||||
|
// regardless of ordering.
|
||||||
|
func AssertFeaturesEqual(t *testing.T, expected, actual []Feature) bool {
|
||||||
|
if assert.Len(t, actual, len(expected)) {
|
||||||
|
has := map[Feature]bool{}
|
||||||
|
for _, nf := range expected {
|
||||||
|
has[nf] = false
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, nf := range actual {
|
||||||
|
has[nf] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for nf, visited := range has {
|
||||||
|
if !assert.True(t, visited, nf.Name+" is expected") {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertLayerFeaturesEqual asserts content in actual equals to content in
|
||||||
|
// expected regardless of ordering.
|
||||||
|
func AssertLayerFeaturesEqual(t *testing.T, expected, actual []LayerFeature) bool {
|
||||||
|
if !assert.Len(t, actual, len(expected)) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedInterfaces := []interface{}{}
|
||||||
|
for _, e := range expected {
|
||||||
|
expectedInterfaces = append(expectedInterfaces, e)
|
||||||
|
}
|
||||||
|
|
||||||
|
actualInterfaces := []interface{}{}
|
||||||
|
for _, a := range actual {
|
||||||
|
actualInterfaces = append(actualInterfaces, a)
|
||||||
|
}
|
||||||
|
|
||||||
|
return AssertElementsEqual(t, expectedInterfaces, actualInterfaces)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertNamespacesEqual asserts content in actual equals to content in
|
||||||
|
// expected regardless of ordering.
|
||||||
|
func AssertNamespacesEqual(t *testing.T, expected, actual []Namespace) bool {
|
||||||
|
expectedInterfaces := []interface{}{}
|
||||||
|
for _, e := range expected {
|
||||||
|
expectedInterfaces = append(expectedInterfaces, e)
|
||||||
|
}
|
||||||
|
|
||||||
|
actualInterfaces := []interface{}{}
|
||||||
|
for _, a := range actual {
|
||||||
|
actualInterfaces = append(actualInterfaces, a)
|
||||||
|
}
|
||||||
|
|
||||||
|
return AssertElementsEqual(t, expectedInterfaces, actualInterfaces)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertLayerNamespacesEqual asserts content in actual equals to content in
|
||||||
|
// expected regardless of ordering.
|
||||||
|
func AssertLayerNamespacesEqual(t *testing.T, expected, actual []LayerNamespace) bool {
|
||||||
|
expectedInterfaces := []interface{}{}
|
||||||
|
for _, e := range expected {
|
||||||
|
expectedInterfaces = append(expectedInterfaces, e)
|
||||||
|
}
|
||||||
|
|
||||||
|
actualInterfaces := []interface{}{}
|
||||||
|
for _, a := range actual {
|
||||||
|
actualInterfaces = append(actualInterfaces, a)
|
||||||
|
}
|
||||||
|
|
||||||
|
return AssertElementsEqual(t, expectedInterfaces, actualInterfaces)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertLayerEqual asserts actual layer equals to expected layer content wise.
|
||||||
|
func AssertLayerEqual(t *testing.T, expected, actual *Layer) bool {
|
||||||
|
if expected == actual {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
if expected == nil || actual == nil {
|
||||||
|
return assert.Equal(t, expected, actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
return assert.Equal(t, expected.Hash, actual.Hash) &&
|
||||||
|
AssertDetectorsEqual(t, expected.By, actual.By) &&
|
||||||
|
AssertLayerFeaturesEqual(t, expected.Features, actual.Features) &&
|
||||||
|
AssertLayerNamespacesEqual(t, expected.Namespaces, actual.Namespaces)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertIntStringMapEqual asserts two maps with integer as key and string as
|
||||||
|
// value are equal.
|
||||||
|
func AssertIntStringMapEqual(t *testing.T, expected, actual map[int]string) bool {
|
||||||
|
checked := mapset.NewSet()
|
||||||
|
for k, v := range expected {
|
||||||
|
assert.Equal(t, v, actual[k])
|
||||||
|
checked.Add(k)
|
||||||
|
}
|
||||||
|
|
||||||
|
for k := range actual {
|
||||||
|
if !assert.True(t, checked.Contains(k)) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertVulnerabilityEqual asserts two vulnerabilities are equal.
|
||||||
|
func AssertVulnerabilityEqual(t *testing.T, expected, actual *Vulnerability) bool {
|
||||||
|
return assert.Equal(t, expected.Name, actual.Name) &&
|
||||||
|
assert.Equal(t, expected.Link, actual.Link) &&
|
||||||
|
assert.Equal(t, expected.Description, actual.Description) &&
|
||||||
|
assert.Equal(t, expected.Namespace, actual.Namespace) &&
|
||||||
|
assert.Equal(t, expected.Severity, actual.Severity) &&
|
||||||
|
AssertMetadataMapEqual(t, expected.Metadata, actual.Metadata)
|
||||||
|
}
|
||||||
|
|
||||||
|
func castMetadataMapToInterface(metadata MetadataMap) map[string]interface{} {
|
||||||
|
content, err := json.Marshal(metadata)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
data := make(map[string]interface{})
|
||||||
|
if err := json.Unmarshal(content, &data); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertMetadataMapEqual asserts two metadata maps are equal.
|
||||||
|
func AssertMetadataMapEqual(t *testing.T, expected, actual MetadataMap) bool {
|
||||||
|
expectedMap := castMetadataMapToInterface(expected)
|
||||||
|
actualMap := castMetadataMapToInterface(actual)
|
||||||
|
checked := mapset.NewSet()
|
||||||
|
for k, v := range expectedMap {
|
||||||
|
if !assert.Equal(t, v, (actualMap)[k]) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
checked.Add(k)
|
||||||
|
}
|
||||||
|
|
||||||
|
for k := range actual {
|
||||||
|
if !assert.True(t, checked.Contains(k)) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
50
vendor/github.com/coreos/clair/database/vulnerability.go
generated
vendored
Normal file
50
vendor/github.com/coreos/clair/database/vulnerability.go
generated
vendored
Normal file
|
@ -0,0 +1,50 @@
|
||||||
|
// Copyright 2019 clair authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package database
|
||||||
|
|
||||||
|
// VulnerabilityID is an identifier for every vulnerability. Every vulnerability
|
||||||
|
// has unique namespace and name.
|
||||||
|
type VulnerabilityID struct {
|
||||||
|
Name string
|
||||||
|
Namespace string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Vulnerability represents CVE or similar vulnerability reports.
|
||||||
|
type Vulnerability struct {
|
||||||
|
Name string
|
||||||
|
Namespace Namespace
|
||||||
|
|
||||||
|
Description string
|
||||||
|
Link string
|
||||||
|
Severity Severity
|
||||||
|
|
||||||
|
Metadata MetadataMap
|
||||||
|
}
|
||||||
|
|
||||||
|
// VulnerabilityWithAffected is a vulnerability with all known affected
|
||||||
|
// features.
|
||||||
|
type VulnerabilityWithAffected struct {
|
||||||
|
Vulnerability
|
||||||
|
|
||||||
|
Affected []AffectedFeature
|
||||||
|
}
|
||||||
|
|
||||||
|
// NullableVulnerability is a vulnerability with whether the vulnerability is
|
||||||
|
// found in datastore.
|
||||||
|
type NullableVulnerability struct {
|
||||||
|
VulnerabilityWithAffected
|
||||||
|
|
||||||
|
Valid bool
|
||||||
|
}
|
67
vendor/github.com/coreos/clair/pkg/commonerr/errors.go
generated
vendored
Normal file
67
vendor/github.com/coreos/clair/pkg/commonerr/errors.go
generated
vendored
Normal file
|
@ -0,0 +1,67 @@
|
||||||
|
// Copyright 2017 clair authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package commonerr defines reusable error types common throughout the Clair
|
||||||
|
// codebase.
|
||||||
|
package commonerr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrFilesystem occurs when a filesystem interaction fails.
|
||||||
|
ErrFilesystem = errors.New("something went wrong when interacting with the fs")
|
||||||
|
|
||||||
|
// ErrCouldNotDownload occurs when a download fails.
|
||||||
|
ErrCouldNotDownload = errors.New("could not download requested resource")
|
||||||
|
|
||||||
|
// ErrNotFound occurs when a resource could not be found.
|
||||||
|
ErrNotFound = errors.New("the resource cannot be found")
|
||||||
|
|
||||||
|
// ErrCouldNotParse is returned when a fetcher fails to parse the update data.
|
||||||
|
ErrCouldNotParse = errors.New("updater/fetchers: could not parse")
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrBadRequest occurs when a method has been passed an inappropriate argument.
|
||||||
|
type ErrBadRequest struct {
|
||||||
|
s string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBadRequestError instantiates a ErrBadRequest with the specified message.
|
||||||
|
func NewBadRequestError(message string) error {
|
||||||
|
return &ErrBadRequest{s: message}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ErrBadRequest) Error() string {
|
||||||
|
return e.s
|
||||||
|
}
|
||||||
|
|
||||||
|
// CombineErrors merges a slice of errors into one separated by ";". If all
|
||||||
|
// errors are nil, return nil.
|
||||||
|
func CombineErrors(errs ...error) error {
|
||||||
|
errStr := []string{}
|
||||||
|
for i, err := range errs {
|
||||||
|
if err != nil {
|
||||||
|
errStr = append(errStr, fmt.Sprintf("[%d] %s", i, err.Error()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(errStr) != 0 {
|
||||||
|
return errors.New(strings.Join(errStr, ";"))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
15
vendor/github.com/davecgh/go-spew/LICENSE
generated
vendored
Normal file
15
vendor/github.com/davecgh/go-spew/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
ISC License
|
||||||
|
|
||||||
|
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
|
||||||
|
|
||||||
|
Permission to use, copy, modify, and/or distribute this software for any
|
||||||
|
purpose with or without fee is hereby granted, provided that the above
|
||||||
|
copyright notice and this permission notice appear in all copies.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
145
vendor/github.com/davecgh/go-spew/spew/bypass.go
generated
vendored
Normal file
145
vendor/github.com/davecgh/go-spew/spew/bypass.go
generated
vendored
Normal file
|
@ -0,0 +1,145 @@
|
||||||
|
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
||||||
|
//
|
||||||
|
// Permission to use, copy, modify, and distribute this software for any
|
||||||
|
// purpose with or without fee is hereby granted, provided that the above
|
||||||
|
// copyright notice and this permission notice appear in all copies.
|
||||||
|
//
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
|
||||||
|
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||||
|
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
||||||
|
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
||||||
|
// tag is deprecated and thus should not be used.
|
||||||
|
// Go versions prior to 1.4 are disabled because they use a different layout
|
||||||
|
// for interfaces which make the implementation of unsafeReflectValue more complex.
|
||||||
|
// +build !js,!appengine,!safe,!disableunsafe,go1.4
|
||||||
|
|
||||||
|
package spew
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||||
|
// not access to the unsafe package is available.
|
||||||
|
UnsafeDisabled = false
|
||||||
|
|
||||||
|
// ptrSize is the size of a pointer on the current arch.
|
||||||
|
ptrSize = unsafe.Sizeof((*byte)(nil))
|
||||||
|
)
|
||||||
|
|
||||||
|
type flag uintptr
|
||||||
|
|
||||||
|
var (
|
||||||
|
// flagRO indicates whether the value field of a reflect.Value
|
||||||
|
// is read-only.
|
||||||
|
flagRO flag
|
||||||
|
|
||||||
|
// flagAddr indicates whether the address of the reflect.Value's
|
||||||
|
// value may be taken.
|
||||||
|
flagAddr flag
|
||||||
|
)
|
||||||
|
|
||||||
|
// flagKindMask holds the bits that make up the kind
|
||||||
|
// part of the flags field. In all the supported versions,
|
||||||
|
// it is in the lower 5 bits.
|
||||||
|
const flagKindMask = flag(0x1f)
|
||||||
|
|
||||||
|
// Different versions of Go have used different
|
||||||
|
// bit layouts for the flags type. This table
|
||||||
|
// records the known combinations.
|
||||||
|
var okFlags = []struct {
|
||||||
|
ro, addr flag
|
||||||
|
}{{
|
||||||
|
// From Go 1.4 to 1.5
|
||||||
|
ro: 1 << 5,
|
||||||
|
addr: 1 << 7,
|
||||||
|
}, {
|
||||||
|
// Up to Go tip.
|
||||||
|
ro: 1<<5 | 1<<6,
|
||||||
|
addr: 1 << 8,
|
||||||
|
}}
|
||||||
|
|
||||||
|
var flagValOffset = func() uintptr {
|
||||||
|
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||||
|
if !ok {
|
||||||
|
panic("reflect.Value has no flag field")
|
||||||
|
}
|
||||||
|
return field.Offset
|
||||||
|
}()
|
||||||
|
|
||||||
|
// flagField returns a pointer to the flag field of a reflect.Value.
|
||||||
|
func flagField(v *reflect.Value) *flag {
|
||||||
|
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
|
||||||
|
}
|
||||||
|
|
||||||
|
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
|
||||||
|
// the typical safety restrictions preventing access to unaddressable and
|
||||||
|
// unexported data. It works by digging the raw pointer to the underlying
|
||||||
|
// value out of the protected value and generating a new unprotected (unsafe)
|
||||||
|
// reflect.Value to it.
|
||||||
|
//
|
||||||
|
// This allows us to check for implementations of the Stringer and error
|
||||||
|
// interfaces to be used for pretty printing ordinarily unaddressable and
|
||||||
|
// inaccessible values such as unexported struct fields.
|
||||||
|
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||||
|
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
flagFieldPtr := flagField(&v)
|
||||||
|
*flagFieldPtr &^= flagRO
|
||||||
|
*flagFieldPtr |= flagAddr
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sanity checks against future reflect package changes
|
||||||
|
// to the type or semantics of the Value.flag field.
|
||||||
|
func init() {
|
||||||
|
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||||
|
if !ok {
|
||||||
|
panic("reflect.Value has no flag field")
|
||||||
|
}
|
||||||
|
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
|
||||||
|
panic("reflect.Value flag field has changed kind")
|
||||||
|
}
|
||||||
|
type t0 int
|
||||||
|
var t struct {
|
||||||
|
A t0
|
||||||
|
// t0 will have flagEmbedRO set.
|
||||||
|
t0
|
||||||
|
// a will have flagStickyRO set
|
||||||
|
a t0
|
||||||
|
}
|
||||||
|
vA := reflect.ValueOf(t).FieldByName("A")
|
||||||
|
va := reflect.ValueOf(t).FieldByName("a")
|
||||||
|
vt0 := reflect.ValueOf(t).FieldByName("t0")
|
||||||
|
|
||||||
|
// Infer flagRO from the difference between the flags
|
||||||
|
// for the (otherwise identical) fields in t.
|
||||||
|
flagPublic := *flagField(&vA)
|
||||||
|
flagWithRO := *flagField(&va) | *flagField(&vt0)
|
||||||
|
flagRO = flagPublic ^ flagWithRO
|
||||||
|
|
||||||
|
// Infer flagAddr from the difference between a value
|
||||||
|
// taken from a pointer and not.
|
||||||
|
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
|
||||||
|
flagNoPtr := *flagField(&vA)
|
||||||
|
flagPtr := *flagField(&vPtrA)
|
||||||
|
flagAddr = flagNoPtr ^ flagPtr
|
||||||
|
|
||||||
|
// Check that the inferred flags tally with one of the known versions.
|
||||||
|
for _, f := range okFlags {
|
||||||
|
if flagRO == f.ro && flagAddr == f.addr {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("reflect.Value read-only flag has changed semantics")
|
||||||
|
}
|
38
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
generated
vendored
Normal file
38
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
generated
vendored
Normal file
|
@ -0,0 +1,38 @@
|
||||||
|
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
||||||
|
//
|
||||||
|
// Permission to use, copy, modify, and distribute this software for any
|
||||||
|
// purpose with or without fee is hereby granted, provided that the above
|
||||||
|
// copyright notice and this permission notice appear in all copies.
|
||||||
|
//
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
|
||||||
|
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||||
|
// when the code is running on Google App Engine, compiled by GopherJS, or
|
||||||
|
// "-tags safe" is added to the go build command line. The "disableunsafe"
|
||||||
|
// tag is deprecated and thus should not be used.
|
||||||
|
// +build js appengine safe disableunsafe !go1.4
|
||||||
|
|
||||||
|
package spew
|
||||||
|
|
||||||
|
import "reflect"
|
||||||
|
|
||||||
|
const (
|
||||||
|
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||||
|
// not access to the unsafe package is available.
|
||||||
|
UnsafeDisabled = true
|
||||||
|
)
|
||||||
|
|
||||||
|
// unsafeReflectValue typically converts the passed reflect.Value into a one
|
||||||
|
// that bypasses the typical safety restrictions preventing access to
|
||||||
|
// unaddressable and unexported data. However, doing this relies on access to
|
||||||
|
// the unsafe package. This is a stub version which simply returns the passed
|
||||||
|
// reflect.Value when the unsafe package is not available.
|
||||||
|
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||||
|
return v
|
||||||
|
}
|
341
vendor/github.com/davecgh/go-spew/spew/common.go
generated
vendored
Normal file
341
vendor/github.com/davecgh/go-spew/spew/common.go
generated
vendored
Normal file
|
@ -0,0 +1,341 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||||
|
*
|
||||||
|
* Permission to use, copy, modify, and distribute this software for any
|
||||||
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
|
* copyright notice and this permission notice appear in all copies.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package spew
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Some constants in the form of bytes to avoid string overhead. This mirrors
|
||||||
|
// the technique used in the fmt package.
|
||||||
|
var (
|
||||||
|
panicBytes = []byte("(PANIC=")
|
||||||
|
plusBytes = []byte("+")
|
||||||
|
iBytes = []byte("i")
|
||||||
|
trueBytes = []byte("true")
|
||||||
|
falseBytes = []byte("false")
|
||||||
|
interfaceBytes = []byte("(interface {})")
|
||||||
|
commaNewlineBytes = []byte(",\n")
|
||||||
|
newlineBytes = []byte("\n")
|
||||||
|
openBraceBytes = []byte("{")
|
||||||
|
openBraceNewlineBytes = []byte("{\n")
|
||||||
|
closeBraceBytes = []byte("}")
|
||||||
|
asteriskBytes = []byte("*")
|
||||||
|
colonBytes = []byte(":")
|
||||||
|
colonSpaceBytes = []byte(": ")
|
||||||
|
openParenBytes = []byte("(")
|
||||||
|
closeParenBytes = []byte(")")
|
||||||
|
spaceBytes = []byte(" ")
|
||||||
|
pointerChainBytes = []byte("->")
|
||||||
|
nilAngleBytes = []byte("<nil>")
|
||||||
|
maxNewlineBytes = []byte("<max depth reached>\n")
|
||||||
|
maxShortBytes = []byte("<max>")
|
||||||
|
circularBytes = []byte("<already shown>")
|
||||||
|
circularShortBytes = []byte("<shown>")
|
||||||
|
invalidAngleBytes = []byte("<invalid>")
|
||||||
|
openBracketBytes = []byte("[")
|
||||||
|
closeBracketBytes = []byte("]")
|
||||||
|
percentBytes = []byte("%")
|
||||||
|
precisionBytes = []byte(".")
|
||||||
|
openAngleBytes = []byte("<")
|
||||||
|
closeAngleBytes = []byte(">")
|
||||||
|
openMapBytes = []byte("map[")
|
||||||
|
closeMapBytes = []byte("]")
|
||||||
|
lenEqualsBytes = []byte("len=")
|
||||||
|
capEqualsBytes = []byte("cap=")
|
||||||
|
)
|
||||||
|
|
||||||
|
// hexDigits is used to map a decimal value (0-15) to a lowercase hex digit.
var hexDigits = "0123456789abcdef"
|
||||||
|
|
||||||
|
// catchPanic handles any panics that might occur during the handleMethods
|
||||||
|
// calls.
|
||||||
|
func catchPanic(w io.Writer, v reflect.Value) {
|
||||||
|
if err := recover(); err != nil {
|
||||||
|
w.Write(panicBytes)
|
||||||
|
fmt.Fprintf(w, "%v", err)
|
||||||
|
w.Write(closeParenBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputs the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value.
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
	// We need an interface to check if the type implements the error or
	// Stringer interface. However, the reflect package won't give us an
	// interface on certain things like unexported struct fields in order
	// to enforce visibility rules. We use unsafe, when it's available,
	// to bypass these restrictions since this package does not mutate the
	// values.
	if !v.CanInterface() {
		if UnsafeDisabled {
			return false
		}

		v = unsafeReflectValue(v)
	}

	// Choose whether or not to do error and Stringer interface lookups against
	// the base type or a pointer to the base type depending on settings.
	// Technically calling one of these methods with a pointer receiver can
	// mutate the value, however, types which choose to satisfy an error or
	// Stringer interface with a pointer receiver should not be mutating their
	// state inside these interface methods.
	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
		v = unsafeReflectValue(v)
	}
	if v.CanAddr() {
		v = v.Addr()
	}

	// Is it an error or Stringer?
	switch iface := v.Interface().(type) {
	case error:
		defer catchPanic(w, v)
		if cs.ContinueOnMethod {
			// Emit "(result) " and report unhandled so the caller keeps
			// recursing into the value's internals.
			w.Write(openParenBytes)
			w.Write([]byte(iface.Error()))
			w.Write(closeParenBytes)
			w.Write(spaceBytes)
			return false
		}

		w.Write([]byte(iface.Error()))
		return true

	case fmt.Stringer:
		defer catchPanic(w, v)
		if cs.ContinueOnMethod {
			w.Write(openParenBytes)
			w.Write([]byte(iface.String()))
			w.Write(closeParenBytes)
			w.Write(spaceBytes)
			return false
		}
		w.Write([]byte(iface.String()))
		return true
	}
	return false
}
|
||||||
|
|
||||||
|
// printBool outputs a boolean value as true or false to Writer w.
|
||||||
|
func printBool(w io.Writer, val bool) {
|
||||||
|
if val {
|
||||||
|
w.Write(trueBytes)
|
||||||
|
} else {
|
||||||
|
w.Write(falseBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// printInt outputs a signed integer value to Writer w.
|
||||||
|
func printInt(w io.Writer, val int64, base int) {
|
||||||
|
w.Write([]byte(strconv.FormatInt(val, base)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// printUint outputs an unsigned integer value to Writer w.
|
||||||
|
func printUint(w io.Writer, val uint64, base int) {
|
||||||
|
w.Write([]byte(strconv.FormatUint(val, base)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// printFloat outputs a floating point value using the specified precision,
|
||||||
|
// which is expected to be 32 or 64bit, to Writer w.
|
||||||
|
func printFloat(w io.Writer, val float64, precision int) {
|
||||||
|
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// printComplex outputs a complex value using the specified float precision
|
||||||
|
// for the real and imaginary parts to Writer w.
|
||||||
|
func printComplex(w io.Writer, c complex128, floatPrecision int) {
|
||||||
|
r := real(c)
|
||||||
|
w.Write(openParenBytes)
|
||||||
|
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
|
||||||
|
i := imag(c)
|
||||||
|
if i >= 0 {
|
||||||
|
w.Write(plusBytes)
|
||||||
|
}
|
||||||
|
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
|
||||||
|
w.Write(iBytes)
|
||||||
|
w.Write(closeParenBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
|
||||||
|
// prefix to Writer w.
|
||||||
|
func printHexPtr(w io.Writer, p uintptr) {
|
||||||
|
// Null pointer.
|
||||||
|
num := uint64(p)
|
||||||
|
if num == 0 {
|
||||||
|
w.Write(nilAngleBytes)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
|
||||||
|
buf := make([]byte, 18)
|
||||||
|
|
||||||
|
// It's simpler to construct the hex string right to left.
|
||||||
|
base := uint64(16)
|
||||||
|
i := len(buf) - 1
|
||||||
|
for num >= base {
|
||||||
|
buf[i] = hexDigits[num%base]
|
||||||
|
num /= base
|
||||||
|
i--
|
||||||
|
}
|
||||||
|
buf[i] = hexDigits[num]
|
||||||
|
|
||||||
|
// Add '0x' prefix.
|
||||||
|
i--
|
||||||
|
buf[i] = 'x'
|
||||||
|
i--
|
||||||
|
buf[i] = '0'
|
||||||
|
|
||||||
|
// Strip unused leading bytes.
|
||||||
|
buf = buf[i:]
|
||||||
|
w.Write(buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
	values  []reflect.Value
	strings []string // either nil or same len as values; surrogate sort keys
	cs      *ConfigState
}
|
||||||
|
|
||||||
|
// newValuesSorter initializes a valuesSorter instance, which holds a set of
|
||||||
|
// surrogate keys on which the data should be sorted. It uses flags in
|
||||||
|
// ConfigState to decide if and how to populate those surrogate keys.
|
||||||
|
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
|
||||||
|
vs := &valuesSorter{values: values, cs: cs}
|
||||||
|
if canSortSimply(vs.values[0].Kind()) {
|
||||||
|
return vs
|
||||||
|
}
|
||||||
|
if !cs.DisableMethods {
|
||||||
|
vs.strings = make([]string, len(values))
|
||||||
|
for i := range vs.values {
|
||||||
|
b := bytes.Buffer{}
|
||||||
|
if !handleMethods(cs, &b, vs.values[i]) {
|
||||||
|
vs.strings = nil
|
||||||
|
break
|
||||||
|
}
|
||||||
|
vs.strings[i] = b.String()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if vs.strings == nil && cs.SpewKeys {
|
||||||
|
vs.strings = make([]string, len(values))
|
||||||
|
for i := range vs.values {
|
||||||
|
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return vs
|
||||||
|
}
|
||||||
|
|
||||||
|
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
|
||||||
|
// directly, or whether it should be considered for sorting by surrogate keys
|
||||||
|
// (if the ConfigState allows it).
|
||||||
|
func canSortSimply(kind reflect.Kind) bool {
|
||||||
|
// This switch parallels valueSortLess, except for the default case.
|
||||||
|
switch kind {
|
||||||
|
case reflect.Bool:
|
||||||
|
return true
|
||||||
|
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||||
|
return true
|
||||||
|
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||||
|
return true
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
return true
|
||||||
|
case reflect.String:
|
||||||
|
return true
|
||||||
|
case reflect.Uintptr:
|
||||||
|
return true
|
||||||
|
case reflect.Array:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the number of values in the slice. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
	return len(s.values)
}
|
||||||
|
|
||||||
|
// Swap swaps the values at the passed indices. It is part of the
// sort.Interface implementation. The surrogate-key slice, when present, is
// kept in lockstep with the values.
func (s *valuesSorter) Swap(i, j int) {
	s.values[i], s.values[j] = s.values[j], s.values[i]
	if s.strings != nil {
		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
	}
}
|
||||||
|
|
||||||
|
// valueSortLess returns whether the first value should sort before the second
|
||||||
|
// value. It is used by valueSorter.Less as part of the sort.Interface
|
||||||
|
// implementation.
|
||||||
|
func valueSortLess(a, b reflect.Value) bool {
|
||||||
|
switch a.Kind() {
|
||||||
|
case reflect.Bool:
|
||||||
|
return !a.Bool() && b.Bool()
|
||||||
|
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||||
|
return a.Int() < b.Int()
|
||||||
|
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||||
|
return a.Uint() < b.Uint()
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
return a.Float() < b.Float()
|
||||||
|
case reflect.String:
|
||||||
|
return a.String() < b.String()
|
||||||
|
case reflect.Uintptr:
|
||||||
|
return a.Uint() < b.Uint()
|
||||||
|
case reflect.Array:
|
||||||
|
// Compare the contents of both arrays.
|
||||||
|
l := a.Len()
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
av := a.Index(i)
|
||||||
|
bv := b.Index(i)
|
||||||
|
if av.Interface() == bv.Interface() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return valueSortLess(av, bv)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return a.String() < b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Less returns whether the value at index i should sort before the
// value at index j. It is part of the sort.Interface implementation.
// Surrogate string keys are preferred when they were populated.
func (s *valuesSorter) Less(i, j int) bool {
	if s.strings == nil {
		return valueSortLess(s.values[i], s.values[j])
	}
	return s.strings[i] < s.strings[j]
}
|
||||||
|
|
||||||
|
// sortValues is a sort function that handles both native types and any type that
|
||||||
|
// can be converted to error or Stringer. Other inputs are sorted according to
|
||||||
|
// their Value.String() value to ensure display stability.
|
||||||
|
func sortValues(values []reflect.Value, cs *ConfigState) {
|
||||||
|
if len(values) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sort.Sort(newValuesSorter(values, cs))
|
||||||
|
}
|
306
vendor/github.com/davecgh/go-spew/spew/config.go
generated
vendored
Normal file
306
vendor/github.com/davecgh/go-spew/spew/config.go
generated
vendored
Normal file
|
@ -0,0 +1,306 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||||
|
*
|
||||||
|
* Permission to use, copy, modify, and distribute this software for any
|
||||||
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
|
* copyright notice and this permission notice appear in all copies.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package spew
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ConfigState houses the configuration options used by spew to format and
// display values. There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality. Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation. You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings. See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
	// Indent specifies the string to use for each indentation level. The
	// global config instance that all top-level functions use set this to a
	// single space by default. If you would like more indentation, you might
	// set this to a tab with "\t" or perhaps two spaces with "  ".
	Indent string

	// MaxDepth controls the maximum number of levels to descend into nested
	// data structures. The default, 0, means there is no limit.
	//
	// NOTE: Circular data structures are properly detected, so it is not
	// necessary to set this value unless you specifically want to limit deeply
	// nested data structures.
	MaxDepth int

	// DisableMethods specifies whether or not error and Stringer interfaces are
	// invoked for types that implement them.
	DisableMethods bool

	// DisablePointerMethods specifies whether or not to check for and invoke
	// error and Stringer interfaces on types which only accept a pointer
	// receiver when the current type is not a pointer.
	//
	// NOTE: This might be an unsafe action since calling one of these methods
	// with a pointer receiver could technically mutate the value, however,
	// in practice, types which choose to satisfy an error or Stringer
	// interface with a pointer receiver should not be mutating their state
	// inside these interface methods. As a result, this option relies on
	// access to the unsafe package, so it will not have any effect when
	// running in environments without access to the unsafe package such as
	// Google App Engine or with the "safe" build tag specified.
	DisablePointerMethods bool

	// DisablePointerAddresses specifies whether to disable the printing of
	// pointer addresses. This is useful when diffing data structures in tests.
	DisablePointerAddresses bool

	// DisableCapacities specifies whether to disable the printing of capacities
	// for arrays, slices, maps and channels. This is useful when diffing
	// data structures in tests.
	DisableCapacities bool

	// ContinueOnMethod specifies whether or not recursion should continue once
	// a custom error or Stringer interface is invoked. The default, false,
	// means it will print the results of invoking the custom error or Stringer
	// interface and return immediately instead of continuing to recurse into
	// the internals of the data type.
	//
	// NOTE: This flag does not have any effect if method invocation is disabled
	// via the DisableMethods or DisablePointerMethods options.
	ContinueOnMethod bool

	// SortKeys specifies map keys should be sorted before being printed. Use
	// this to have a more deterministic, diffable output.  Note that only
	// native types (bool, int, uint, floats, uintptr and string) and types
	// that support the error or Stringer interfaces (if methods are
	// enabled) are supported, with other types sorted according to the
	// reflect.Value.String() output which guarantees display stability.
	SortKeys bool

	// SpewKeys specifies that, as a last resort attempt, map keys should
	// be spewed to strings and sorted by those strings. This is only
	// considered if SortKeys is true.
	SpewKeys bool
}
|
||||||
|
|
||||||
|
// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
var Config = ConfigState{Indent: " "}
|
||||||
|
|
||||||
|
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the formatted string as a value that satisfies error. See NewFormatter
|
||||||
|
// for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
|
||||||
|
return fmt.Errorf(format, c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Fprint(w, c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Fprintf(w, format, c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Fprintln(w, c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Print(c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Printf(format, c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Println(c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the resulting string. See NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Sprint(a ...interface{}) string {
|
||||||
|
return fmt.Sprint(c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the resulting string. See NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
|
||||||
|
return fmt.Sprintf(format, c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||||
|
// were passed with a Formatter interface returned by c.NewFormatter. It
|
||||||
|
// returns the resulting string. See NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Sprintln(a ...interface{}) string {
|
||||||
|
return fmt.Sprintln(c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.

The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
c.Printf, c.Println, or c.Sprintf.
*/
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
	return newFormatter(c, v)
}
|
||||||
|
|
||||||
|
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
	fdump(c, w, a...)
}
|
||||||
|
|
||||||
|
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:

	* Pointers are dereferenced and followed
	* Circular data structures are detected and handled properly
	* Custom Stringer/error interfaces are optionally invoked, including
	  on unexported types
	* Custom types which only implement the Stringer/error interfaces via
	  a pointer receiver are optionally invoked when passing non-pointer
	  variables
	* Byte arrays and slices are dumped like the hexdump -C command which
	  includes offsets, byte values in hex, and ASCII output

The configuration options are controlled by modifying the public members
of c.  See ConfigState for options documentation.

See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func (c *ConfigState) Dump(a ...interface{}) {
	fdump(c, os.Stdout, a...)
}
|
||||||
|
|
||||||
|
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||||
|
// as Dump.
|
||||||
|
func (c *ConfigState) Sdump(a ...interface{}) string {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
fdump(c, &buf, a...)
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||||
|
// length with each argument converted to a spew Formatter interface using
|
||||||
|
// the ConfigState associated with s.
|
||||||
|
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
|
||||||
|
formatters = make([]interface{}, len(args))
|
||||||
|
for index, arg := range args {
|
||||||
|
formatters[index] = newFormatter(c, arg)
|
||||||
|
}
|
||||||
|
return formatters
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDefaultConfig returns a ConfigState with the following default settings.
//
//	Indent: " "
//	MaxDepth: 0
//	DisableMethods: false
//	DisablePointerMethods: false
//	ContinueOnMethod: false
//	SortKeys: false
func NewDefaultConfig() *ConfigState {
	return &ConfigState{Indent: " "}
}
|
211
vendor/github.com/davecgh/go-spew/spew/doc.go
generated
vendored
Normal file
211
vendor/github.com/davecgh/go-spew/spew/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,211 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||||
|
*
|
||||||
|
* Permission to use, copy, modify, and distribute this software for any
|
||||||
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
|
* copyright notice and this permission notice appear in all copies.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package spew implements a deep pretty printer for Go data structures to aid in
|
||||||
|
debugging.
|
||||||
|
|
||||||
|
A quick overview of the additional features spew provides over the built-in
|
||||||
|
printing facilities for Go data types are as follows:
|
||||||
|
|
||||||
|
* Pointers are dereferenced and followed
|
||||||
|
* Circular data structures are detected and handled properly
|
||||||
|
* Custom Stringer/error interfaces are optionally invoked, including
|
||||||
|
on unexported types
|
||||||
|
* Custom types which only implement the Stringer/error interfaces via
|
||||||
|
a pointer receiver are optionally invoked when passing non-pointer
|
||||||
|
variables
|
||||||
|
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||||
|
includes offsets, byte values in hex, and ASCII output (only when using
|
||||||
|
Dump style)
|
||||||
|
|
||||||
|
There are two different approaches spew allows for dumping Go data structures:
|
||||||
|
|
||||||
|
* Dump style which prints with newlines, customizable indentation,
|
||||||
|
and additional debug information such as types and all pointer addresses
|
||||||
|
used to indirect to the final value
|
||||||
|
* A custom Formatter interface that integrates cleanly with the standard fmt
|
||||||
|
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
|
||||||
|
similar to the default %v while providing the additional functionality
|
||||||
|
outlined above and passing unsupported format verbs such as %x and %q
|
||||||
|
along to fmt
|
||||||
|
|
||||||
|
Quick Start
|
||||||
|
|
||||||
|
This section demonstrates how to quickly get started with spew. See the
|
||||||
|
sections below for further details on formatting and configuration options.
|
||||||
|
|
||||||
|
To dump a variable with full newlines, indentation, type, and pointer
|
||||||
|
information use Dump, Fdump, or Sdump:
|
||||||
|
spew.Dump(myVar1, myVar2, ...)
|
||||||
|
spew.Fdump(someWriter, myVar1, myVar2, ...)
|
||||||
|
str := spew.Sdump(myVar1, myVar2, ...)
|
||||||
|
|
||||||
|
Alternatively, if you would prefer to use format strings with a compacted inline
|
||||||
|
printing style, use the convenience wrappers Printf, Fprintf, etc with
|
||||||
|
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
|
||||||
|
%#+v (adds types and pointer addresses):
|
||||||
|
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||||
|
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||||
|
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||||
|
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||||
|
|
||||||
|
Configuration Options
|
||||||
|
|
||||||
|
Configuration of spew is handled by fields in the ConfigState type. For
|
||||||
|
convenience, all of the top-level functions use a global state available
|
||||||
|
via the spew.Config global.
|
||||||
|
|
||||||
|
It is also possible to create a ConfigState instance that provides methods
|
||||||
|
equivalent to the top-level functions. This allows concurrent configuration
|
||||||
|
options. See the ConfigState documentation for more details.
|
||||||
|
|
||||||
|
The following configuration options are available:
|
||||||
|
* Indent
|
||||||
|
String to use for each indentation level for Dump functions.
|
||||||
|
It is a single space by default. A popular alternative is "\t".
|
||||||
|
|
||||||
|
* MaxDepth
|
||||||
|
Maximum number of levels to descend into nested data structures.
|
||||||
|
There is no limit by default.
|
||||||
|
|
||||||
|
* DisableMethods
|
||||||
|
Disables invocation of error and Stringer interface methods.
|
||||||
|
Method invocation is enabled by default.
|
||||||
|
|
||||||
|
* DisablePointerMethods
|
||||||
|
Disables invocation of error and Stringer interface methods on types
|
||||||
|
which only accept pointer receivers from non-pointer variables.
|
||||||
|
Pointer method invocation is enabled by default.
|
||||||
|
|
||||||
|
* DisablePointerAddresses
|
||||||
|
DisablePointerAddresses specifies whether to disable the printing of
|
||||||
|
pointer addresses. This is useful when diffing data structures in tests.
|
||||||
|
|
||||||
|
* DisableCapacities
|
||||||
|
DisableCapacities specifies whether to disable the printing of
|
||||||
|
capacities for arrays, slices, maps and channels. This is useful when
|
||||||
|
diffing data structures in tests.
|
||||||
|
|
||||||
|
* ContinueOnMethod
|
||||||
|
Enables recursion into types after invoking error and Stringer interface
|
||||||
|
methods. Recursion after method invocation is disabled by default.
|
||||||
|
|
||||||
|
* SortKeys
|
||||||
|
Specifies map keys should be sorted before being printed. Use
|
||||||
|
this to have a more deterministic, diffable output. Note that
|
||||||
|
only native types (bool, int, uint, floats, uintptr and string)
|
||||||
|
and types which implement error or Stringer interfaces are
|
||||||
|
supported with other types sorted according to the
|
||||||
|
reflect.Value.String() output which guarantees display
|
||||||
|
stability. Natural map order is used by default.
|
||||||
|
|
||||||
|
* SpewKeys
|
||||||
|
Specifies that, as a last resort attempt, map keys should be
|
||||||
|
spewed to strings and sorted by those strings. This is only
|
||||||
|
considered if SortKeys is true.
|
||||||
|
|
||||||
|
Dump Usage
|
||||||
|
|
||||||
|
Simply call spew.Dump with a list of variables you want to dump:
|
||||||
|
|
||||||
|
spew.Dump(myVar1, myVar2, ...)
|
||||||
|
|
||||||
|
You may also call spew.Fdump if you would prefer to output to an arbitrary
|
||||||
|
io.Writer. For example, to dump to standard error:
|
||||||
|
|
||||||
|
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
|
||||||
|
|
||||||
|
A third option is to call spew.Sdump to get the formatted output as a string:
|
||||||
|
|
||||||
|
str := spew.Sdump(myVar1, myVar2, ...)
|
||||||
|
|
||||||
|
Sample Dump Output
|
||||||
|
|
||||||
|
See the Dump example for details on the setup of the types and variables being
|
||||||
|
shown here.
|
||||||
|
|
||||||
|
(main.Foo) {
|
||||||
|
unexportedField: (*main.Bar)(0xf84002e210)({
|
||||||
|
flag: (main.Flag) flagTwo,
|
||||||
|
data: (uintptr) <nil>
|
||||||
|
}),
|
||||||
|
ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||||
|
(string) (len=3) "one": (bool) true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
|
||||||
|
command as shown.
|
||||||
|
([]uint8) (len=32 cap=32) {
|
||||||
|
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
||||||
|
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
||||||
|
00000020 31 32 |12|
|
||||||
|
}
|
||||||
|
|
||||||
|
Custom Formatter
|
||||||
|
|
||||||
|
Spew provides a custom formatter that implements the fmt.Formatter interface
|
||||||
|
so that it integrates cleanly with standard fmt package printing functions. The
|
||||||
|
formatter is useful for inline printing of smaller data types similar to the
|
||||||
|
standard %v format specifier.
|
||||||
|
|
||||||
|
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||||
|
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||||
|
combinations. Any other verbs such as %x and %q will be sent to the the
|
||||||
|
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||||
|
the width and precision arguments (however they will still work on the format
|
||||||
|
specifiers not handled by the custom formatter).
|
||||||
|
|
||||||
|
Custom Formatter Usage
|
||||||
|
|
||||||
|
The simplest way to make use of the spew custom formatter is to call one of the
|
||||||
|
convenience functions such as spew.Printf, spew.Println, or spew.Printf. The
|
||||||
|
functions have syntax you are most likely already familiar with:
|
||||||
|
|
||||||
|
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||||
|
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||||
|
spew.Println(myVar, myVar2)
|
||||||
|
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||||
|
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||||
|
|
||||||
|
See the Index for the full list convenience functions.
|
||||||
|
|
||||||
|
Sample Formatter Output
|
||||||
|
|
||||||
|
Double pointer to a uint8:
|
||||||
|
%v: <**>5
|
||||||
|
%+v: <**>(0xf8400420d0->0xf8400420c8)5
|
||||||
|
%#v: (**uint8)5
|
||||||
|
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
|
||||||
|
|
||||||
|
Pointer to circular struct with a uint8 field and a pointer to itself:
|
||||||
|
%v: <*>{1 <*><shown>}
|
||||||
|
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
|
||||||
|
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
|
||||||
|
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
|
||||||
|
|
||||||
|
See the Printf example for details on the setup of variables being shown
|
||||||
|
here.
|
||||||
|
|
||||||
|
Errors
|
||||||
|
|
||||||
|
Since it is possible for custom Stringer/error interfaces to panic, spew
|
||||||
|
detects them and handles them internally by printing the panic information
|
||||||
|
inline with the output. Since spew is intended to provide deep pretty printing
|
||||||
|
capabilities on structures, it intentionally does not return any errors.
|
||||||
|
*/
|
||||||
|
package spew
|
509
vendor/github.com/davecgh/go-spew/spew/dump.go
generated
vendored
Normal file
509
vendor/github.com/davecgh/go-spew/spew/dump.go
generated
vendored
Normal file
|
@ -0,0 +1,509 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||||
|
*
|
||||||
|
* Permission to use, copy, modify, and distribute this software for any
|
||||||
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
|
* copyright notice and this permission notice appear in all copies.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package spew
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// uint8Type is a reflect.Type representing a uint8. It is used to
|
||||||
|
// convert cgo types to uint8 slices for hexdumping.
|
||||||
|
uint8Type = reflect.TypeOf(uint8(0))
|
||||||
|
|
||||||
|
// cCharRE is a regular expression that matches a cgo char.
|
||||||
|
// It is used to detect character arrays to hexdump them.
|
||||||
|
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
|
||||||
|
|
||||||
|
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
|
||||||
|
// char. It is used to detect unsigned character arrays to hexdump
|
||||||
|
// them.
|
||||||
|
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
|
||||||
|
|
||||||
|
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
|
||||||
|
// It is used to detect uint8_t arrays to hexdump them.
|
||||||
|
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
|
||||||
|
)
|
||||||
|
|
||||||
|
// dumpState contains information about the state of a dump operation.
|
||||||
|
type dumpState struct {
|
||||||
|
w io.Writer
|
||||||
|
depth int
|
||||||
|
pointers map[uintptr]int
|
||||||
|
ignoreNextType bool
|
||||||
|
ignoreNextIndent bool
|
||||||
|
cs *ConfigState
|
||||||
|
}
|
||||||
|
|
||||||
|
// indent performs indentation according to the depth level and cs.Indent
|
||||||
|
// option.
|
||||||
|
func (d *dumpState) indent() {
|
||||||
|
if d.ignoreNextIndent {
|
||||||
|
d.ignoreNextIndent = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
|
||||||
|
}
|
||||||
|
|
||||||
|
// unpackValue returns values inside of non-nil interfaces when possible.
|
||||||
|
// This is useful for data types like structs, arrays, slices, and maps which
|
||||||
|
// can contain varying types packed inside an interface.
|
||||||
|
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
|
||||||
|
if v.Kind() == reflect.Interface && !v.IsNil() {
|
||||||
|
v = v.Elem()
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// dumpPtr handles formatting of pointers by indirecting them as necessary.
|
||||||
|
func (d *dumpState) dumpPtr(v reflect.Value) {
|
||||||
|
// Remove pointers at or below the current depth from map used to detect
|
||||||
|
// circular refs.
|
||||||
|
for k, depth := range d.pointers {
|
||||||
|
if depth >= d.depth {
|
||||||
|
delete(d.pointers, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Keep list of all dereferenced pointers to show later.
|
||||||
|
pointerChain := make([]uintptr, 0)
|
||||||
|
|
||||||
|
// Figure out how many levels of indirection there are by dereferencing
|
||||||
|
// pointers and unpacking interfaces down the chain while detecting circular
|
||||||
|
// references.
|
||||||
|
nilFound := false
|
||||||
|
cycleFound := false
|
||||||
|
indirects := 0
|
||||||
|
ve := v
|
||||||
|
for ve.Kind() == reflect.Ptr {
|
||||||
|
if ve.IsNil() {
|
||||||
|
nilFound = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
indirects++
|
||||||
|
addr := ve.Pointer()
|
||||||
|
pointerChain = append(pointerChain, addr)
|
||||||
|
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
|
||||||
|
cycleFound = true
|
||||||
|
indirects--
|
||||||
|
break
|
||||||
|
}
|
||||||
|
d.pointers[addr] = d.depth
|
||||||
|
|
||||||
|
ve = ve.Elem()
|
||||||
|
if ve.Kind() == reflect.Interface {
|
||||||
|
if ve.IsNil() {
|
||||||
|
nilFound = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
ve = ve.Elem()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Display type information.
|
||||||
|
d.w.Write(openParenBytes)
|
||||||
|
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||||
|
d.w.Write([]byte(ve.Type().String()))
|
||||||
|
d.w.Write(closeParenBytes)
|
||||||
|
|
||||||
|
// Display pointer information.
|
||||||
|
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
|
||||||
|
d.w.Write(openParenBytes)
|
||||||
|
for i, addr := range pointerChain {
|
||||||
|
if i > 0 {
|
||||||
|
d.w.Write(pointerChainBytes)
|
||||||
|
}
|
||||||
|
printHexPtr(d.w, addr)
|
||||||
|
}
|
||||||
|
d.w.Write(closeParenBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Display dereferenced value.
|
||||||
|
d.w.Write(openParenBytes)
|
||||||
|
switch {
|
||||||
|
case nilFound:
|
||||||
|
d.w.Write(nilAngleBytes)
|
||||||
|
|
||||||
|
case cycleFound:
|
||||||
|
d.w.Write(circularBytes)
|
||||||
|
|
||||||
|
default:
|
||||||
|
d.ignoreNextType = true
|
||||||
|
d.dump(ve)
|
||||||
|
}
|
||||||
|
d.w.Write(closeParenBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
|
||||||
|
// reflection) arrays and slices are dumped in hexdump -C fashion.
|
||||||
|
func (d *dumpState) dumpSlice(v reflect.Value) {
|
||||||
|
// Determine whether this type should be hex dumped or not. Also,
|
||||||
|
// for types which should be hexdumped, try to use the underlying data
|
||||||
|
// first, then fall back to trying to convert them to a uint8 slice.
|
||||||
|
var buf []uint8
|
||||||
|
doConvert := false
|
||||||
|
doHexDump := false
|
||||||
|
numEntries := v.Len()
|
||||||
|
if numEntries > 0 {
|
||||||
|
vt := v.Index(0).Type()
|
||||||
|
vts := vt.String()
|
||||||
|
switch {
|
||||||
|
// C types that need to be converted.
|
||||||
|
case cCharRE.MatchString(vts):
|
||||||
|
fallthrough
|
||||||
|
case cUnsignedCharRE.MatchString(vts):
|
||||||
|
fallthrough
|
||||||
|
case cUint8tCharRE.MatchString(vts):
|
||||||
|
doConvert = true
|
||||||
|
|
||||||
|
// Try to use existing uint8 slices and fall back to converting
|
||||||
|
// and copying if that fails.
|
||||||
|
case vt.Kind() == reflect.Uint8:
|
||||||
|
// We need an addressable interface to convert the type
|
||||||
|
// to a byte slice. However, the reflect package won't
|
||||||
|
// give us an interface on certain things like
|
||||||
|
// unexported struct fields in order to enforce
|
||||||
|
// visibility rules. We use unsafe, when available, to
|
||||||
|
// bypass these restrictions since this package does not
|
||||||
|
// mutate the values.
|
||||||
|
vs := v
|
||||||
|
if !vs.CanInterface() || !vs.CanAddr() {
|
||||||
|
vs = unsafeReflectValue(vs)
|
||||||
|
}
|
||||||
|
if !UnsafeDisabled {
|
||||||
|
vs = vs.Slice(0, numEntries)
|
||||||
|
|
||||||
|
// Use the existing uint8 slice if it can be
|
||||||
|
// type asserted.
|
||||||
|
iface := vs.Interface()
|
||||||
|
if slice, ok := iface.([]uint8); ok {
|
||||||
|
buf = slice
|
||||||
|
doHexDump = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The underlying data needs to be converted if it can't
|
||||||
|
// be type asserted to a uint8 slice.
|
||||||
|
doConvert = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy and convert the underlying type if needed.
|
||||||
|
if doConvert && vt.ConvertibleTo(uint8Type) {
|
||||||
|
// Convert and copy each element into a uint8 byte
|
||||||
|
// slice.
|
||||||
|
buf = make([]uint8, numEntries)
|
||||||
|
for i := 0; i < numEntries; i++ {
|
||||||
|
vv := v.Index(i)
|
||||||
|
buf[i] = uint8(vv.Convert(uint8Type).Uint())
|
||||||
|
}
|
||||||
|
doHexDump = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hexdump the entire slice as needed.
|
||||||
|
if doHexDump {
|
||||||
|
indent := strings.Repeat(d.cs.Indent, d.depth)
|
||||||
|
str := indent + hex.Dump(buf)
|
||||||
|
str = strings.Replace(str, "\n", "\n"+indent, -1)
|
||||||
|
str = strings.TrimRight(str, d.cs.Indent)
|
||||||
|
d.w.Write([]byte(str))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recursively call dump for each item.
|
||||||
|
for i := 0; i < numEntries; i++ {
|
||||||
|
d.dump(d.unpackValue(v.Index(i)))
|
||||||
|
if i < (numEntries - 1) {
|
||||||
|
d.w.Write(commaNewlineBytes)
|
||||||
|
} else {
|
||||||
|
d.w.Write(newlineBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// dump is the main workhorse for dumping a value. It uses the passed reflect
|
||||||
|
// value to figure out what kind of object we are dealing with and formats it
|
||||||
|
// appropriately. It is a recursive function, however circular data structures
|
||||||
|
// are detected and handled properly.
|
||||||
|
func (d *dumpState) dump(v reflect.Value) {
|
||||||
|
// Handle invalid reflect values immediately.
|
||||||
|
kind := v.Kind()
|
||||||
|
if kind == reflect.Invalid {
|
||||||
|
d.w.Write(invalidAngleBytes)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle pointers specially.
|
||||||
|
if kind == reflect.Ptr {
|
||||||
|
d.indent()
|
||||||
|
d.dumpPtr(v)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print type information unless already handled elsewhere.
|
||||||
|
if !d.ignoreNextType {
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(openParenBytes)
|
||||||
|
d.w.Write([]byte(v.Type().String()))
|
||||||
|
d.w.Write(closeParenBytes)
|
||||||
|
d.w.Write(spaceBytes)
|
||||||
|
}
|
||||||
|
d.ignoreNextType = false
|
||||||
|
|
||||||
|
// Display length and capacity if the built-in len and cap functions
|
||||||
|
// work with the value's kind and the len/cap itself is non-zero.
|
||||||
|
valueLen, valueCap := 0, 0
|
||||||
|
switch v.Kind() {
|
||||||
|
case reflect.Array, reflect.Slice, reflect.Chan:
|
||||||
|
valueLen, valueCap = v.Len(), v.Cap()
|
||||||
|
case reflect.Map, reflect.String:
|
||||||
|
valueLen = v.Len()
|
||||||
|
}
|
||||||
|
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
|
||||||
|
d.w.Write(openParenBytes)
|
||||||
|
if valueLen != 0 {
|
||||||
|
d.w.Write(lenEqualsBytes)
|
||||||
|
printInt(d.w, int64(valueLen), 10)
|
||||||
|
}
|
||||||
|
if !d.cs.DisableCapacities && valueCap != 0 {
|
||||||
|
if valueLen != 0 {
|
||||||
|
d.w.Write(spaceBytes)
|
||||||
|
}
|
||||||
|
d.w.Write(capEqualsBytes)
|
||||||
|
printInt(d.w, int64(valueCap), 10)
|
||||||
|
}
|
||||||
|
d.w.Write(closeParenBytes)
|
||||||
|
d.w.Write(spaceBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call Stringer/error interfaces if they exist and the handle methods flag
|
||||||
|
// is enabled
|
||||||
|
if !d.cs.DisableMethods {
|
||||||
|
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||||
|
if handled := handleMethods(d.cs, d.w, v); handled {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch kind {
|
||||||
|
case reflect.Invalid:
|
||||||
|
// Do nothing. We should never get here since invalid has already
|
||||||
|
// been handled above.
|
||||||
|
|
||||||
|
case reflect.Bool:
|
||||||
|
printBool(d.w, v.Bool())
|
||||||
|
|
||||||
|
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||||
|
printInt(d.w, v.Int(), 10)
|
||||||
|
|
||||||
|
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||||
|
printUint(d.w, v.Uint(), 10)
|
||||||
|
|
||||||
|
case reflect.Float32:
|
||||||
|
printFloat(d.w, v.Float(), 32)
|
||||||
|
|
||||||
|
case reflect.Float64:
|
||||||
|
printFloat(d.w, v.Float(), 64)
|
||||||
|
|
||||||
|
case reflect.Complex64:
|
||||||
|
printComplex(d.w, v.Complex(), 32)
|
||||||
|
|
||||||
|
case reflect.Complex128:
|
||||||
|
printComplex(d.w, v.Complex(), 64)
|
||||||
|
|
||||||
|
case reflect.Slice:
|
||||||
|
if v.IsNil() {
|
||||||
|
d.w.Write(nilAngleBytes)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
fallthrough
|
||||||
|
|
||||||
|
case reflect.Array:
|
||||||
|
d.w.Write(openBraceNewlineBytes)
|
||||||
|
d.depth++
|
||||||
|
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(maxNewlineBytes)
|
||||||
|
} else {
|
||||||
|
d.dumpSlice(v)
|
||||||
|
}
|
||||||
|
d.depth--
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(closeBraceBytes)
|
||||||
|
|
||||||
|
case reflect.String:
|
||||||
|
d.w.Write([]byte(strconv.Quote(v.String())))
|
||||||
|
|
||||||
|
case reflect.Interface:
|
||||||
|
// The only time we should get here is for nil interfaces due to
|
||||||
|
// unpackValue calls.
|
||||||
|
if v.IsNil() {
|
||||||
|
d.w.Write(nilAngleBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Ptr:
|
||||||
|
// Do nothing. We should never get here since pointers have already
|
||||||
|
// been handled above.
|
||||||
|
|
||||||
|
case reflect.Map:
|
||||||
|
// nil maps should be indicated as different than empty maps
|
||||||
|
if v.IsNil() {
|
||||||
|
d.w.Write(nilAngleBytes)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
d.w.Write(openBraceNewlineBytes)
|
||||||
|
d.depth++
|
||||||
|
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(maxNewlineBytes)
|
||||||
|
} else {
|
||||||
|
numEntries := v.Len()
|
||||||
|
keys := v.MapKeys()
|
||||||
|
if d.cs.SortKeys {
|
||||||
|
sortValues(keys, d.cs)
|
||||||
|
}
|
||||||
|
for i, key := range keys {
|
||||||
|
d.dump(d.unpackValue(key))
|
||||||
|
d.w.Write(colonSpaceBytes)
|
||||||
|
d.ignoreNextIndent = true
|
||||||
|
d.dump(d.unpackValue(v.MapIndex(key)))
|
||||||
|
if i < (numEntries - 1) {
|
||||||
|
d.w.Write(commaNewlineBytes)
|
||||||
|
} else {
|
||||||
|
d.w.Write(newlineBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
d.depth--
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(closeBraceBytes)
|
||||||
|
|
||||||
|
case reflect.Struct:
|
||||||
|
d.w.Write(openBraceNewlineBytes)
|
||||||
|
d.depth++
|
||||||
|
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(maxNewlineBytes)
|
||||||
|
} else {
|
||||||
|
vt := v.Type()
|
||||||
|
numFields := v.NumField()
|
||||||
|
for i := 0; i < numFields; i++ {
|
||||||
|
d.indent()
|
||||||
|
vtf := vt.Field(i)
|
||||||
|
d.w.Write([]byte(vtf.Name))
|
||||||
|
d.w.Write(colonSpaceBytes)
|
||||||
|
d.ignoreNextIndent = true
|
||||||
|
d.dump(d.unpackValue(v.Field(i)))
|
||||||
|
if i < (numFields - 1) {
|
||||||
|
d.w.Write(commaNewlineBytes)
|
||||||
|
} else {
|
||||||
|
d.w.Write(newlineBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
d.depth--
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(closeBraceBytes)
|
||||||
|
|
||||||
|
case reflect.Uintptr:
|
||||||
|
printHexPtr(d.w, uintptr(v.Uint()))
|
||||||
|
|
||||||
|
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||||
|
printHexPtr(d.w, v.Pointer())
|
||||||
|
|
||||||
|
// There were not any other types at the time this code was written, but
|
||||||
|
// fall back to letting the default fmt package handle it in case any new
|
||||||
|
// types are added.
|
||||||
|
default:
|
||||||
|
if v.CanInterface() {
|
||||||
|
fmt.Fprintf(d.w, "%v", v.Interface())
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(d.w, "%v", v.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// fdump is a helper function to consolidate the logic from the various public
|
||||||
|
// methods which take varying writers and config states.
|
||||||
|
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
|
||||||
|
for _, arg := range a {
|
||||||
|
if arg == nil {
|
||||||
|
w.Write(interfaceBytes)
|
||||||
|
w.Write(spaceBytes)
|
||||||
|
w.Write(nilAngleBytes)
|
||||||
|
w.Write(newlineBytes)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
d := dumpState{w: w, cs: cs}
|
||||||
|
d.pointers = make(map[uintptr]int)
|
||||||
|
d.dump(reflect.ValueOf(arg))
|
||||||
|
d.w.Write(newlineBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||||
|
// exactly the same as Dump.
|
||||||
|
func Fdump(w io.Writer, a ...interface{}) {
|
||||||
|
fdump(&Config, w, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||||
|
// as Dump.
|
||||||
|
func Sdump(a ...interface{}) string {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
fdump(&Config, &buf, a...)
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Dump displays the passed parameters to standard out with newlines, customizable
|
||||||
|
indentation, and additional debug information such as complete types and all
|
||||||
|
pointer addresses used to indirect to the final value. It provides the
|
||||||
|
following features over the built-in printing facilities provided by the fmt
|
||||||
|
package:
|
||||||
|
|
||||||
|
* Pointers are dereferenced and followed
|
||||||
|
* Circular data structures are detected and handled properly
|
||||||
|
* Custom Stringer/error interfaces are optionally invoked, including
|
||||||
|
on unexported types
|
||||||
|
* Custom types which only implement the Stringer/error interfaces via
|
||||||
|
a pointer receiver are optionally invoked when passing non-pointer
|
||||||
|
variables
|
||||||
|
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||||
|
includes offsets, byte values in hex, and ASCII output
|
||||||
|
|
||||||
|
The configuration options are controlled by an exported package global,
|
||||||
|
spew.Config. See ConfigState for options documentation.
|
||||||
|
|
||||||
|
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||||
|
get the formatted result as a string.
|
||||||
|
*/
|
||||||
|
func Dump(a ...interface{}) {
|
||||||
|
fdump(&Config, os.Stdout, a...)
|
||||||
|
}
|
419
vendor/github.com/davecgh/go-spew/spew/format.go
generated
vendored
Normal file
419
vendor/github.com/davecgh/go-spew/spew/format.go
generated
vendored
Normal file
|
@ -0,0 +1,419 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||||
|
*
|
||||||
|
* Permission to use, copy, modify, and distribute this software for any
|
||||||
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
|
* copyright notice and this permission notice appear in all copies.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package spew
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// supportedFlags is a list of all the character flags supported by fmt package.
|
||||||
|
const supportedFlags = "0-+# "
|
||||||
|
|
||||||
|
// formatState implements the fmt.Formatter interface and contains information
|
||||||
|
// about the state of a formatting operation. The NewFormatter function can
|
||||||
|
// be used to get a new Formatter which can be used directly as arguments
|
||||||
|
// in standard fmt package printing calls.
|
||||||
|
type formatState struct {
|
||||||
|
value interface{}
|
||||||
|
fs fmt.State
|
||||||
|
depth int
|
||||||
|
pointers map[uintptr]int
|
||||||
|
ignoreNextType bool
|
||||||
|
cs *ConfigState
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildDefaultFormat recreates the original format string without precision
|
||||||
|
// and width information to pass in to fmt.Sprintf in the case of an
|
||||||
|
// unrecognized type. Unless new types are added to the language, this
|
||||||
|
// function won't ever be called.
|
||||||
|
func (f *formatState) buildDefaultFormat() (format string) {
|
||||||
|
buf := bytes.NewBuffer(percentBytes)
|
||||||
|
|
||||||
|
for _, flag := range supportedFlags {
|
||||||
|
if f.fs.Flag(int(flag)) {
|
||||||
|
buf.WriteRune(flag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteRune('v')
|
||||||
|
|
||||||
|
format = buf.String()
|
||||||
|
return format
|
||||||
|
}
|
||||||
|
|
||||||
|
// constructOrigFormat recreates the original format string including precision
|
||||||
|
// and width information to pass along to the standard fmt package. This allows
|
||||||
|
// automatic deferral of all format strings this package doesn't support.
|
||||||
|
func (f *formatState) constructOrigFormat(verb rune) (format string) {
|
||||||
|
buf := bytes.NewBuffer(percentBytes)
|
||||||
|
|
||||||
|
for _, flag := range supportedFlags {
|
||||||
|
if f.fs.Flag(int(flag)) {
|
||||||
|
buf.WriteRune(flag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if width, ok := f.fs.Width(); ok {
|
||||||
|
buf.WriteString(strconv.Itoa(width))
|
||||||
|
}
|
||||||
|
|
||||||
|
if precision, ok := f.fs.Precision(); ok {
|
||||||
|
buf.Write(precisionBytes)
|
||||||
|
buf.WriteString(strconv.Itoa(precision))
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteRune(verb)
|
||||||
|
|
||||||
|
format = buf.String()
|
||||||
|
return format
|
||||||
|
}
|
||||||
|
|
||||||
|
// unpackValue returns values inside of non-nil interfaces when possible and
|
||||||
|
// ensures that types for values which have been unpacked from an interface
|
||||||
|
// are displayed when the show types flag is also set.
|
||||||
|
// This is useful for data types like structs, arrays, slices, and maps which
|
||||||
|
// can contain varying types packed inside an interface.
|
||||||
|
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
|
||||||
|
if v.Kind() == reflect.Interface {
|
||||||
|
f.ignoreNextType = false
|
||||||
|
if !v.IsNil() {
|
||||||
|
v = v.Elem()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// formatPtr handles formatting of pointers by indirecting them as necessary.
func (f *formatState) formatPtr(v reflect.Value) {
	// Display nil if top level pointer is nil.
	showTypes := f.fs.Flag('#')
	if v.IsNil() && (!showTypes || f.ignoreNextType) {
		f.fs.Write(nilAngleBytes)
		return
	}

	// Remove pointers at or below the current depth from map used to detect
	// circular refs.
	for k, depth := range f.pointers {
		if depth >= f.depth {
			delete(f.pointers, k)
		}
	}

	// Keep list of all dereferenced pointers to possibly show later.
	pointerChain := make([]uintptr, 0)

	// Figure out how many levels of indirection there are by dereferencing
	// pointers and unpacking interfaces down the chain while detecting circular
	// references.
	nilFound := false
	cycleFound := false
	indirects := 0
	ve := v
	for ve.Kind() == reflect.Ptr {
		if ve.IsNil() {
			nilFound = true
			break
		}
		indirects++
		addr := ve.Pointer()
		pointerChain = append(pointerChain, addr)
		// A pointer already seen at a shallower depth means a cycle; stop
		// dereferencing and back out the indirection just counted.
		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
			cycleFound = true
			indirects--
			break
		}
		f.pointers[addr] = f.depth

		ve = ve.Elem()
		if ve.Kind() == reflect.Interface {
			if ve.IsNil() {
				nilFound = true
				break
			}
			ve = ve.Elem()
		}
	}

	// Display type or indirection level depending on flags.
	if showTypes && !f.ignoreNextType {
		f.fs.Write(openParenBytes)
		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
		f.fs.Write([]byte(ve.Type().String()))
		f.fs.Write(closeParenBytes)
	} else {
		// The type string of the last dereferenced value still carries the
		// remaining '*'s when the chain ended early (nil or cycle) — count
		// them so the indirection display stays accurate.
		if nilFound || cycleFound {
			indirects += strings.Count(ve.Type().String(), "*")
		}
		f.fs.Write(openAngleBytes)
		f.fs.Write([]byte(strings.Repeat("*", indirects)))
		f.fs.Write(closeAngleBytes)
	}

	// Display pointer information depending on flags.
	if f.fs.Flag('+') && (len(pointerChain) > 0) {
		f.fs.Write(openParenBytes)
		for i, addr := range pointerChain {
			if i > 0 {
				f.fs.Write(pointerChainBytes)
			}
			printHexPtr(f.fs, addr)
		}
		f.fs.Write(closeParenBytes)
	}

	// Display dereferenced value.
	switch {
	case nilFound:
		f.fs.Write(nilAngleBytes)

	case cycleFound:
		f.fs.Write(circularShortBytes)

	default:
		f.ignoreNextType = true
		f.format(ve)
	}
}
|
||||||
|
|
||||||
|
// format is the main workhorse for providing the Formatter interface. It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately. It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
	// Handle invalid reflect values immediately.
	kind := v.Kind()
	if kind == reflect.Invalid {
		f.fs.Write(invalidAngleBytes)
		return
	}

	// Handle pointers specially.
	if kind == reflect.Ptr {
		f.formatPtr(v)
		return
	}

	// Print type information unless already handled elsewhere.
	if !f.ignoreNextType && f.fs.Flag('#') {
		f.fs.Write(openParenBytes)
		f.fs.Write([]byte(v.Type().String()))
		f.fs.Write(closeParenBytes)
	}
	f.ignoreNextType = false

	// Call Stringer/error interfaces if they exist and the handle methods
	// flag is enabled.
	if !f.cs.DisableMethods {
		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
			if handled := handleMethods(f.cs, f.fs, v); handled {
				return
			}
		}
	}

	switch kind {
	case reflect.Invalid:
		// Do nothing. We should never get here since invalid has already
		// been handled above.

	case reflect.Bool:
		printBool(f.fs, v.Bool())

	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		printInt(f.fs, v.Int(), 10)

	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		printUint(f.fs, v.Uint(), 10)

	case reflect.Float32:
		printFloat(f.fs, v.Float(), 32)

	case reflect.Float64:
		printFloat(f.fs, v.Float(), 64)

	case reflect.Complex64:
		printComplex(f.fs, v.Complex(), 32)

	case reflect.Complex128:
		printComplex(f.fs, v.Complex(), 64)

	case reflect.Slice:
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}
		// Non-nil slices share the array formatting below.
		fallthrough

	case reflect.Array:
		f.fs.Write(openBracketBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			numEntries := v.Len()
			for i := 0; i < numEntries; i++ {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(v.Index(i)))
			}
		}
		f.depth--
		f.fs.Write(closeBracketBytes)

	case reflect.String:
		f.fs.Write([]byte(v.String()))

	case reflect.Interface:
		// The only time we should get here is for nil interfaces due to
		// unpackValue calls.
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
		}

	case reflect.Ptr:
		// Do nothing. We should never get here since pointers have already
		// been handled above.

	case reflect.Map:
		// nil maps should be indicated as different than empty maps
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}

		f.fs.Write(openMapBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			keys := v.MapKeys()
			// Optional key sort gives deterministic output since map
			// iteration order is random.
			if f.cs.SortKeys {
				sortValues(keys, f.cs)
			}
			for i, key := range keys {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(key))
				f.fs.Write(colonBytes)
				f.ignoreNextType = true
				f.format(f.unpackValue(v.MapIndex(key)))
			}
		}
		f.depth--
		f.fs.Write(closeMapBytes)

	case reflect.Struct:
		numFields := v.NumField()
		f.fs.Write(openBraceBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			vt := v.Type()
			for i := 0; i < numFields; i++ {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				vtf := vt.Field(i)
				// Field names are only shown for the %+v and %#v forms.
				if f.fs.Flag('+') || f.fs.Flag('#') {
					f.fs.Write([]byte(vtf.Name))
					f.fs.Write(colonBytes)
				}
				f.format(f.unpackValue(v.Field(i)))
			}
		}
		f.depth--
		f.fs.Write(closeBraceBytes)

	case reflect.Uintptr:
		printHexPtr(f.fs, uintptr(v.Uint()))

	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
		printHexPtr(f.fs, v.Pointer())

	// There were not any other types at the time this code was written, but
	// fall back to letting the default fmt package handle it if any get added.
	default:
		format := f.buildDefaultFormat()
		if v.CanInterface() {
			fmt.Fprintf(f.fs, format, v.Interface())
		} else {
			fmt.Fprintf(f.fs, format, v.String())
		}
	}
}
|
||||||
|
|
||||||
|
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
|
||||||
|
// details.
|
||||||
|
func (f *formatState) Format(fs fmt.State, verb rune) {
|
||||||
|
f.fs = fs
|
||||||
|
|
||||||
|
// Use standard formatting for verbs that are not v.
|
||||||
|
if verb != 'v' {
|
||||||
|
format := f.constructOrigFormat(verb)
|
||||||
|
fmt.Fprintf(fs, format, f.value)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.value == nil {
|
||||||
|
if fs.Flag('#') {
|
||||||
|
fs.Write(interfaceBytes)
|
||||||
|
}
|
||||||
|
fs.Write(nilAngleBytes)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
f.format(reflect.ValueOf(f.value))
|
||||||
|
}
|
||||||
|
|
||||||
|
// newFormatter is a helper function to consolidate the logic from the various
|
||||||
|
// public methods which take varying config states.
|
||||||
|
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
|
||||||
|
fs := &formatState{value: v, cs: cs}
|
||||||
|
fs.pointers = make(map[uintptr]int)
|
||||||
|
return fs
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.

The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
Printf, Println, or Fprintf.
*/
func NewFormatter(v interface{}) fmt.Formatter {
	return newFormatter(&Config, v)
}
|
148
vendor/github.com/davecgh/go-spew/spew/spew.go
generated
vendored
Normal file
148
vendor/github.com/davecgh/go-spew/spew/spew.go
generated
vendored
Normal file
|
@ -0,0 +1,148 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||||
|
*
|
||||||
|
* Permission to use, copy, modify, and distribute this software for any
|
||||||
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
|
* copyright notice and this permission notice appear in all copies.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package spew
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the formatted string as a value that satisfies error. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Errorf(format string, a ...interface{}) (err error) {
|
||||||
|
return fmt.Errorf(format, convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Fprint(w, convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Fprintf(w, format, convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Fprintln(w, convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Print(a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Print(convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Printf(format string, a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Printf(format, convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Println(a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Println(convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the resulting string. See NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Sprint(a ...interface{}) string {
|
||||||
|
return fmt.Sprint(convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the resulting string. See NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Sprintf(format string, a ...interface{}) string {
|
||||||
|
return fmt.Sprintf(format, convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||||
|
// were passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the resulting string. See NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Sprintln(a ...interface{}) string {
|
||||||
|
return fmt.Sprintln(convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||||
|
// length with each argument converted to a default spew Formatter interface.
|
||||||
|
func convertArgs(args []interface{}) (formatters []interface{}) {
|
||||||
|
formatters = make([]interface{}, len(args))
|
||||||
|
for index, arg := range args {
|
||||||
|
formatters[index] = NewFormatter(arg)
|
||||||
|
}
|
||||||
|
return formatters
|
||||||
|
}
|
22
vendor/github.com/deckarep/golang-set/.gitignore
generated
vendored
Normal file
22
vendor/github.com/deckarep/golang-set/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||||
|
*.o
|
||||||
|
*.a
|
||||||
|
*.so
|
||||||
|
|
||||||
|
# Folders
|
||||||
|
_obj
|
||||||
|
_test
|
||||||
|
|
||||||
|
# Architecture specific extensions/prefixes
|
||||||
|
*.[568vq]
|
||||||
|
[568vq].out
|
||||||
|
|
||||||
|
*.cgo1.go
|
||||||
|
*.cgo2.c
|
||||||
|
_cgo_defun.c
|
||||||
|
_cgo_gotypes.go
|
||||||
|
_cgo_export.*
|
||||||
|
|
||||||
|
_testmain.go
|
||||||
|
|
||||||
|
*.exe
|
11
vendor/github.com/deckarep/golang-set/.travis.yml
generated
vendored
Normal file
11
vendor/github.com/deckarep/golang-set/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
language: go
|
||||||
|
|
||||||
|
go:
|
||||||
|
- 1.8
|
||||||
|
- 1.9
|
||||||
|
- tip
|
||||||
|
|
||||||
|
script:
|
||||||
|
- go test -race ./...
|
||||||
|
- go test -bench=.
|
||||||
|
|
22
vendor/github.com/deckarep/golang-set/LICENSE
generated
vendored
Normal file
22
vendor/github.com/deckarep/golang-set/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
Open Source Initiative OSI - The MIT License (MIT):Licensing
|
||||||
|
|
||||||
|
The MIT License (MIT)
|
||||||
|
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||||
|
this software and associated documentation files (the "Software"), to deal in
|
||||||
|
the Software without restriction, including without limitation the rights to
|
||||||
|
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||||
|
of the Software, and to permit persons to whom the Software is furnished to do
|
||||||
|
so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
95
vendor/github.com/deckarep/golang-set/README.md
generated
vendored
Normal file
95
vendor/github.com/deckarep/golang-set/README.md
generated
vendored
Normal file
|
@ -0,0 +1,95 @@
|
||||||
|
[![Build Status](https://travis-ci.org/deckarep/golang-set.svg?branch=master)](https://travis-ci.org/deckarep/golang-set)
|
||||||
|
[![Go Report Card](https://goreportcard.com/badge/github.com/deckarep/golang-set)](https://goreportcard.com/report/github.com/deckarep/golang-set)
|
||||||
|
[![GoDoc](https://godoc.org/github.com/deckarep/golang-set?status.svg)](http://godoc.org/github.com/deckarep/golang-set)
|
||||||
|
|
||||||
|
## golang-set
|
||||||
|
|
||||||
|
|
||||||
|
The missing set collection for the Go language. Until Go has sets built-in...use this.
|
||||||
|
|
||||||
|
Coming from Python one of the things I miss is the superbly wonderful set collection. This is my attempt to mimic the primary features of the set from Python.
|
||||||
|
You can of course argue that there is no need for a set in Go, otherwise the creators would have added one to the standard library. To those I say simply ignore this repository
|
||||||
|
and carry-on and to the rest that find this useful please contribute in helping me make it better by:
|
||||||
|
|
||||||
|
* Helping to make more idiomatic improvements to the code.
|
||||||
|
* Helping to increase the performance of it. ~~(So far, no attempt has been made, but since it uses a map internally, I expect it to be mostly performant.)~~
|
||||||
|
* Helping to make the unit-tests more robust and kick-ass.
|
||||||
|
* Helping to fill in the [documentation.](http://godoc.org/github.com/deckarep/golang-set)
|
||||||
|
* Simply offering feedback and suggestions. (Positive, constructive feedback is appreciated.)
|
||||||
|
|
||||||
|
I have to give some credit for helping seed the idea with this post on [stackoverflow.](http://programmers.stackexchange.com/questions/177428/sets-data-structure-in-golang)
|
||||||
|
|
||||||
|
*Update* - as of 3/9/2014, you can use a compile-time generic version of this package in the [gen](http://clipperhouse.github.io/gen/) framework. This framework allows you to use the golang-set in a completely generic and type-safe way by allowing you to generate a supporting .go file based on your custom types.
|
||||||
|
|
||||||
|
## Features (as of 9/22/2014)
|
||||||
|
|
||||||
|
* a CartesianProduct() method has been added with unit-tests: [Read more about the cartesian product](http://en.wikipedia.org/wiki/Cartesian_product)
|
||||||
|
|
||||||
|
## Features (as of 9/15/2014)
|
||||||
|
|
||||||
|
* a PowerSet() method has been added with unit-tests: [Read more about the Power set](http://en.wikipedia.org/wiki/Power_set)
|
||||||
|
|
||||||
|
## Features (as of 4/22/2014)
|
||||||
|
|
||||||
|
* One common interface to both implementations
|
||||||
|
* Two set implementations to choose from
|
||||||
|
* a thread-safe implementation designed for concurrent use
|
||||||
|
* a non-thread-safe implementation designed for performance
|
||||||
|
* 75 benchmarks for both implementations
|
||||||
|
* 35 unit tests for both implementations
|
||||||
|
* 14 concurrent tests for the thread-safe implementation
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
Please see the unit test file for additional usage examples. The Python set documentation will also do a better job than I can of explaining how a set typically [works.](http://docs.python.org/2/library/sets.html) Please keep in mind
|
||||||
|
however that the Python set is a built-in type and supports additional features and syntax that make it awesome.
|
||||||
|
|
||||||
|
## Examples but not exhaustive:
|
||||||
|
|
||||||
|
```go
|
||||||
|
requiredClasses := mapset.NewSet()
|
||||||
|
requiredClasses.Add("Cooking")
|
||||||
|
requiredClasses.Add("English")
|
||||||
|
requiredClasses.Add("Math")
|
||||||
|
requiredClasses.Add("Biology")
|
||||||
|
|
||||||
|
scienceSlice := []interface{}{"Biology", "Chemistry"}
|
||||||
|
scienceClasses := mapset.NewSetFromSlice(scienceSlice)
|
||||||
|
|
||||||
|
electiveClasses := mapset.NewSet()
|
||||||
|
electiveClasses.Add("Welding")
|
||||||
|
electiveClasses.Add("Music")
|
||||||
|
electiveClasses.Add("Automotive")
|
||||||
|
|
||||||
|
bonusClasses := mapset.NewSet()
|
||||||
|
bonusClasses.Add("Go Programming")
|
||||||
|
bonusClasses.Add("Python Programming")
|
||||||
|
|
||||||
|
//Show me all the available classes I can take
|
||||||
|
allClasses := requiredClasses.Union(scienceClasses).Union(electiveClasses).Union(bonusClasses)
|
||||||
|
fmt.Println(allClasses) //Set{Cooking, English, Math, Chemistry, Welding, Biology, Music, Automotive, Go Programming, Python Programming}
|
||||||
|
|
||||||
|
|
||||||
|
//Is cooking considered a science class?
|
||||||
|
fmt.Println(scienceClasses.Contains("Cooking")) //false
|
||||||
|
|
||||||
|
//Show me all classes that are not science classes, since I hate science.
|
||||||
|
fmt.Println(allClasses.Difference(scienceClasses)) //Set{Music, Automotive, Go Programming, Python Programming, Cooking, English, Math, Welding}
|
||||||
|
|
||||||
|
//Which science classes are also required classes?
|
||||||
|
fmt.Println(scienceClasses.Intersect(requiredClasses)) //Set{Biology}
|
||||||
|
|
||||||
|
//How many bonus classes do you offer?
|
||||||
|
fmt.Println(bonusClasses.Cardinality()) //2
|
||||||
|
|
||||||
|
//Do you have the following classes? Welding, Automotive and English?
|
||||||
|
fmt.Println(allClasses.IsSuperset(mapset.NewSetFromSlice([]interface{}{"Welding", "Automotive", "English"}))) //true
|
||||||
|
```
|
||||||
|
|
||||||
|
Thanks!
|
||||||
|
|
||||||
|
-Ralph
|
||||||
|
|
||||||
|
[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/deckarep/golang-set/trend.png)](https://bitdeli.com/free "Bitdeli Badge")
|
||||||
|
|
||||||
|
[![Analytics](https://ga-beacon.appspot.com/UA-42584447-2/deckarep/golang-set)](https://github.com/igrigorik/ga-beacon)
|
58
vendor/github.com/deckarep/golang-set/iterator.go
generated
vendored
Normal file
58
vendor/github.com/deckarep/golang-set/iterator.go
generated
vendored
Normal file
|
@ -0,0 +1,58 @@
|
||||||
|
/*
|
||||||
|
Open Source Initiative OSI - The MIT License (MIT):Licensing
|
||||||
|
|
||||||
|
The MIT License (MIT)
|
||||||
|
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||||
|
this software and associated documentation files (the "Software"), to deal in
|
||||||
|
the Software without restriction, including without limitation the rights to
|
||||||
|
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||||
|
of the Software, and to permit persons to whom the Software is furnished to do
|
||||||
|
so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package mapset
|
||||||
|
|
||||||
|
// Iterator defines an iterator over a Set, its C channel can be used to range over the Set's
// elements.
type Iterator struct {
	// C yields the set's elements; it is closed when iteration ends or
	// Stop is called.
	C <-chan interface{}
	// stop is closed by Stop to signal the producing goroutine to quit.
	stop chan struct{}
}
|
||||||
|
|
||||||
|
// Stop stops the Iterator, no further elements will be received on C, C will be closed.
|
||||||
|
func (i *Iterator) Stop() {
|
||||||
|
// Allows for Stop() to be called multiple times
|
||||||
|
// (close() panics when called on already closed channel)
|
||||||
|
defer func() {
|
||||||
|
recover()
|
||||||
|
}()
|
||||||
|
|
||||||
|
close(i.stop)
|
||||||
|
|
||||||
|
// Exhaust any remaining elements.
|
||||||
|
for range i.C {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newIterator returns a new Iterator instance together with its item and stop channels.
|
||||||
|
func newIterator() (*Iterator, chan<- interface{}, <-chan struct{}) {
|
||||||
|
itemChan := make(chan interface{})
|
||||||
|
stopChan := make(chan struct{})
|
||||||
|
return &Iterator{
|
||||||
|
C: itemChan,
|
||||||
|
stop: stopChan,
|
||||||
|
}, itemChan, stopChan
|
||||||
|
}
|
217
vendor/github.com/deckarep/golang-set/set.go
generated
vendored
Normal file
217
vendor/github.com/deckarep/golang-set/set.go
generated
vendored
Normal file
|
@ -0,0 +1,217 @@
|
||||||
|
/*
|
||||||
|
Open Source Initiative OSI - The MIT License (MIT):Licensing
|
||||||
|
|
||||||
|
The MIT License (MIT)
|
||||||
|
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||||
|
this software and associated documentation files (the "Software"), to deal in
|
||||||
|
the Software without restriction, including without limitation the rights to
|
||||||
|
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||||
|
of the Software, and to permit persons to whom the Software is furnished to do
|
||||||
|
so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package mapset implements a simple and generic set collection.
|
||||||
|
// Items stored within it are unordered and unique. It supports
|
||||||
|
// typical set operations: membership testing, intersection, union,
|
||||||
|
// difference, symmetric difference and cloning.
|
||||||
|
//
|
||||||
|
// Package mapset provides two implementations of the Set
|
||||||
|
// interface. The default implementation is safe for concurrent
|
||||||
|
// access, but a non-thread-safe implementation is also provided for
|
||||||
|
// programs that can benefit from the slight speed improvement and
|
||||||
|
// that can enforce mutual exclusion through other means.
|
||||||
|
package mapset
|
||||||
|
|
||||||
|
// Set is the primary interface provided by the mapset package. It
|
||||||
|
// represents an unordered set of data and a large number of
|
||||||
|
// operations that can be applied to that set.
|
||||||
|
type Set interface {
|
||||||
|
// Adds an element to the set. Returns whether
|
||||||
|
// the item was added.
|
||||||
|
Add(i interface{}) bool
|
||||||
|
|
||||||
|
// Returns the number of elements in the set.
|
||||||
|
Cardinality() int
|
||||||
|
|
||||||
|
// Removes all elements from the set, leaving
|
||||||
|
// the empty set.
|
||||||
|
Clear()
|
||||||
|
|
||||||
|
// Returns a clone of the set using the same
|
||||||
|
// implementation, duplicating all keys.
|
||||||
|
Clone() Set
|
||||||
|
|
||||||
|
// Returns whether the given items
|
||||||
|
// are all in the set.
|
||||||
|
Contains(i ...interface{}) bool
|
||||||
|
|
||||||
|
// Returns the difference between this set
|
||||||
|
// and other. The returned set will contain
|
||||||
|
// all elements of this set that are not also
|
||||||
|
// elements of other.
|
||||||
|
//
|
||||||
|
// Note that the argument to Difference
|
||||||
|
// must be of the same type as the receiver
|
||||||
|
// of the method. Otherwise, Difference will
|
||||||
|
// panic.
|
||||||
|
Difference(other Set) Set
|
||||||
|
|
||||||
|
// Determines if two sets are equal to each
|
||||||
|
// other. If they have the same cardinality
|
||||||
|
// and contain the same elements, they are
|
||||||
|
// considered equal. The order in which
|
||||||
|
// the elements were added is irrelevant.
|
||||||
|
//
|
||||||
|
// Note that the argument to Equal must be
|
||||||
|
// of the same type as the receiver of the
|
||||||
|
// method. Otherwise, Equal will panic.
|
||||||
|
Equal(other Set) bool
|
||||||
|
|
||||||
|
// Returns a new set containing only the elements
|
||||||
|
// that exist only in both sets.
|
||||||
|
//
|
||||||
|
// Note that the argument to Intersect
|
||||||
|
// must be of the same type as the receiver
|
||||||
|
// of the method. Otherwise, Intersect will
|
||||||
|
// panic.
|
||||||
|
Intersect(other Set) Set
|
||||||
|
|
||||||
|
// Determines if every element in this set is in
|
||||||
|
// the other set but the two sets are not equal.
|
||||||
|
//
|
||||||
|
// Note that the argument to IsProperSubset
|
||||||
|
// must be of the same type as the receiver
|
||||||
|
// of the method. Otherwise, IsProperSubset
|
||||||
|
// will panic.
|
||||||
|
IsProperSubset(other Set) bool
|
||||||
|
|
||||||
|
// Determines if every element in the other set
|
||||||
|
// is in this set but the two sets are not
|
||||||
|
// equal.
|
||||||
|
//
|
||||||
|
// Note that the argument to IsSuperset
|
||||||
|
// must be of the same type as the receiver
|
||||||
|
// of the method. Otherwise, IsSuperset will
|
||||||
|
// panic.
|
||||||
|
IsProperSuperset(other Set) bool
|
||||||
|
|
||||||
|
// Determines if every element in this set is in
|
||||||
|
// the other set.
|
||||||
|
//
|
||||||
|
// Note that the argument to IsSubset
|
||||||
|
// must be of the same type as the receiver
|
||||||
|
// of the method. Otherwise, IsSubset will
|
||||||
|
// panic.
|
||||||
|
IsSubset(other Set) bool
|
||||||
|
|
||||||
|
// Determines if every element in the other set
|
||||||
|
// is in this set.
|
||||||
|
//
|
||||||
|
// Note that the argument to IsSuperset
|
||||||
|
// must be of the same type as the receiver
|
||||||
|
// of the method. Otherwise, IsSuperset will
|
||||||
|
// panic.
|
||||||
|
IsSuperset(other Set) bool
|
||||||
|
|
||||||
|
// Iterates over elements and executes the passed func against each element.
|
||||||
|
// If passed func returns true, stop iteration at the time.
|
||||||
|
Each(func(interface{}) bool)
|
||||||
|
|
||||||
|
// Returns a channel of elements that you can
|
||||||
|
// range over.
|
||||||
|
Iter() <-chan interface{}
|
||||||
|
|
||||||
|
// Returns an Iterator object that you can
|
||||||
|
// use to range over the set.
|
||||||
|
Iterator() *Iterator
|
||||||
|
|
||||||
|
// Remove a single element from the set.
|
||||||
|
Remove(i interface{})
|
||||||
|
|
||||||
|
// Provides a convenient string representation
|
||||||
|
// of the current state of the set.
|
||||||
|
String() string
|
||||||
|
|
||||||
|
// Returns a new set with all elements which are
|
||||||
|
// in either this set or the other set but not in both.
|
||||||
|
//
|
||||||
|
// Note that the argument to SymmetricDifference
|
||||||
|
// must be of the same type as the receiver
|
||||||
|
// of the method. Otherwise, SymmetricDifference
|
||||||
|
// will panic.
|
||||||
|
SymmetricDifference(other Set) Set
|
||||||
|
|
||||||
|
// Returns a new set with all elements in both sets.
|
||||||
|
//
|
||||||
|
// Note that the argument to Union must be of the
|
||||||
|
|
||||||
|
// same type as the receiver of the method.
|
||||||
|
// Otherwise, IsSuperset will panic.
|
||||||
|
Union(other Set) Set
|
||||||
|
|
||||||
|
// Pop removes and returns an arbitrary item from the set.
|
||||||
|
Pop() interface{}
|
||||||
|
|
||||||
|
// Returns all subsets of a given set (Power Set).
|
||||||
|
PowerSet() Set
|
||||||
|
|
||||||
|
// Returns the Cartesian Product of two sets.
|
||||||
|
CartesianProduct(other Set) Set
|
||||||
|
|
||||||
|
// Returns the members of the set as a slice.
|
||||||
|
ToSlice() []interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSet creates and returns a reference to an empty set. Operations
|
||||||
|
// on the resulting set are thread-safe.
|
||||||
|
func NewSet(s ...interface{}) Set {
|
||||||
|
set := newThreadSafeSet()
|
||||||
|
for _, item := range s {
|
||||||
|
set.Add(item)
|
||||||
|
}
|
||||||
|
return &set
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSetWith creates and returns a new set with the given elements.
|
||||||
|
// Operations on the resulting set are thread-safe.
|
||||||
|
func NewSetWith(elts ...interface{}) Set {
|
||||||
|
return NewSetFromSlice(elts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSetFromSlice creates and returns a reference to a set from an
|
||||||
|
// existing slice. Operations on the resulting set are thread-safe.
|
||||||
|
func NewSetFromSlice(s []interface{}) Set {
|
||||||
|
a := NewSet(s...)
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewThreadUnsafeSet creates and returns a reference to an empty set.
|
||||||
|
// Operations on the resulting set are not thread-safe.
|
||||||
|
func NewThreadUnsafeSet() Set {
|
||||||
|
set := newThreadUnsafeSet()
|
||||||
|
return &set
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewThreadUnsafeSetFromSlice creates and returns a reference to a
|
||||||
|
// set from an existing slice. Operations on the resulting set are
|
||||||
|
// not thread-safe.
|
||||||
|
func NewThreadUnsafeSetFromSlice(s []interface{}) Set {
|
||||||
|
a := NewThreadUnsafeSet()
|
||||||
|
for _, item := range s {
|
||||||
|
a.Add(item)
|
||||||
|
}
|
||||||
|
return a
|
||||||
|
}
|
283
vendor/github.com/deckarep/golang-set/threadsafe.go
generated
vendored
Normal file
283
vendor/github.com/deckarep/golang-set/threadsafe.go
generated
vendored
Normal file
|
@ -0,0 +1,283 @@
|
||||||
|
/*
|
||||||
|
Open Source Initiative OSI - The MIT License (MIT):Licensing
|
||||||
|
|
||||||
|
The MIT License (MIT)
|
||||||
|
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||||
|
this software and associated documentation files (the "Software"), to deal in
|
||||||
|
the Software without restriction, including without limitation the rights to
|
||||||
|
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||||
|
of the Software, and to permit persons to whom the Software is furnished to do
|
||||||
|
so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package mapset
|
||||||
|
|
||||||
|
import "sync"
|
||||||
|
|
||||||
|
type threadSafeSet struct {
|
||||||
|
s threadUnsafeSet
|
||||||
|
sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func newThreadSafeSet() threadSafeSet {
|
||||||
|
return threadSafeSet{s: newThreadUnsafeSet()}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) Add(i interface{}) bool {
|
||||||
|
set.Lock()
|
||||||
|
ret := set.s.Add(i)
|
||||||
|
set.Unlock()
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) Contains(i ...interface{}) bool {
|
||||||
|
set.RLock()
|
||||||
|
ret := set.s.Contains(i...)
|
||||||
|
set.RUnlock()
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) IsSubset(other Set) bool {
|
||||||
|
o := other.(*threadSafeSet)
|
||||||
|
|
||||||
|
set.RLock()
|
||||||
|
o.RLock()
|
||||||
|
|
||||||
|
ret := set.s.IsSubset(&o.s)
|
||||||
|
set.RUnlock()
|
||||||
|
o.RUnlock()
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) IsProperSubset(other Set) bool {
|
||||||
|
o := other.(*threadSafeSet)
|
||||||
|
|
||||||
|
set.RLock()
|
||||||
|
defer set.RUnlock()
|
||||||
|
o.RLock()
|
||||||
|
defer o.RUnlock()
|
||||||
|
|
||||||
|
return set.s.IsProperSubset(&o.s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) IsSuperset(other Set) bool {
|
||||||
|
return other.IsSubset(set)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) IsProperSuperset(other Set) bool {
|
||||||
|
return other.IsProperSubset(set)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) Union(other Set) Set {
|
||||||
|
o := other.(*threadSafeSet)
|
||||||
|
|
||||||
|
set.RLock()
|
||||||
|
o.RLock()
|
||||||
|
|
||||||
|
unsafeUnion := set.s.Union(&o.s).(*threadUnsafeSet)
|
||||||
|
ret := &threadSafeSet{s: *unsafeUnion}
|
||||||
|
set.RUnlock()
|
||||||
|
o.RUnlock()
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) Intersect(other Set) Set {
|
||||||
|
o := other.(*threadSafeSet)
|
||||||
|
|
||||||
|
set.RLock()
|
||||||
|
o.RLock()
|
||||||
|
|
||||||
|
unsafeIntersection := set.s.Intersect(&o.s).(*threadUnsafeSet)
|
||||||
|
ret := &threadSafeSet{s: *unsafeIntersection}
|
||||||
|
set.RUnlock()
|
||||||
|
o.RUnlock()
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) Difference(other Set) Set {
|
||||||
|
o := other.(*threadSafeSet)
|
||||||
|
|
||||||
|
set.RLock()
|
||||||
|
o.RLock()
|
||||||
|
|
||||||
|
unsafeDifference := set.s.Difference(&o.s).(*threadUnsafeSet)
|
||||||
|
ret := &threadSafeSet{s: *unsafeDifference}
|
||||||
|
set.RUnlock()
|
||||||
|
o.RUnlock()
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) SymmetricDifference(other Set) Set {
|
||||||
|
o := other.(*threadSafeSet)
|
||||||
|
|
||||||
|
set.RLock()
|
||||||
|
o.RLock()
|
||||||
|
|
||||||
|
unsafeDifference := set.s.SymmetricDifference(&o.s).(*threadUnsafeSet)
|
||||||
|
ret := &threadSafeSet{s: *unsafeDifference}
|
||||||
|
set.RUnlock()
|
||||||
|
o.RUnlock()
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) Clear() {
|
||||||
|
set.Lock()
|
||||||
|
set.s = newThreadUnsafeSet()
|
||||||
|
set.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) Remove(i interface{}) {
|
||||||
|
set.Lock()
|
||||||
|
delete(set.s, i)
|
||||||
|
set.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) Cardinality() int {
|
||||||
|
set.RLock()
|
||||||
|
defer set.RUnlock()
|
||||||
|
return len(set.s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) Each(cb func(interface{}) bool) {
|
||||||
|
set.RLock()
|
||||||
|
for elem := range set.s {
|
||||||
|
if cb(elem) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
set.RUnlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) Iter() <-chan interface{} {
|
||||||
|
ch := make(chan interface{})
|
||||||
|
go func() {
|
||||||
|
set.RLock()
|
||||||
|
|
||||||
|
for elem := range set.s {
|
||||||
|
ch <- elem
|
||||||
|
}
|
||||||
|
close(ch)
|
||||||
|
set.RUnlock()
|
||||||
|
}()
|
||||||
|
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) Iterator() *Iterator {
|
||||||
|
iterator, ch, stopCh := newIterator()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
set.RLock()
|
||||||
|
L:
|
||||||
|
for elem := range set.s {
|
||||||
|
select {
|
||||||
|
case <-stopCh:
|
||||||
|
break L
|
||||||
|
case ch <- elem:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
close(ch)
|
||||||
|
set.RUnlock()
|
||||||
|
}()
|
||||||
|
|
||||||
|
return iterator
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) Equal(other Set) bool {
|
||||||
|
o := other.(*threadSafeSet)
|
||||||
|
|
||||||
|
set.RLock()
|
||||||
|
o.RLock()
|
||||||
|
|
||||||
|
ret := set.s.Equal(&o.s)
|
||||||
|
set.RUnlock()
|
||||||
|
o.RUnlock()
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) Clone() Set {
|
||||||
|
set.RLock()
|
||||||
|
|
||||||
|
unsafeClone := set.s.Clone().(*threadUnsafeSet)
|
||||||
|
ret := &threadSafeSet{s: *unsafeClone}
|
||||||
|
set.RUnlock()
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) String() string {
|
||||||
|
set.RLock()
|
||||||
|
ret := set.s.String()
|
||||||
|
set.RUnlock()
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) PowerSet() Set {
|
||||||
|
set.RLock()
|
||||||
|
unsafePowerSet := set.s.PowerSet().(*threadUnsafeSet)
|
||||||
|
set.RUnlock()
|
||||||
|
|
||||||
|
ret := &threadSafeSet{s: newThreadUnsafeSet()}
|
||||||
|
for subset := range unsafePowerSet.Iter() {
|
||||||
|
unsafeSubset := subset.(*threadUnsafeSet)
|
||||||
|
ret.Add(&threadSafeSet{s: *unsafeSubset})
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) Pop() interface{} {
|
||||||
|
set.Lock()
|
||||||
|
defer set.Unlock()
|
||||||
|
return set.s.Pop()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) CartesianProduct(other Set) Set {
|
||||||
|
o := other.(*threadSafeSet)
|
||||||
|
|
||||||
|
set.RLock()
|
||||||
|
o.RLock()
|
||||||
|
|
||||||
|
unsafeCartProduct := set.s.CartesianProduct(&o.s).(*threadUnsafeSet)
|
||||||
|
ret := &threadSafeSet{s: *unsafeCartProduct}
|
||||||
|
set.RUnlock()
|
||||||
|
o.RUnlock()
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) ToSlice() []interface{} {
|
||||||
|
keys := make([]interface{}, 0, set.Cardinality())
|
||||||
|
set.RLock()
|
||||||
|
for elem := range set.s {
|
||||||
|
keys = append(keys, elem)
|
||||||
|
}
|
||||||
|
set.RUnlock()
|
||||||
|
return keys
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) MarshalJSON() ([]byte, error) {
|
||||||
|
set.RLock()
|
||||||
|
b, err := set.s.MarshalJSON()
|
||||||
|
set.RUnlock()
|
||||||
|
|
||||||
|
return b, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadSafeSet) UnmarshalJSON(p []byte) error {
|
||||||
|
set.RLock()
|
||||||
|
err := set.s.UnmarshalJSON(p)
|
||||||
|
set.RUnlock()
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
337
vendor/github.com/deckarep/golang-set/threadunsafe.go
generated
vendored
Normal file
337
vendor/github.com/deckarep/golang-set/threadunsafe.go
generated
vendored
Normal file
|
@ -0,0 +1,337 @@
|
||||||
|
/*
|
||||||
|
Open Source Initiative OSI - The MIT License (MIT):Licensing
|
||||||
|
|
||||||
|
The MIT License (MIT)
|
||||||
|
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||||
|
this software and associated documentation files (the "Software"), to deal in
|
||||||
|
the Software without restriction, including without limitation the rights to
|
||||||
|
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||||
|
of the Software, and to permit persons to whom the Software is furnished to do
|
||||||
|
so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package mapset
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// threadUnsafeSet is the non-thread-safe Set implementation: a map
// whose keys are the members and whose values carry no data.
type threadUnsafeSet map[interface{}]struct{}

// An OrderedPair represents a 2-tuple of values.
type OrderedPair struct {
	First  interface{}
	Second interface{}
}

// newThreadUnsafeSet allocates an empty threadUnsafeSet.
func newThreadUnsafeSet() threadUnsafeSet {
	return make(threadUnsafeSet)
}

// Equal says whether two 2-tuples contain the same values in the same order.
func (pair *OrderedPair) Equal(other OrderedPair) bool {
	return pair.First == other.First && pair.Second == other.Second
}

// Add inserts i into the set, reporting true if it was not already present.
func (set *threadUnsafeSet) Add(i interface{}) bool {
	if _, found := (*set)[i]; found {
		// Already a member; nothing to do.
		return false
	}
	(*set)[i] = struct{}{}
	return true
}

// Contains reports whether every argument is a member of the set.
// With no arguments it vacuously returns true.
func (set *threadUnsafeSet) Contains(i ...interface{}) bool {
	for _, val := range i {
		if _, ok := (*set)[val]; !ok {
			return false
		}
	}
	return true
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) IsSubset(other Set) bool {
|
||||||
|
_ = other.(*threadUnsafeSet)
|
||||||
|
for elem := range *set {
|
||||||
|
if !other.Contains(elem) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) IsProperSubset(other Set) bool {
|
||||||
|
return set.IsSubset(other) && !set.Equal(other)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) IsSuperset(other Set) bool {
|
||||||
|
return other.IsSubset(set)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) IsProperSuperset(other Set) bool {
|
||||||
|
return set.IsSuperset(other) && !set.Equal(other)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) Union(other Set) Set {
|
||||||
|
o := other.(*threadUnsafeSet)
|
||||||
|
|
||||||
|
unionedSet := newThreadUnsafeSet()
|
||||||
|
|
||||||
|
for elem := range *set {
|
||||||
|
unionedSet.Add(elem)
|
||||||
|
}
|
||||||
|
for elem := range *o {
|
||||||
|
unionedSet.Add(elem)
|
||||||
|
}
|
||||||
|
return &unionedSet
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) Intersect(other Set) Set {
|
||||||
|
o := other.(*threadUnsafeSet)
|
||||||
|
|
||||||
|
intersection := newThreadUnsafeSet()
|
||||||
|
// loop over smaller set
|
||||||
|
if set.Cardinality() < other.Cardinality() {
|
||||||
|
for elem := range *set {
|
||||||
|
if other.Contains(elem) {
|
||||||
|
intersection.Add(elem)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for elem := range *o {
|
||||||
|
if set.Contains(elem) {
|
||||||
|
intersection.Add(elem)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &intersection
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) Difference(other Set) Set {
|
||||||
|
_ = other.(*threadUnsafeSet)
|
||||||
|
|
||||||
|
difference := newThreadUnsafeSet()
|
||||||
|
for elem := range *set {
|
||||||
|
if !other.Contains(elem) {
|
||||||
|
difference.Add(elem)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &difference
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) SymmetricDifference(other Set) Set {
|
||||||
|
_ = other.(*threadUnsafeSet)
|
||||||
|
|
||||||
|
aDiff := set.Difference(other)
|
||||||
|
bDiff := other.Difference(set)
|
||||||
|
return aDiff.Union(bDiff)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) Clear() {
|
||||||
|
*set = newThreadUnsafeSet()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) Remove(i interface{}) {
|
||||||
|
delete(*set, i)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) Cardinality() int {
|
||||||
|
return len(*set)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) Each(cb func(interface{}) bool) {
|
||||||
|
for elem := range *set {
|
||||||
|
if cb(elem) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) Iter() <-chan interface{} {
|
||||||
|
ch := make(chan interface{})
|
||||||
|
go func() {
|
||||||
|
for elem := range *set {
|
||||||
|
ch <- elem
|
||||||
|
}
|
||||||
|
close(ch)
|
||||||
|
}()
|
||||||
|
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) Iterator() *Iterator {
|
||||||
|
iterator, ch, stopCh := newIterator()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
L:
|
||||||
|
for elem := range *set {
|
||||||
|
select {
|
||||||
|
case <-stopCh:
|
||||||
|
break L
|
||||||
|
case ch <- elem:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
close(ch)
|
||||||
|
}()
|
||||||
|
|
||||||
|
return iterator
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) Equal(other Set) bool {
|
||||||
|
_ = other.(*threadUnsafeSet)
|
||||||
|
|
||||||
|
if set.Cardinality() != other.Cardinality() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for elem := range *set {
|
||||||
|
if !other.Contains(elem) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) Clone() Set {
|
||||||
|
clonedSet := newThreadUnsafeSet()
|
||||||
|
for elem := range *set {
|
||||||
|
clonedSet.Add(elem)
|
||||||
|
}
|
||||||
|
return &clonedSet
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) String() string {
|
||||||
|
items := make([]string, 0, len(*set))
|
||||||
|
|
||||||
|
for elem := range *set {
|
||||||
|
items = append(items, fmt.Sprintf("%v", elem))
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("Set{%s}", strings.Join(items, ", "))
|
||||||
|
}
|
||||||
|
|
||||||
|
// String outputs a 2-tuple in the form "(A, B)".
|
||||||
|
func (pair OrderedPair) String() string {
|
||||||
|
return fmt.Sprintf("(%v, %v)", pair.First, pair.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) Pop() interface{} {
|
||||||
|
for item := range *set {
|
||||||
|
delete(*set, item)
|
||||||
|
return item
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) PowerSet() Set {
|
||||||
|
powSet := NewThreadUnsafeSet()
|
||||||
|
nullset := newThreadUnsafeSet()
|
||||||
|
powSet.Add(&nullset)
|
||||||
|
|
||||||
|
for es := range *set {
|
||||||
|
u := newThreadUnsafeSet()
|
||||||
|
j := powSet.Iter()
|
||||||
|
for er := range j {
|
||||||
|
p := newThreadUnsafeSet()
|
||||||
|
if reflect.TypeOf(er).Name() == "" {
|
||||||
|
k := er.(*threadUnsafeSet)
|
||||||
|
for ek := range *(k) {
|
||||||
|
p.Add(ek)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
p.Add(er)
|
||||||
|
}
|
||||||
|
p.Add(es)
|
||||||
|
u.Add(&p)
|
||||||
|
}
|
||||||
|
|
||||||
|
powSet = powSet.Union(&u)
|
||||||
|
}
|
||||||
|
|
||||||
|
return powSet
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) CartesianProduct(other Set) Set {
|
||||||
|
o := other.(*threadUnsafeSet)
|
||||||
|
cartProduct := NewThreadUnsafeSet()
|
||||||
|
|
||||||
|
for i := range *set {
|
||||||
|
for j := range *o {
|
||||||
|
elem := OrderedPair{First: i, Second: j}
|
||||||
|
cartProduct.Add(elem)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return cartProduct
|
||||||
|
}
|
||||||
|
|
||||||
|
func (set *threadUnsafeSet) ToSlice() []interface{} {
|
||||||
|
keys := make([]interface{}, 0, set.Cardinality())
|
||||||
|
for elem := range *set {
|
||||||
|
keys = append(keys, elem)
|
||||||
|
}
|
||||||
|
|
||||||
|
return keys
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON creates a JSON array from the set, it marshals all elements
|
||||||
|
func (set *threadUnsafeSet) MarshalJSON() ([]byte, error) {
|
||||||
|
items := make([]string, 0, set.Cardinality())
|
||||||
|
|
||||||
|
for elem := range *set {
|
||||||
|
b, err := json.Marshal(elem)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
items = append(items, string(b))
|
||||||
|
}
|
||||||
|
|
||||||
|
return []byte(fmt.Sprintf("[%s]", strings.Join(items, ","))), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON recreates a set from a JSON array, it only decodes
|
||||||
|
// primitive types. Numbers are decoded as json.Number.
|
||||||
|
func (set *threadUnsafeSet) UnmarshalJSON(b []byte) error {
|
||||||
|
var i []interface{}
|
||||||
|
|
||||||
|
d := json.NewDecoder(bytes.NewReader(b))
|
||||||
|
d.UseNumber()
|
||||||
|
err := d.Decode(&i)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range i {
|
||||||
|
switch t := v.(type) {
|
||||||
|
case []interface{}, map[string]interface{}:
|
||||||
|
continue
|
||||||
|
default:
|
||||||
|
set.Add(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
85
vendor/github.com/docker/cli/AUTHORS
generated
vendored
85
vendor/github.com/docker/cli/AUTHORS
generated
vendored
|
@ -8,8 +8,10 @@ Aaron.L.Xu <likexu@harmonycloud.cn>
|
||||||
Abdur Rehman <abdur_rehman@mentor.com>
|
Abdur Rehman <abdur_rehman@mentor.com>
|
||||||
Abhinandan Prativadi <abhi@docker.com>
|
Abhinandan Prativadi <abhi@docker.com>
|
||||||
Abin Shahab <ashahab@altiscale.com>
|
Abin Shahab <ashahab@altiscale.com>
|
||||||
|
Ace Tang <aceapril@126.com>
|
||||||
Addam Hardy <addam.hardy@gmail.com>
|
Addam Hardy <addam.hardy@gmail.com>
|
||||||
Adolfo Ochagavía <aochagavia92@gmail.com>
|
Adolfo Ochagavía <aochagavia92@gmail.com>
|
||||||
|
Adrian Plata <adrian.plata@docker.com>
|
||||||
Adrien Duermael <adrien@duermael.com>
|
Adrien Duermael <adrien@duermael.com>
|
||||||
Adrien Folie <folie.adrien@gmail.com>
|
Adrien Folie <folie.adrien@gmail.com>
|
||||||
Ahmet Alp Balkan <ahmetb@microsoft.com>
|
Ahmet Alp Balkan <ahmetb@microsoft.com>
|
||||||
|
@ -23,6 +25,7 @@ Albert Callarisa <shark234@gmail.com>
|
||||||
Aleksa Sarai <asarai@suse.de>
|
Aleksa Sarai <asarai@suse.de>
|
||||||
Alessandro Boch <aboch@tetrationanalytics.com>
|
Alessandro Boch <aboch@tetrationanalytics.com>
|
||||||
Alex Mavrogiannis <alex.mavrogiannis@docker.com>
|
Alex Mavrogiannis <alex.mavrogiannis@docker.com>
|
||||||
|
Alex Mayer <amayer5125@gmail.com>
|
||||||
Alexander Boyd <alex@opengroove.org>
|
Alexander Boyd <alex@opengroove.org>
|
||||||
Alexander Larsson <alexl@redhat.com>
|
Alexander Larsson <alexl@redhat.com>
|
||||||
Alexander Morozov <lk4d4@docker.com>
|
Alexander Morozov <lk4d4@docker.com>
|
||||||
|
@ -37,6 +40,7 @@ Amir Goldstein <amir73il@aquasec.com>
|
||||||
Amit Krishnan <amit.krishnan@oracle.com>
|
Amit Krishnan <amit.krishnan@oracle.com>
|
||||||
Amit Shukla <amit.shukla@docker.com>
|
Amit Shukla <amit.shukla@docker.com>
|
||||||
Amy Lindburg <amy.lindburg@docker.com>
|
Amy Lindburg <amy.lindburg@docker.com>
|
||||||
|
Anda Xu <anda.xu@docker.com>
|
||||||
Andrea Luzzardi <aluzzardi@gmail.com>
|
Andrea Luzzardi <aluzzardi@gmail.com>
|
||||||
Andreas Köhler <andi5.py@gmx.net>
|
Andreas Köhler <andi5.py@gmx.net>
|
||||||
Andrew France <andrew@avito.co.uk>
|
Andrew France <andrew@avito.co.uk>
|
||||||
|
@ -50,10 +54,12 @@ Andy Goldstein <agoldste@redhat.com>
|
||||||
Andy Rothfusz <github@developersupport.net>
|
Andy Rothfusz <github@developersupport.net>
|
||||||
Anil Madhavapeddy <anil@recoil.org>
|
Anil Madhavapeddy <anil@recoil.org>
|
||||||
Ankush Agarwal <ankushagarwal11@gmail.com>
|
Ankush Agarwal <ankushagarwal11@gmail.com>
|
||||||
|
Anne Henmi <anne.henmi@docker.com>
|
||||||
Anton Polonskiy <anton.polonskiy@gmail.com>
|
Anton Polonskiy <anton.polonskiy@gmail.com>
|
||||||
Antonio Murdaca <antonio.murdaca@gmail.com>
|
Antonio Murdaca <antonio.murdaca@gmail.com>
|
||||||
Antonis Kalipetis <akalipetis@gmail.com>
|
Antonis Kalipetis <akalipetis@gmail.com>
|
||||||
Anusha Ragunathan <anusha.ragunathan@docker.com>
|
Anusha Ragunathan <anusha.ragunathan@docker.com>
|
||||||
|
Ao Li <la9249@163.com>
|
||||||
Arash Deshmeh <adeshmeh@ca.ibm.com>
|
Arash Deshmeh <adeshmeh@ca.ibm.com>
|
||||||
Arnaud Porterie <arnaud.porterie@docker.com>
|
Arnaud Porterie <arnaud.porterie@docker.com>
|
||||||
Ashwini Oruganti <ashwini.oruganti@gmail.com>
|
Ashwini Oruganti <ashwini.oruganti@gmail.com>
|
||||||
|
@ -63,8 +69,10 @@ Barnaby Gray <barnaby@pickle.me.uk>
|
||||||
Bastiaan Bakker <bbakker@xebia.com>
|
Bastiaan Bakker <bbakker@xebia.com>
|
||||||
BastianHofmann <bastianhofmann@me.com>
|
BastianHofmann <bastianhofmann@me.com>
|
||||||
Ben Bonnefoy <frenchben@docker.com>
|
Ben Bonnefoy <frenchben@docker.com>
|
||||||
|
Ben Creasy <ben@bencreasy.com>
|
||||||
Ben Firshman <ben@firshman.co.uk>
|
Ben Firshman <ben@firshman.co.uk>
|
||||||
Benjamin Boudreau <boudreau.benjamin@gmail.com>
|
Benjamin Boudreau <boudreau.benjamin@gmail.com>
|
||||||
|
Benoit Sigoure <tsunanet@gmail.com>
|
||||||
Bhumika Bayani <bhumikabayani@gmail.com>
|
Bhumika Bayani <bhumikabayani@gmail.com>
|
||||||
Bill Wang <ozbillwang@gmail.com>
|
Bill Wang <ozbillwang@gmail.com>
|
||||||
Bin Liu <liubin0329@gmail.com>
|
Bin Liu <liubin0329@gmail.com>
|
||||||
|
@ -73,6 +81,7 @@ Boaz Shuster <ripcurld.github@gmail.com>
|
||||||
Bogdan Anton <contact@bogdananton.ro>
|
Bogdan Anton <contact@bogdananton.ro>
|
||||||
Boris Pruessmann <boris@pruessmann.org>
|
Boris Pruessmann <boris@pruessmann.org>
|
||||||
Bradley Cicenas <bradley.cicenas@gmail.com>
|
Bradley Cicenas <bradley.cicenas@gmail.com>
|
||||||
|
Brandon Mitchell <git@bmitch.net>
|
||||||
Brandon Philips <brandon.philips@coreos.com>
|
Brandon Philips <brandon.philips@coreos.com>
|
||||||
Brent Salisbury <brent.salisbury@docker.com>
|
Brent Salisbury <brent.salisbury@docker.com>
|
||||||
Bret Fisher <bret@bretfisher.com>
|
Bret Fisher <bret@bretfisher.com>
|
||||||
|
@ -89,6 +98,7 @@ Carlos Alexandro Becker <caarlos0@gmail.com>
|
||||||
Ce Gao <ce.gao@outlook.com>
|
Ce Gao <ce.gao@outlook.com>
|
||||||
Cedric Davies <cedricda@microsoft.com>
|
Cedric Davies <cedricda@microsoft.com>
|
||||||
Cezar Sa Espinola <cezarsa@gmail.com>
|
Cezar Sa Espinola <cezarsa@gmail.com>
|
||||||
|
Chad Faragher <wyckster@hotmail.com>
|
||||||
Chao Wang <wangchao.fnst@cn.fujitsu.com>
|
Chao Wang <wangchao.fnst@cn.fujitsu.com>
|
||||||
Charles Chan <charleswhchan@users.noreply.github.com>
|
Charles Chan <charleswhchan@users.noreply.github.com>
|
||||||
Charles Law <claw@conduce.com>
|
Charles Law <claw@conduce.com>
|
||||||
|
@ -109,8 +119,9 @@ Christian Stefanescu <st.chris@gmail.com>
|
||||||
Christophe Robin <crobin@nekoo.com>
|
Christophe Robin <crobin@nekoo.com>
|
||||||
Christophe Vidal <kriss@krizalys.com>
|
Christophe Vidal <kriss@krizalys.com>
|
||||||
Christopher Biscardi <biscarch@sketcht.com>
|
Christopher Biscardi <biscarch@sketcht.com>
|
||||||
|
Christopher Crone <christopher.crone@docker.com>
|
||||||
Christopher Jones <tophj@linux.vnet.ibm.com>
|
Christopher Jones <tophj@linux.vnet.ibm.com>
|
||||||
Christy Perez <christy@linux.vnet.ibm.com>
|
Christy Norman <christy@linux.vnet.ibm.com>
|
||||||
Chun Chen <ramichen@tencent.com>
|
Chun Chen <ramichen@tencent.com>
|
||||||
Clinton Kitson <clintonskitson@gmail.com>
|
Clinton Kitson <clintonskitson@gmail.com>
|
||||||
Coenraad Loubser <coenraad@wish.org.za>
|
Coenraad Loubser <coenraad@wish.org.za>
|
||||||
|
@ -118,12 +129,15 @@ Colin Hebert <hebert.colin@gmail.com>
|
||||||
Collin Guarino <collin.guarino@gmail.com>
|
Collin Guarino <collin.guarino@gmail.com>
|
||||||
Colm Hally <colmhally@gmail.com>
|
Colm Hally <colmhally@gmail.com>
|
||||||
Corey Farrell <git@cfware.com>
|
Corey Farrell <git@cfware.com>
|
||||||
|
Corey Quon <corey.quon@docker.com>
|
||||||
|
Craig Wilhite <crwilhit@microsoft.com>
|
||||||
Cristian Staretu <cristian.staretu@gmail.com>
|
Cristian Staretu <cristian.staretu@gmail.com>
|
||||||
Daehyeok Mun <daehyeok@gmail.com>
|
Daehyeok Mun <daehyeok@gmail.com>
|
||||||
Dafydd Crosby <dtcrsby@gmail.com>
|
Dafydd Crosby <dtcrsby@gmail.com>
|
||||||
dalanlan <dalanlan925@gmail.com>
|
dalanlan <dalanlan925@gmail.com>
|
||||||
Damien Nadé <github@livna.org>
|
Damien Nadé <github@livna.org>
|
||||||
Dan Cotora <dan@bluevision.ro>
|
Dan Cotora <dan@bluevision.ro>
|
||||||
|
Daniel Cassidy <mail@danielcassidy.me.uk>
|
||||||
Daniel Dao <dqminh@cloudflare.com>
|
Daniel Dao <dqminh@cloudflare.com>
|
||||||
Daniel Farrell <dfarrell@redhat.com>
|
Daniel Farrell <dfarrell@redhat.com>
|
||||||
Daniel Gasienica <daniel@gasienica.ch>
|
Daniel Gasienica <daniel@gasienica.ch>
|
||||||
|
@ -147,6 +161,7 @@ David Cramer <davcrame@cisco.com>
|
||||||
David Dooling <dooling@gmail.com>
|
David Dooling <dooling@gmail.com>
|
||||||
David Gageot <david@gageot.net>
|
David Gageot <david@gageot.net>
|
||||||
David Lechner <david@lechnology.com>
|
David Lechner <david@lechnology.com>
|
||||||
|
David Scott <dave@recoil.org>
|
||||||
David Sheets <dsheets@docker.com>
|
David Sheets <dsheets@docker.com>
|
||||||
David Williamson <david.williamson@docker.com>
|
David Williamson <david.williamson@docker.com>
|
||||||
David Xia <dxia@spotify.com>
|
David Xia <dxia@spotify.com>
|
||||||
|
@ -173,9 +188,12 @@ Dong Chen <dongluo.chen@docker.com>
|
||||||
Doug Davis <dug@us.ibm.com>
|
Doug Davis <dug@us.ibm.com>
|
||||||
Drew Erny <drew.erny@docker.com>
|
Drew Erny <drew.erny@docker.com>
|
||||||
Ed Costello <epc@epcostello.com>
|
Ed Costello <epc@epcostello.com>
|
||||||
|
Elango Sivanandam <elango.siva@docker.com>
|
||||||
Eli Uriegas <eli.uriegas@docker.com>
|
Eli Uriegas <eli.uriegas@docker.com>
|
||||||
Eli Uriegas <seemethere101@gmail.com>
|
Eli Uriegas <seemethere101@gmail.com>
|
||||||
Elias Faxö <elias.faxo@tre.se>
|
Elias Faxö <elias.faxo@tre.se>
|
||||||
|
Elliot Luo <956941328@qq.com>
|
||||||
|
Eric Curtin <ericcurtin17@gmail.com>
|
||||||
Eric G. Noriega <enoriega@vizuri.com>
|
Eric G. Noriega <enoriega@vizuri.com>
|
||||||
Eric Rosenberg <ehaydenr@gmail.com>
|
Eric Rosenberg <ehaydenr@gmail.com>
|
||||||
Eric Sage <eric.david.sage@gmail.com>
|
Eric Sage <eric.david.sage@gmail.com>
|
||||||
|
@ -183,7 +201,9 @@ Eric-Olivier Lamey <eo@lamey.me>
|
||||||
Erica Windisch <erica@windisch.us>
|
Erica Windisch <erica@windisch.us>
|
||||||
Erik Hollensbe <github@hollensbe.org>
|
Erik Hollensbe <github@hollensbe.org>
|
||||||
Erik St. Martin <alakriti@gmail.com>
|
Erik St. Martin <alakriti@gmail.com>
|
||||||
|
Essam A. Hassan <es.hassan187@gmail.com>
|
||||||
Ethan Haynes <ethanhaynes@alumni.harvard.edu>
|
Ethan Haynes <ethanhaynes@alumni.harvard.edu>
|
||||||
|
Euan Kemp <euank@euank.com>
|
||||||
Eugene Yakubovich <eugene.yakubovich@coreos.com>
|
Eugene Yakubovich <eugene.yakubovich@coreos.com>
|
||||||
Evan Allrich <evan@unguku.com>
|
Evan Allrich <evan@unguku.com>
|
||||||
Evan Hazlett <ejhazlett@gmail.com>
|
Evan Hazlett <ejhazlett@gmail.com>
|
||||||
|
@ -194,10 +214,14 @@ Fabio Falci <fabiofalci@gmail.com>
|
||||||
Fabrizio Soppelsa <fsoppelsa@mirantis.com>
|
Fabrizio Soppelsa <fsoppelsa@mirantis.com>
|
||||||
Felix Hupfeld <felix@quobyte.com>
|
Felix Hupfeld <felix@quobyte.com>
|
||||||
Felix Rabe <felix@rabe.io>
|
Felix Rabe <felix@rabe.io>
|
||||||
|
Filip Jareš <filipjares@gmail.com>
|
||||||
Flavio Crisciani <flavio.crisciani@docker.com>
|
Flavio Crisciani <flavio.crisciani@docker.com>
|
||||||
Florian Klein <florian.klein@free.fr>
|
Florian Klein <florian.klein@free.fr>
|
||||||
|
Forest Johnson <fjohnson@peoplenetonline.com>
|
||||||
Foysal Iqbal <foysal.iqbal.fb@gmail.com>
|
Foysal Iqbal <foysal.iqbal.fb@gmail.com>
|
||||||
|
François Scala <francois.scala@swiss-as.com>
|
||||||
Fred Lifton <fred.lifton@docker.com>
|
Fred Lifton <fred.lifton@docker.com>
|
||||||
|
Frederic Hemberger <mail@frederic-hemberger.de>
|
||||||
Frederick F. Kautz IV <fkautz@redhat.com>
|
Frederick F. Kautz IV <fkautz@redhat.com>
|
||||||
Frederik Nordahl Jul Sabroe <frederikns@gmail.com>
|
Frederik Nordahl Jul Sabroe <frederikns@gmail.com>
|
||||||
Frieder Bluemle <frieder.bluemle@gmail.com>
|
Frieder Bluemle <frieder.bluemle@gmail.com>
|
||||||
|
@ -210,11 +234,13 @@ George MacRorie <gmacr31@gmail.com>
|
||||||
George Xie <georgexsh@gmail.com>
|
George Xie <georgexsh@gmail.com>
|
||||||
Gianluca Borello <g.borello@gmail.com>
|
Gianluca Borello <g.borello@gmail.com>
|
||||||
Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
|
Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
|
||||||
|
Goksu Toprak <goksu.toprak@docker.com>
|
||||||
Gou Rao <gou@portworx.com>
|
Gou Rao <gou@portworx.com>
|
||||||
Grant Reaber <grant.reaber@gmail.com>
|
Grant Reaber <grant.reaber@gmail.com>
|
||||||
Greg Pflaum <gpflaum@users.noreply.github.com>
|
Greg Pflaum <gpflaum@users.noreply.github.com>
|
||||||
Guilhem Lettron <guilhem+github@lettron.fr>
|
Guilhem Lettron <guilhem+github@lettron.fr>
|
||||||
Guillaume J. Charmes <guillaume.charmes@docker.com>
|
Guillaume J. Charmes <guillaume.charmes@docker.com>
|
||||||
|
Guillaume Le Floch <glfloch@gmail.com>
|
||||||
gwx296173 <gaojing3@huawei.com>
|
gwx296173 <gaojing3@huawei.com>
|
||||||
Günther Jungbluth <gunther@gameslabs.net>
|
Günther Jungbluth <gunther@gameslabs.net>
|
||||||
Hakan Özler <hakan.ozler@kodcu.com>
|
Hakan Özler <hakan.ozler@kodcu.com>
|
||||||
|
@ -239,12 +265,14 @@ Ignacio Capurro <icapurrofagian@gmail.com>
|
||||||
Ilya Dmitrichenko <errordeveloper@gmail.com>
|
Ilya Dmitrichenko <errordeveloper@gmail.com>
|
||||||
Ilya Khlopotov <ilya.khlopotov@gmail.com>
|
Ilya Khlopotov <ilya.khlopotov@gmail.com>
|
||||||
Ilya Sotkov <ilya@sotkov.com>
|
Ilya Sotkov <ilya@sotkov.com>
|
||||||
|
Ioan Eugen Stan <eu@ieugen.ro>
|
||||||
Isabel Jimenez <contact.isabeljimenez@gmail.com>
|
Isabel Jimenez <contact.isabeljimenez@gmail.com>
|
||||||
Ivan Grcic <igrcic@gmail.com>
|
Ivan Grcic <igrcic@gmail.com>
|
||||||
Ivan Markin <sw@nogoegst.net>
|
Ivan Markin <sw@nogoegst.net>
|
||||||
Jacob Atzen <jacob@jacobatzen.dk>
|
Jacob Atzen <jacob@jacobatzen.dk>
|
||||||
Jacob Tomlinson <jacob@tom.linson.uk>
|
Jacob Tomlinson <jacob@tom.linson.uk>
|
||||||
Jaivish Kothari <janonymous.codevulture@gmail.com>
|
Jaivish Kothari <janonymous.codevulture@gmail.com>
|
||||||
|
Jake Lambert <jake.lambert@volusion.com>
|
||||||
Jake Sanders <jsand@google.com>
|
Jake Sanders <jsand@google.com>
|
||||||
James Nesbitt <james.nesbitt@wunderkraut.com>
|
James Nesbitt <james.nesbitt@wunderkraut.com>
|
||||||
James Turnbull <james@lovedthanlost.net>
|
James Turnbull <james@lovedthanlost.net>
|
||||||
|
@ -258,8 +286,9 @@ Jasmine Hegman <jasmine@jhegman.com>
|
||||||
Jason Heiss <jheiss@aput.net>
|
Jason Heiss <jheiss@aput.net>
|
||||||
Jason Plum <jplum@devonit.com>
|
Jason Plum <jplum@devonit.com>
|
||||||
Jay Kamat <github@jgkamat.33mail.com>
|
Jay Kamat <github@jgkamat.33mail.com>
|
||||||
|
Jean Rouge <rougej+github@gmail.com>
|
||||||
|
Jean-Christophe Sirot <jean-christophe.sirot@docker.com>
|
||||||
Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr>
|
Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr>
|
||||||
Jean-Pierre Huynh <jp@moogsoft.com>
|
|
||||||
Jeff Lindsay <progrium@gmail.com>
|
Jeff Lindsay <progrium@gmail.com>
|
||||||
Jeff Nickoloff <jeff.nickoloff@gmail.com>
|
Jeff Nickoloff <jeff.nickoloff@gmail.com>
|
||||||
Jeff Silberman <jsilberm@gmail.com>
|
Jeff Silberman <jsilberm@gmail.com>
|
||||||
|
@ -277,6 +306,7 @@ Jim Galasyn <jim.galasyn@docker.com>
|
||||||
Jimmy Leger <jimmy.leger@gmail.com>
|
Jimmy Leger <jimmy.leger@gmail.com>
|
||||||
Jimmy Song <rootsongjc@gmail.com>
|
Jimmy Song <rootsongjc@gmail.com>
|
||||||
jimmyxian <jimmyxian2004@yahoo.com.cn>
|
jimmyxian <jimmyxian2004@yahoo.com.cn>
|
||||||
|
Jintao Zhang <zhangjintao9020@gmail.com>
|
||||||
Joao Fernandes <joao.fernandes@docker.com>
|
Joao Fernandes <joao.fernandes@docker.com>
|
||||||
Joe Doliner <jdoliner@pachyderm.io>
|
Joe Doliner <jdoliner@pachyderm.io>
|
||||||
Joe Gordon <joe.gordon0@gmail.com>
|
Joe Gordon <joe.gordon0@gmail.com>
|
||||||
|
@ -314,7 +344,9 @@ Julien Maitrehenry <julien.maitrehenry@me.com>
|
||||||
Justas Brazauskas <brazauskasjustas@gmail.com>
|
Justas Brazauskas <brazauskasjustas@gmail.com>
|
||||||
Justin Cormack <justin.cormack@docker.com>
|
Justin Cormack <justin.cormack@docker.com>
|
||||||
Justin Simonelis <justin.p.simonelis@gmail.com>
|
Justin Simonelis <justin.p.simonelis@gmail.com>
|
||||||
|
Justyn Temme <justyntemme@gmail.com>
|
||||||
Jyrki Puttonen <jyrkiput@gmail.com>
|
Jyrki Puttonen <jyrkiput@gmail.com>
|
||||||
|
Jérémie Drouet <jeremie.drouet@gmail.com>
|
||||||
Jérôme Petazzoni <jerome.petazzoni@docker.com>
|
Jérôme Petazzoni <jerome.petazzoni@docker.com>
|
||||||
Jörg Thalheim <joerg@higgsboson.tk>
|
Jörg Thalheim <joerg@higgsboson.tk>
|
||||||
Kai Blin <kai@samba.org>
|
Kai Blin <kai@samba.org>
|
||||||
|
@ -323,6 +355,7 @@ Kara Alexandra <kalexandra@us.ibm.com>
|
||||||
Kareem Khazem <karkhaz@karkhaz.com>
|
Kareem Khazem <karkhaz@karkhaz.com>
|
||||||
Karthik Nayak <Karthik.188@gmail.com>
|
Karthik Nayak <Karthik.188@gmail.com>
|
||||||
Kat Samperi <kat.samperi@gmail.com>
|
Kat Samperi <kat.samperi@gmail.com>
|
||||||
|
Kathryn Spiers <kathryn@spiers.me>
|
||||||
Katie McLaughlin <katie@glasnt.com>
|
Katie McLaughlin <katie@glasnt.com>
|
||||||
Ke Xu <leonhartx.k@gmail.com>
|
Ke Xu <leonhartx.k@gmail.com>
|
||||||
Kei Ohmura <ohmura.kei@gmail.com>
|
Kei Ohmura <ohmura.kei@gmail.com>
|
||||||
|
@ -344,12 +377,12 @@ Krasi Georgiev <krasi@vip-consult.solutions>
|
||||||
Kris-Mikael Krister <krismikael@protonmail.com>
|
Kris-Mikael Krister <krismikael@protonmail.com>
|
||||||
Kun Zhang <zkazure@gmail.com>
|
Kun Zhang <zkazure@gmail.com>
|
||||||
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
|
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
|
||||||
Kyle Spiers <kyle@spiers.me>
|
|
||||||
Lachlan Cooper <lachlancooper@gmail.com>
|
Lachlan Cooper <lachlancooper@gmail.com>
|
||||||
Lai Jiangshan <jiangshanlai@gmail.com>
|
Lai Jiangshan <jiangshanlai@gmail.com>
|
||||||
Lars Kellogg-Stedman <lars@redhat.com>
|
Lars Kellogg-Stedman <lars@redhat.com>
|
||||||
Laura Frank <ljfrank@gmail.com>
|
Laura Frank <ljfrank@gmail.com>
|
||||||
Laurent Erignoux <lerignoux@gmail.com>
|
Laurent Erignoux <lerignoux@gmail.com>
|
||||||
|
Lee Gaines <eightlimbed@gmail.com>
|
||||||
Lei Jitang <leijitang@huawei.com>
|
Lei Jitang <leijitang@huawei.com>
|
||||||
Lennie <github@consolejunkie.net>
|
Lennie <github@consolejunkie.net>
|
||||||
Leo Gallucci <elgalu3@gmail.com>
|
Leo Gallucci <elgalu3@gmail.com>
|
||||||
|
@ -357,6 +390,8 @@ Lewis Daly <lewisdaly@me.com>
|
||||||
Li Yi <denverdino@gmail.com>
|
Li Yi <denverdino@gmail.com>
|
||||||
Li Yi <weiyuan.yl@alibaba-inc.com>
|
Li Yi <weiyuan.yl@alibaba-inc.com>
|
||||||
Liang-Chi Hsieh <viirya@gmail.com>
|
Liang-Chi Hsieh <viirya@gmail.com>
|
||||||
|
Lifubang <lifubang@acmcoder.com>
|
||||||
|
Lihua Tang <lhtang@alauda.io>
|
||||||
Lily Guo <lily.guo@docker.com>
|
Lily Guo <lily.guo@docker.com>
|
||||||
Lin Lu <doraalin@163.com>
|
Lin Lu <doraalin@163.com>
|
||||||
Linus Heckemann <lheckemann@twig-world.com>
|
Linus Heckemann <lheckemann@twig-world.com>
|
||||||
|
@ -384,18 +419,24 @@ Mansi Nahar <mmn4185@rit.edu>
|
||||||
mapk0y <mapk0y@gmail.com>
|
mapk0y <mapk0y@gmail.com>
|
||||||
Marc Bihlmaier <marc.bihlmaier@reddoxx.com>
|
Marc Bihlmaier <marc.bihlmaier@reddoxx.com>
|
||||||
Marco Mariani <marco.mariani@alterway.fr>
|
Marco Mariani <marco.mariani@alterway.fr>
|
||||||
|
Marco Vedovati <mvedovati@suse.com>
|
||||||
Marcus Martins <marcus@docker.com>
|
Marcus Martins <marcus@docker.com>
|
||||||
Marianna Tessel <mtesselh@gmail.com>
|
Marianna Tessel <mtesselh@gmail.com>
|
||||||
Marius Sturm <marius@graylog.com>
|
Marius Sturm <marius@graylog.com>
|
||||||
Mark Oates <fl0yd@me.com>
|
Mark Oates <fl0yd@me.com>
|
||||||
|
Marsh Macy <marsma@microsoft.com>
|
||||||
Martin Mosegaard Amdisen <martin.amdisen@praqma.com>
|
Martin Mosegaard Amdisen <martin.amdisen@praqma.com>
|
||||||
Mary Anthony <mary.anthony@docker.com>
|
Mary Anthony <mary.anthony@docker.com>
|
||||||
|
Mason Fish <mason.fish@docker.com>
|
||||||
Mason Malone <mason.malone@gmail.com>
|
Mason Malone <mason.malone@gmail.com>
|
||||||
Mateusz Major <apkd@users.noreply.github.com>
|
Mateusz Major <apkd@users.noreply.github.com>
|
||||||
|
Mathieu Champlon <mathieu.champlon@docker.com>
|
||||||
Matt Gucci <matt9ucci@gmail.com>
|
Matt Gucci <matt9ucci@gmail.com>
|
||||||
Matt Robenolt <matt@ydekproductions.com>
|
Matt Robenolt <matt@ydekproductions.com>
|
||||||
|
Matteo Orefice <matteo.orefice@bites4bits.software>
|
||||||
Matthew Heon <mheon@redhat.com>
|
Matthew Heon <mheon@redhat.com>
|
||||||
Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
|
Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
|
||||||
|
Mauro Porras P <mauroporrasp@gmail.com>
|
||||||
Max Shytikov <mshytikov@gmail.com>
|
Max Shytikov <mshytikov@gmail.com>
|
||||||
Maxime Petazzoni <max@signalfuse.com>
|
Maxime Petazzoni <max@signalfuse.com>
|
||||||
Mei ChunTao <mei.chuntao@zte.com.cn>
|
Mei ChunTao <mei.chuntao@zte.com.cn>
|
||||||
|
@ -425,9 +466,11 @@ Mike MacCana <mike.maccana@gmail.com>
|
||||||
mikelinjie <294893458@qq.com>
|
mikelinjie <294893458@qq.com>
|
||||||
Mikhail Vasin <vasin@cloud-tv.ru>
|
Mikhail Vasin <vasin@cloud-tv.ru>
|
||||||
Milind Chawre <milindchawre@gmail.com>
|
Milind Chawre <milindchawre@gmail.com>
|
||||||
|
Mindaugas Rukas <momomg@gmail.com>
|
||||||
Misty Stanley-Jones <misty@docker.com>
|
Misty Stanley-Jones <misty@docker.com>
|
||||||
Mohammad Banikazemi <mb@us.ibm.com>
|
Mohammad Banikazemi <mb@us.ibm.com>
|
||||||
Mohammed Aaqib Ansari <maaquib@gmail.com>
|
Mohammed Aaqib Ansari <maaquib@gmail.com>
|
||||||
|
Mohini Anne Dsouza <mohini3917@gmail.com>
|
||||||
Moorthy RS <rsmoorthy@gmail.com>
|
Moorthy RS <rsmoorthy@gmail.com>
|
||||||
Morgan Bauer <mbauer@us.ibm.com>
|
Morgan Bauer <mbauer@us.ibm.com>
|
||||||
Moysés Borges <moysesb@gmail.com>
|
Moysés Borges <moysesb@gmail.com>
|
||||||
|
@ -435,9 +478,11 @@ Mrunal Patel <mrunalp@gmail.com>
|
||||||
muicoder <muicoder@gmail.com>
|
muicoder <muicoder@gmail.com>
|
||||||
Muthukumar R <muthur@gmail.com>
|
Muthukumar R <muthur@gmail.com>
|
||||||
Máximo Cuadros <mcuadros@gmail.com>
|
Máximo Cuadros <mcuadros@gmail.com>
|
||||||
|
Mårten Cassel <marten.cassel@gmail.com>
|
||||||
Nace Oroz <orkica@gmail.com>
|
Nace Oroz <orkica@gmail.com>
|
||||||
Nahum Shalman <nshalman@omniti.com>
|
Nahum Shalman <nshalman@omniti.com>
|
||||||
Nalin Dahyabhai <nalin@redhat.com>
|
Nalin Dahyabhai <nalin@redhat.com>
|
||||||
|
Nao YONASHIRO <owan.orisano@gmail.com>
|
||||||
Nassim 'Nass' Eddequiouaq <eddequiouaq.nassim@gmail.com>
|
Nassim 'Nass' Eddequiouaq <eddequiouaq.nassim@gmail.com>
|
||||||
Natalie Parker <nparker@omnifone.com>
|
Natalie Parker <nparker@omnifone.com>
|
||||||
Nate Brennand <nate.brennand@clever.com>
|
Nate Brennand <nate.brennand@clever.com>
|
||||||
|
@ -445,18 +490,22 @@ Nathan Hsieh <hsieh.nathan@gmail.com>
|
||||||
Nathan LeClaire <nathan.leclaire@docker.com>
|
Nathan LeClaire <nathan.leclaire@docker.com>
|
||||||
Nathan McCauley <nathan.mccauley@docker.com>
|
Nathan McCauley <nathan.mccauley@docker.com>
|
||||||
Neil Peterson <neilpeterson@outlook.com>
|
Neil Peterson <neilpeterson@outlook.com>
|
||||||
|
Nick Adcock <nick.adcock@docker.com>
|
||||||
|
Nico Stapelbroek <nstapelbroek@gmail.com>
|
||||||
Nicola Kabar <nicolaka@gmail.com>
|
Nicola Kabar <nicolaka@gmail.com>
|
||||||
Nicolas Borboën <ponsfrilus@gmail.com>
|
Nicolas Borboën <ponsfrilus@gmail.com>
|
||||||
Nicolas De Loof <nicolas.deloof@gmail.com>
|
Nicolas De Loof <nicolas.deloof@gmail.com>
|
||||||
Nikhil Chawla <chawlanikhil24@gmail.com>
|
Nikhil Chawla <chawlanikhil24@gmail.com>
|
||||||
Nikolas Garofil <nikolas.garofil@uantwerpen.be>
|
Nikolas Garofil <nikolas.garofil@uantwerpen.be>
|
||||||
Nikolay Milovanov <nmil@itransformers.net>
|
Nikolay Milovanov <nmil@itransformers.net>
|
||||||
|
Nir Soffer <nsoffer@redhat.com>
|
||||||
Nishant Totla <nishanttotla@gmail.com>
|
Nishant Totla <nishanttotla@gmail.com>
|
||||||
NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
|
NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
|
||||||
Noah Treuhaft <noah.treuhaft@docker.com>
|
Noah Treuhaft <noah.treuhaft@docker.com>
|
||||||
O.S. Tezer <ostezer@gmail.com>
|
O.S. Tezer <ostezer@gmail.com>
|
||||||
ohmystack <jun.jiang02@ele.me>
|
ohmystack <jun.jiang02@ele.me>
|
||||||
Olle Jonsson <olle.jonsson@gmail.com>
|
Olle Jonsson <olle.jonsson@gmail.com>
|
||||||
|
Olli Janatuinen <olli.janatuinen@gmail.com>
|
||||||
Otto Kekäläinen <otto@seravo.fi>
|
Otto Kekäläinen <otto@seravo.fi>
|
||||||
Ovidio Mallo <ovidio.mallo@gmail.com>
|
Ovidio Mallo <ovidio.mallo@gmail.com>
|
||||||
Pascal Borreli <pascal@borreli.com>
|
Pascal Borreli <pascal@borreli.com>
|
||||||
|
@ -474,12 +523,14 @@ Per Lundberg <per.lundberg@ecraft.com>
|
||||||
Peter Edge <peter.edge@gmail.com>
|
Peter Edge <peter.edge@gmail.com>
|
||||||
Peter Hsu <shhsu@microsoft.com>
|
Peter Hsu <shhsu@microsoft.com>
|
||||||
Peter Jaffe <pjaffe@nevo.com>
|
Peter Jaffe <pjaffe@nevo.com>
|
||||||
|
Peter Kehl <peter.kehl@gmail.com>
|
||||||
Peter Nagy <xificurC@gmail.com>
|
Peter Nagy <xificurC@gmail.com>
|
||||||
Peter Salvatore <peter@psftw.com>
|
Peter Salvatore <peter@psftw.com>
|
||||||
Peter Waller <p@pwaller.net>
|
Peter Waller <p@pwaller.net>
|
||||||
Phil Estes <estesp@linux.vnet.ibm.com>
|
Phil Estes <estesp@linux.vnet.ibm.com>
|
||||||
Philip Alexander Etling <paetling@gmail.com>
|
Philip Alexander Etling <paetling@gmail.com>
|
||||||
Philipp Gillé <philipp.gille@gmail.com>
|
Philipp Gillé <philipp.gille@gmail.com>
|
||||||
|
Philipp Schmied <pschmied@schutzwerk.com>
|
||||||
pidster <pid@pidster.com>
|
pidster <pid@pidster.com>
|
||||||
pixelistik <pixelistik@users.noreply.github.com>
|
pixelistik <pixelistik@users.noreply.github.com>
|
||||||
Pratik Karki <prertik@outlook.com>
|
Pratik Karki <prertik@outlook.com>
|
||||||
|
@ -490,6 +541,7 @@ Qiang Huang <h.huangqiang@huawei.com>
|
||||||
Qinglan Peng <qinglanpeng@zju.edu.cn>
|
Qinglan Peng <qinglanpeng@zju.edu.cn>
|
||||||
qudongfang <qudongfang@gmail.com>
|
qudongfang <qudongfang@gmail.com>
|
||||||
Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
|
Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
|
||||||
|
Ravi Shekhar Jethani <rsjethani@gmail.com>
|
||||||
Ray Tsang <rayt@google.com>
|
Ray Tsang <rayt@google.com>
|
||||||
Reficul <xuzhenglun@gmail.com>
|
Reficul <xuzhenglun@gmail.com>
|
||||||
Remy Suen <remy.suen@gmail.com>
|
Remy Suen <remy.suen@gmail.com>
|
||||||
|
@ -506,21 +558,27 @@ Robin Naundorf <r.naundorf@fh-muenster.de>
|
||||||
Robin Speekenbrink <robin@kingsquare.nl>
|
Robin Speekenbrink <robin@kingsquare.nl>
|
||||||
Rodolfo Ortiz <rodolfo.ortiz@definityfirst.com>
|
Rodolfo Ortiz <rodolfo.ortiz@definityfirst.com>
|
||||||
Rogelio Canedo <rcanedo@mappy.priv>
|
Rogelio Canedo <rcanedo@mappy.priv>
|
||||||
|
Rohan Verma <hello@rohanverma.net>
|
||||||
Roland Kammerer <roland.kammerer@linbit.com>
|
Roland Kammerer <roland.kammerer@linbit.com>
|
||||||
Roman Dudin <katrmr@gmail.com>
|
Roman Dudin <katrmr@gmail.com>
|
||||||
Rory Hunter <roryhunter2@gmail.com>
|
Rory Hunter <roryhunter2@gmail.com>
|
||||||
Ross Boucher <rboucher@gmail.com>
|
Ross Boucher <rboucher@gmail.com>
|
||||||
Rubens Figueiredo <r.figueiredo.52@gmail.com>
|
Rubens Figueiredo <r.figueiredo.52@gmail.com>
|
||||||
|
Rui Cao <ruicao@alauda.io>
|
||||||
Ryan Belgrave <rmb1993@gmail.com>
|
Ryan Belgrave <rmb1993@gmail.com>
|
||||||
Ryan Detzel <ryan.detzel@gmail.com>
|
Ryan Detzel <ryan.detzel@gmail.com>
|
||||||
Ryan Stelly <ryan.stelly@live.com>
|
Ryan Stelly <ryan.stelly@live.com>
|
||||||
|
Ryan Wilson-Perkin <ryanwilsonperkin@gmail.com>
|
||||||
|
Ryan Zhang <ryan.zhang@docker.com>
|
||||||
Sainath Grandhi <sainath.grandhi@intel.com>
|
Sainath Grandhi <sainath.grandhi@intel.com>
|
||||||
Sakeven Jiang <jc5930@sina.cn>
|
Sakeven Jiang <jc5930@sina.cn>
|
||||||
Sally O'Malley <somalley@redhat.com>
|
Sally O'Malley <somalley@redhat.com>
|
||||||
Sam Neirinck <sam@samneirinck.com>
|
Sam Neirinck <sam@samneirinck.com>
|
||||||
Sambuddha Basu <sambuddhabasu1@gmail.com>
|
Sambuddha Basu <sambuddhabasu1@gmail.com>
|
||||||
|
Sami Tabet <salph.tabet@gmail.com>
|
||||||
Samuel Karp <skarp@amazon.com>
|
Samuel Karp <skarp@amazon.com>
|
||||||
Santhosh Manohar <santhosh@docker.com>
|
Santhosh Manohar <santhosh@docker.com>
|
||||||
|
Scott Brenner <scott@scottbrenner.me>
|
||||||
Scott Collier <emailscottcollier@gmail.com>
|
Scott Collier <emailscottcollier@gmail.com>
|
||||||
Sean Christopherson <sean.j.christopherson@intel.com>
|
Sean Christopherson <sean.j.christopherson@intel.com>
|
||||||
Sean Rodman <srodman7689@gmail.com>
|
Sean Rodman <srodman7689@gmail.com>
|
||||||
|
@ -548,27 +606,33 @@ Spencer Brown <spencer@spencerbrown.org>
|
||||||
squeegels <1674195+squeegels@users.noreply.github.com>
|
squeegels <1674195+squeegels@users.noreply.github.com>
|
||||||
Srini Brahmaroutu <srbrahma@us.ibm.com>
|
Srini Brahmaroutu <srbrahma@us.ibm.com>
|
||||||
Stefan S. <tronicum@user.github.com>
|
Stefan S. <tronicum@user.github.com>
|
||||||
Stefan Scherer <scherer_stefan@icloud.com>
|
Stefan Scherer <stefan.scherer@docker.com>
|
||||||
Stefan Weil <sw@weilnetz.de>
|
Stefan Weil <sw@weilnetz.de>
|
||||||
|
Stephane Jeandeaux <stephane.jeandeaux@gmail.com>
|
||||||
Stephen Day <stevvooe@gmail.com>
|
Stephen Day <stevvooe@gmail.com>
|
||||||
Stephen Rust <srust@blockbridge.com>
|
Stephen Rust <srust@blockbridge.com>
|
||||||
Steve Durrheimer <s.durrheimer@gmail.com>
|
Steve Durrheimer <s.durrheimer@gmail.com>
|
||||||
|
Steve Richards <steve.richards@docker.com>
|
||||||
Steven Burgess <steven.a.burgess@hotmail.com>
|
Steven Burgess <steven.a.burgess@hotmail.com>
|
||||||
Subhajit Ghosh <isubuz.g@gmail.com>
|
Subhajit Ghosh <isubuz.g@gmail.com>
|
||||||
Sun Jianbo <wonderflow.sun@gmail.com>
|
Sun Jianbo <wonderflow.sun@gmail.com>
|
||||||
|
Sune Keller <absukl@almbrand.dk>
|
||||||
Sungwon Han <sungwon.han@navercorp.com>
|
Sungwon Han <sungwon.han@navercorp.com>
|
||||||
|
Sunny Gogoi <indiasuny000@gmail.com>
|
||||||
Sven Dowideit <SvenDowideit@home.org.au>
|
Sven Dowideit <SvenDowideit@home.org.au>
|
||||||
Sylvain Baubeau <sbaubeau@redhat.com>
|
Sylvain Baubeau <sbaubeau@redhat.com>
|
||||||
Sébastien HOUZÉ <cto@verylastroom.com>
|
Sébastien HOUZÉ <cto@verylastroom.com>
|
||||||
T K Sourabh <sourabhtk37@gmail.com>
|
T K Sourabh <sourabhtk37@gmail.com>
|
||||||
TAGOMORI Satoshi <tagomoris@gmail.com>
|
TAGOMORI Satoshi <tagomoris@gmail.com>
|
||||||
|
taiji-tech <csuhqg@foxmail.com>
|
||||||
Taylor Jones <monitorjbl@gmail.com>
|
Taylor Jones <monitorjbl@gmail.com>
|
||||||
|
Tejaswini Duggaraju <naduggar@microsoft.com>
|
||||||
Thatcher Peskens <thatcher@docker.com>
|
Thatcher Peskens <thatcher@docker.com>
|
||||||
Thomas Gazagnaire <thomas@gazagnaire.org>
|
Thomas Gazagnaire <thomas@gazagnaire.org>
|
||||||
Thomas Krzero <thomas.kovatchitch@gmail.com>
|
Thomas Krzero <thomas.kovatchitch@gmail.com>
|
||||||
Thomas Leonard <thomas.leonard@docker.com>
|
Thomas Leonard <thomas.leonard@docker.com>
|
||||||
Thomas Léveil <thomasleveil@gmail.com>
|
Thomas Léveil <thomasleveil@gmail.com>
|
||||||
Thomas Riccardi <riccardi@systran.fr>
|
Thomas Riccardi <thomas@deepomatic.com>
|
||||||
Thomas Swift <tgs242@gmail.com>
|
Thomas Swift <tgs242@gmail.com>
|
||||||
Tianon Gravi <admwiggin@gmail.com>
|
Tianon Gravi <admwiggin@gmail.com>
|
||||||
Tianyi Wang <capkurmagati@gmail.com>
|
Tianyi Wang <capkurmagati@gmail.com>
|
||||||
|
@ -585,6 +649,8 @@ Tobias Gesellchen <tobias@gesellix.de>
|
||||||
Todd Whiteman <todd.whiteman@joyent.com>
|
Todd Whiteman <todd.whiteman@joyent.com>
|
||||||
Tom Denham <tom@tomdee.co.uk>
|
Tom Denham <tom@tomdee.co.uk>
|
||||||
Tom Fotherby <tom+github@peopleperhour.com>
|
Tom Fotherby <tom+github@peopleperhour.com>
|
||||||
|
Tom Klingenberg <tklingenberg@lastflood.net>
|
||||||
|
Tom Milligan <code@tommilligan.net>
|
||||||
Tom X. Tobin <tomxtobin@tomxtobin.com>
|
Tom X. Tobin <tomxtobin@tomxtobin.com>
|
||||||
Tomas Tomecek <ttomecek@redhat.com>
|
Tomas Tomecek <ttomecek@redhat.com>
|
||||||
Tomasz Kopczynski <tomek@kopczynski.net.pl>
|
Tomasz Kopczynski <tomek@kopczynski.net.pl>
|
||||||
|
@ -597,12 +663,14 @@ Tristan Carel <tristan@cogniteev.com>
|
||||||
Tycho Andersen <tycho@docker.com>
|
Tycho Andersen <tycho@docker.com>
|
||||||
Tycho Andersen <tycho@tycho.ws>
|
Tycho Andersen <tycho@tycho.ws>
|
||||||
uhayate <uhayate.gong@daocloud.io>
|
uhayate <uhayate.gong@daocloud.io>
|
||||||
|
Ulysses Souza <ulysses.souza@docker.com>
|
||||||
Umesh Yadav <umesh4257@gmail.com>
|
Umesh Yadav <umesh4257@gmail.com>
|
||||||
Valentin Lorentz <progval+git@progval.net>
|
Valentin Lorentz <progval+git@progval.net>
|
||||||
Veres Lajos <vlajos@gmail.com>
|
Veres Lajos <vlajos@gmail.com>
|
||||||
Victor Vieux <victor.vieux@docker.com>
|
Victor Vieux <victor.vieux@docker.com>
|
||||||
Victoria Bialas <victoria.bialas@docker.com>
|
Victoria Bialas <victoria.bialas@docker.com>
|
||||||
Viktor Stanchev <me@viktorstanchev.com>
|
Viktor Stanchev <me@viktorstanchev.com>
|
||||||
|
Vimal Raghubir <vraghubir0418@gmail.com>
|
||||||
Vincent Batts <vbatts@redhat.com>
|
Vincent Batts <vbatts@redhat.com>
|
||||||
Vincent Bernat <Vincent.Bernat@exoscale.ch>
|
Vincent Bernat <Vincent.Bernat@exoscale.ch>
|
||||||
Vincent Demeester <vincent.demeester@docker.com>
|
Vincent Demeester <vincent.demeester@docker.com>
|
||||||
|
@ -610,6 +678,7 @@ Vincent Woo <me@vincentwoo.com>
|
||||||
Vishnu Kannan <vishnuk@google.com>
|
Vishnu Kannan <vishnuk@google.com>
|
||||||
Vivek Goyal <vgoyal@redhat.com>
|
Vivek Goyal <vgoyal@redhat.com>
|
||||||
Wang Jie <wangjie5@chinaskycloud.com>
|
Wang Jie <wangjie5@chinaskycloud.com>
|
||||||
|
Wang Lei <wanglei@tenxcloud.com>
|
||||||
Wang Long <long.wanglong@huawei.com>
|
Wang Long <long.wanglong@huawei.com>
|
||||||
Wang Ping <present.wp@icloud.com>
|
Wang Ping <present.wp@icloud.com>
|
||||||
Wang Xing <hzwangxing@corp.netease.com>
|
Wang Xing <hzwangxing@corp.netease.com>
|
||||||
|
@ -622,6 +691,8 @@ Wes Morgan <cap10morgan@gmail.com>
|
||||||
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
|
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
|
||||||
William Henry <whenry@redhat.com>
|
William Henry <whenry@redhat.com>
|
||||||
Xianglin Gao <xlgao@zju.edu.cn>
|
Xianglin Gao <xlgao@zju.edu.cn>
|
||||||
|
Xiaodong Zhang <a4012017@sina.com>
|
||||||
|
Xiaoxi He <xxhe@alauda.io>
|
||||||
Xinbo Weng <xihuanbo_0521@zju.edu.cn>
|
Xinbo Weng <xihuanbo_0521@zju.edu.cn>
|
||||||
Xuecong Liao <satorulogic@gmail.com>
|
Xuecong Liao <satorulogic@gmail.com>
|
||||||
Yan Feng <yanfeng2@huawei.com>
|
Yan Feng <yanfeng2@huawei.com>
|
||||||
|
@ -633,7 +704,10 @@ Yong Tang <yong.tang.github@outlook.com>
|
||||||
Yosef Fertel <yfertel@gmail.com>
|
Yosef Fertel <yfertel@gmail.com>
|
||||||
Yu Peng <yu.peng36@zte.com.cn>
|
Yu Peng <yu.peng36@zte.com.cn>
|
||||||
Yuan Sun <sunyuan3@huawei.com>
|
Yuan Sun <sunyuan3@huawei.com>
|
||||||
|
Yue Zhang <zy675793960@yeah.net>
|
||||||
Yunxiang Huang <hyxqshk@vip.qq.com>
|
Yunxiang Huang <hyxqshk@vip.qq.com>
|
||||||
|
Zachary Romero <zacromero3@gmail.com>
|
||||||
|
Zander Mackie <zmackie@gmail.com>
|
||||||
zebrilee <zebrilee@gmail.com>
|
zebrilee <zebrilee@gmail.com>
|
||||||
Zhang Kun <zkazure@gmail.com>
|
Zhang Kun <zkazure@gmail.com>
|
||||||
Zhang Wei <zhangwei555@huawei.com>
|
Zhang Wei <zhangwei555@huawei.com>
|
||||||
|
@ -641,6 +715,7 @@ Zhang Wentao <zhangwentao234@huawei.com>
|
||||||
ZhangHang <stevezhang2014@gmail.com>
|
ZhangHang <stevezhang2014@gmail.com>
|
||||||
zhenghenghuo <zhenghenghuo@zju.edu.cn>
|
zhenghenghuo <zhenghenghuo@zju.edu.cn>
|
||||||
Zhou Hao <zhouhao@cn.fujitsu.com>
|
Zhou Hao <zhouhao@cn.fujitsu.com>
|
||||||
|
Zhoulin Xie <zhoulin.xie@daocloud.io>
|
||||||
Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
|
Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
|
||||||
Álex González <agonzalezro@gmail.com>
|
Álex González <agonzalezro@gmail.com>
|
||||||
Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
|
Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
|
||||||
|
|
2
vendor/github.com/docker/cli/NOTICE
generated
vendored
2
vendor/github.com/docker/cli/NOTICE
generated
vendored
|
@ -3,7 +3,7 @@ Copyright 2012-2017 Docker, Inc.
|
||||||
|
|
||||||
This product includes software developed at Docker, Inc. (https://www.docker.com).
|
This product includes software developed at Docker, Inc. (https://www.docker.com).
|
||||||
|
|
||||||
This product contains software (https://github.com/kr/pty) developed
|
This product contains software (https://github.com/creack/pty) developed
|
||||||
by Keith Rarick, licensed under the MIT License.
|
by Keith Rarick, licensed under the MIT License.
|
||||||
|
|
||||||
The following is courtesy of our legal counsel:
|
The following is courtesy of our legal counsel:
|
||||||
|
|
|
@ -5,10 +5,11 @@ import (
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/cli/cli/config/configfile"
|
"github.com/docker/cli/cli/config/configfile"
|
||||||
"github.com/docker/cli/cli/config/credentials"
|
"github.com/docker/cli/cli/config/credentials"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/cli/cli/config/types"
|
||||||
"github.com/docker/docker/pkg/homedir"
|
"github.com/docker/docker/pkg/homedir"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
@ -18,6 +19,7 @@ const (
|
||||||
ConfigFileName = "config.json"
|
ConfigFileName = "config.json"
|
||||||
configFileDir = ".docker"
|
configFileDir = ".docker"
|
||||||
oldConfigfile = ".dockercfg"
|
oldConfigfile = ".dockercfg"
|
||||||
|
contextsDir = "contexts"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -35,9 +37,23 @@ func Dir() string {
|
||||||
return configDir
|
return configDir
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ContextStoreDir returns the directory the docker contexts are stored in
|
||||||
|
func ContextStoreDir() string {
|
||||||
|
return filepath.Join(Dir(), contextsDir)
|
||||||
|
}
|
||||||
|
|
||||||
// SetDir sets the directory the configuration file is stored in
|
// SetDir sets the directory the configuration file is stored in
|
||||||
func SetDir(dir string) {
|
func SetDir(dir string) {
|
||||||
configDir = dir
|
configDir = filepath.Clean(dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Path returns the path to a file relative to the config dir
|
||||||
|
func Path(p ...string) (string, error) {
|
||||||
|
path := filepath.Join(append([]string{Dir()}, p...)...)
|
||||||
|
if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) {
|
||||||
|
return "", errors.Errorf("path %q is outside of root config directory %q", path, Dir())
|
||||||
|
}
|
||||||
|
return path, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from
|
// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from
|
98
vendor/github.com/docker/cli/cli/config/configfile/file.go
generated
vendored
98
vendor/github.com/docker/cli/cli/config/configfile/file.go
generated
vendored
|
@ -11,8 +11,7 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/cli/cli/config/credentials"
|
"github.com/docker/cli/cli/config/credentials"
|
||||||
"github.com/docker/cli/opts"
|
"github.com/docker/cli/cli/config/types"
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -25,29 +24,33 @@ const (
|
||||||
|
|
||||||
// ConfigFile ~/.docker/config.json file info
|
// ConfigFile ~/.docker/config.json file info
|
||||||
type ConfigFile struct {
|
type ConfigFile struct {
|
||||||
AuthConfigs map[string]types.AuthConfig `json:"auths"`
|
AuthConfigs map[string]types.AuthConfig `json:"auths"`
|
||||||
HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"`
|
HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"`
|
||||||
PsFormat string `json:"psFormat,omitempty"`
|
PsFormat string `json:"psFormat,omitempty"`
|
||||||
ImagesFormat string `json:"imagesFormat,omitempty"`
|
ImagesFormat string `json:"imagesFormat,omitempty"`
|
||||||
NetworksFormat string `json:"networksFormat,omitempty"`
|
NetworksFormat string `json:"networksFormat,omitempty"`
|
||||||
PluginsFormat string `json:"pluginsFormat,omitempty"`
|
PluginsFormat string `json:"pluginsFormat,omitempty"`
|
||||||
VolumesFormat string `json:"volumesFormat,omitempty"`
|
VolumesFormat string `json:"volumesFormat,omitempty"`
|
||||||
StatsFormat string `json:"statsFormat,omitempty"`
|
StatsFormat string `json:"statsFormat,omitempty"`
|
||||||
DetachKeys string `json:"detachKeys,omitempty"`
|
DetachKeys string `json:"detachKeys,omitempty"`
|
||||||
CredentialsStore string `json:"credsStore,omitempty"`
|
CredentialsStore string `json:"credsStore,omitempty"`
|
||||||
CredentialHelpers map[string]string `json:"credHelpers,omitempty"`
|
CredentialHelpers map[string]string `json:"credHelpers,omitempty"`
|
||||||
Filename string `json:"-"` // Note: for internal use only
|
Filename string `json:"-"` // Note: for internal use only
|
||||||
ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"`
|
ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"`
|
||||||
ServicesFormat string `json:"servicesFormat,omitempty"`
|
ServicesFormat string `json:"servicesFormat,omitempty"`
|
||||||
TasksFormat string `json:"tasksFormat,omitempty"`
|
TasksFormat string `json:"tasksFormat,omitempty"`
|
||||||
SecretFormat string `json:"secretFormat,omitempty"`
|
SecretFormat string `json:"secretFormat,omitempty"`
|
||||||
ConfigFormat string `json:"configFormat,omitempty"`
|
ConfigFormat string `json:"configFormat,omitempty"`
|
||||||
NodesFormat string `json:"nodesFormat,omitempty"`
|
NodesFormat string `json:"nodesFormat,omitempty"`
|
||||||
PruneFilters []string `json:"pruneFilters,omitempty"`
|
PruneFilters []string `json:"pruneFilters,omitempty"`
|
||||||
Proxies map[string]ProxyConfig `json:"proxies,omitempty"`
|
Proxies map[string]ProxyConfig `json:"proxies,omitempty"`
|
||||||
Experimental string `json:"experimental,omitempty"`
|
Experimental string `json:"experimental,omitempty"`
|
||||||
StackOrchestrator string `json:"stackOrchestrator,omitempty"`
|
StackOrchestrator string `json:"stackOrchestrator,omitempty"`
|
||||||
Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"`
|
Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"`
|
||||||
|
CurrentContext string `json:"currentContext,omitempty"`
|
||||||
|
CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"`
|
||||||
|
Plugins map[string]map[string]string `json:"plugins,omitempty"`
|
||||||
|
Aliases map[string]string `json:"aliases,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ProxyConfig contains proxy configuration settings
|
// ProxyConfig contains proxy configuration settings
|
||||||
|
@ -69,6 +72,8 @@ func New(fn string) *ConfigFile {
|
||||||
AuthConfigs: make(map[string]types.AuthConfig),
|
AuthConfigs: make(map[string]types.AuthConfig),
|
||||||
HTTPHeaders: make(map[string]string),
|
HTTPHeaders: make(map[string]string),
|
||||||
Filename: fn,
|
Filename: fn,
|
||||||
|
Plugins: make(map[string]map[string]string),
|
||||||
|
Aliases: make(map[string]string),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -194,7 +199,7 @@ func (configFile *ConfigFile) Save() error {
|
||||||
|
|
||||||
// ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and
|
// ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and
|
||||||
// then checking this against any environment variables provided to the container
|
// then checking this against any environment variables provided to the container
|
||||||
func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts []string) map[string]*string {
|
func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string {
|
||||||
var cfgKey string
|
var cfgKey string
|
||||||
|
|
||||||
if _, ok := configFile.Proxies[host]; !ok {
|
if _, ok := configFile.Proxies[host]; !ok {
|
||||||
|
@ -210,7 +215,10 @@ func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts []string) ma
|
||||||
"NO_PROXY": &config.NoProxy,
|
"NO_PROXY": &config.NoProxy,
|
||||||
"FTP_PROXY": &config.FTPProxy,
|
"FTP_PROXY": &config.FTPProxy,
|
||||||
}
|
}
|
||||||
m := opts.ConvertKVStringsToMapWithNil(runOpts)
|
m := runOpts
|
||||||
|
if m == nil {
|
||||||
|
m = make(map[string]*string)
|
||||||
|
}
|
||||||
for k := range permitted {
|
for k := range permitted {
|
||||||
if *permitted[k] == "" {
|
if *permitted[k] == "" {
|
||||||
continue
|
continue
|
||||||
|
@ -326,6 +334,42 @@ func (configFile *ConfigFile) GetFilename() string {
|
||||||
return configFile.Filename
|
return configFile.Filename
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PluginConfig retrieves the requested option for the given plugin.
|
||||||
|
func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) {
|
||||||
|
if configFile.Plugins == nil {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
pluginConfig, ok := configFile.Plugins[pluginname]
|
||||||
|
if !ok {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
value, ok := pluginConfig[option]
|
||||||
|
return value, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPluginConfig sets the option to the given value for the given
|
||||||
|
// plugin. Passing a value of "" will remove the option. If removing
|
||||||
|
// the final config item for a given plugin then also cleans up the
|
||||||
|
// overall plugin entry.
|
||||||
|
func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) {
|
||||||
|
if configFile.Plugins == nil {
|
||||||
|
configFile.Plugins = make(map[string]map[string]string)
|
||||||
|
}
|
||||||
|
pluginConfig, ok := configFile.Plugins[pluginname]
|
||||||
|
if !ok {
|
||||||
|
pluginConfig = make(map[string]string)
|
||||||
|
configFile.Plugins[pluginname] = pluginConfig
|
||||||
|
}
|
||||||
|
if value != "" {
|
||||||
|
pluginConfig[option] = value
|
||||||
|
} else {
|
||||||
|
delete(pluginConfig, option)
|
||||||
|
}
|
||||||
|
if len(pluginConfig) == 0 {
|
||||||
|
delete(configFile.Plugins, pluginname)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error {
|
func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error {
|
||||||
if kubeConfig == nil {
|
if kubeConfig == nil {
|
||||||
return nil
|
return nil
|
||||||
|
|
2
vendor/github.com/docker/cli/cli/config/credentials/credentials.go
generated
vendored
2
vendor/github.com/docker/cli/cli/config/credentials/credentials.go
generated
vendored
|
@ -1,7 +1,7 @@
|
||||||
package credentials
|
package credentials
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/cli/cli/config/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Store is the interface that any credentials store must implement.
|
// Store is the interface that any credentials store must implement.
|
||||||
|
|
23
vendor/github.com/docker/cli/cli/config/credentials/file_store.go
generated
vendored
23
vendor/github.com/docker/cli/cli/config/credentials/file_store.go
generated
vendored
|
@ -1,8 +1,9 @@
|
||||||
package credentials
|
package credentials
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/docker/docker/api/types"
|
"strings"
|
||||||
"github.com/docker/docker/registry"
|
|
||||||
|
"github.com/docker/cli/cli/config/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
type store interface {
|
type store interface {
|
||||||
|
@ -35,7 +36,7 @@ func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) {
|
||||||
// Maybe they have a legacy config file, we will iterate the keys converting
|
// Maybe they have a legacy config file, we will iterate the keys converting
|
||||||
// them to the new format and testing
|
// them to the new format and testing
|
||||||
for r, ac := range c.file.GetAuthConfigs() {
|
for r, ac := range c.file.GetAuthConfigs() {
|
||||||
if serverAddress == registry.ConvertToHostname(r) {
|
if serverAddress == ConvertToHostname(r) {
|
||||||
return ac, nil
|
return ac, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -62,3 +63,19 @@ func (c *fileStore) GetFilename() string {
|
||||||
func (c *fileStore) IsFileStore() bool {
|
func (c *fileStore) IsFileStore() bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ConvertToHostname converts a registry url which has http|https prepended
|
||||||
|
// to just an hostname.
|
||||||
|
// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies.
|
||||||
|
func ConvertToHostname(url string) string {
|
||||||
|
stripped := url
|
||||||
|
if strings.HasPrefix(url, "http://") {
|
||||||
|
stripped = strings.TrimPrefix(url, "http://")
|
||||||
|
} else if strings.HasPrefix(url, "https://") {
|
||||||
|
stripped = strings.TrimPrefix(url, "https://")
|
||||||
|
}
|
||||||
|
|
||||||
|
nameParts := strings.SplitN(stripped, "/", 2)
|
||||||
|
|
||||||
|
return nameParts[0]
|
||||||
|
}
|
||||||
|
|
2
vendor/github.com/docker/cli/cli/config/credentials/native_store.go
generated
vendored
2
vendor/github.com/docker/cli/cli/config/credentials/native_store.go
generated
vendored
|
@ -1,9 +1,9 @@
|
||||||
package credentials
|
package credentials
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"github.com/docker/cli/cli/config/types"
|
||||||
"github.com/docker/docker-credential-helpers/client"
|
"github.com/docker/docker-credential-helpers/client"
|
||||||
"github.com/docker/docker-credential-helpers/credentials"
|
"github.com/docker/docker-credential-helpers/credentials"
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|
22
vendor/github.com/docker/cli/cli/config/types/authconfig.go
generated
vendored
Normal file
22
vendor/github.com/docker/cli/cli/config/types/authconfig.go
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
package types
|
||||||
|
|
||||||
|
// AuthConfig contains authorization information for connecting to a Registry
|
||||||
|
type AuthConfig struct {
|
||||||
|
Username string `json:"username,omitempty"`
|
||||||
|
Password string `json:"password,omitempty"`
|
||||||
|
Auth string `json:"auth,omitempty"`
|
||||||
|
|
||||||
|
// Email is an optional value associated with the username.
|
||||||
|
// This field is deprecated and will be removed in a later
|
||||||
|
// version of docker.
|
||||||
|
Email string `json:"email,omitempty"`
|
||||||
|
|
||||||
|
ServerAddress string `json:"serveraddress,omitempty"`
|
||||||
|
|
||||||
|
// IdentityToken is used to authenticate the user and get
|
||||||
|
// an access token for the registry.
|
||||||
|
IdentityToken string `json:"identitytoken,omitempty"`
|
||||||
|
|
||||||
|
// RegistryToken is a bearer token to be sent to a registry
|
||||||
|
RegistryToken string `json:"registrytoken,omitempty"`
|
||||||
|
}
|
98
vendor/github.com/docker/cli/opts/config.go
generated
vendored
98
vendor/github.com/docker/cli/opts/config.go
generated
vendored
|
@ -1,98 +0,0 @@
|
||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/csv"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
swarmtypes "github.com/docker/docker/api/types/swarm"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ConfigOpt is a Value type for parsing configs
|
|
||||||
type ConfigOpt struct {
|
|
||||||
values []*swarmtypes.ConfigReference
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set a new config value
|
|
||||||
func (o *ConfigOpt) Set(value string) error {
|
|
||||||
csvReader := csv.NewReader(strings.NewReader(value))
|
|
||||||
fields, err := csvReader.Read()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
options := &swarmtypes.ConfigReference{
|
|
||||||
File: &swarmtypes.ConfigReferenceFileTarget{
|
|
||||||
UID: "0",
|
|
||||||
GID: "0",
|
|
||||||
Mode: 0444,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// support a simple syntax of --config foo
|
|
||||||
if len(fields) == 1 {
|
|
||||||
options.File.Name = fields[0]
|
|
||||||
options.ConfigName = fields[0]
|
|
||||||
o.values = append(o.values, options)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, field := range fields {
|
|
||||||
parts := strings.SplitN(field, "=", 2)
|
|
||||||
key := strings.ToLower(parts[0])
|
|
||||||
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
|
|
||||||
}
|
|
||||||
|
|
||||||
value := parts[1]
|
|
||||||
switch key {
|
|
||||||
case "source", "src":
|
|
||||||
options.ConfigName = value
|
|
||||||
case "target":
|
|
||||||
options.File.Name = value
|
|
||||||
case "uid":
|
|
||||||
options.File.UID = value
|
|
||||||
case "gid":
|
|
||||||
options.File.GID = value
|
|
||||||
case "mode":
|
|
||||||
m, err := strconv.ParseUint(value, 0, 32)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("invalid mode specified: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
options.File.Mode = os.FileMode(m)
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("invalid field in config request: %s", key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if options.ConfigName == "" {
|
|
||||||
return fmt.Errorf("source is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
o.values = append(o.values, options)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns the type of this option
|
|
||||||
func (o *ConfigOpt) Type() string {
|
|
||||||
return "config"
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a string repr of this option
|
|
||||||
func (o *ConfigOpt) String() string {
|
|
||||||
configs := []string{}
|
|
||||||
for _, config := range o.values {
|
|
||||||
repr := fmt.Sprintf("%s -> %s", config.ConfigName, config.File.Name)
|
|
||||||
configs = append(configs, repr)
|
|
||||||
}
|
|
||||||
return strings.Join(configs, ", ")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the config requests
|
|
||||||
func (o *ConfigOpt) Value() []*swarmtypes.ConfigReference {
|
|
||||||
return o.values
|
|
||||||
}
|
|
64
vendor/github.com/docker/cli/opts/duration.go
generated
vendored
64
vendor/github.com/docker/cli/opts/duration.go
generated
vendored
|
@ -1,64 +0,0 @@
|
||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PositiveDurationOpt is an option type for time.Duration that uses a pointer.
|
|
||||||
// It behave similarly to DurationOpt but only allows positive duration values.
|
|
||||||
type PositiveDurationOpt struct {
|
|
||||||
DurationOpt
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set a new value on the option. Setting a negative duration value will cause
|
|
||||||
// an error to be returned.
|
|
||||||
func (d *PositiveDurationOpt) Set(s string) error {
|
|
||||||
err := d.DurationOpt.Set(s)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if *d.DurationOpt.value < 0 {
|
|
||||||
return errors.Errorf("duration cannot be negative")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DurationOpt is an option type for time.Duration that uses a pointer. This
|
|
||||||
// allows us to get nil values outside, instead of defaulting to 0
|
|
||||||
type DurationOpt struct {
|
|
||||||
value *time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDurationOpt creates a DurationOpt with the specified duration
|
|
||||||
func NewDurationOpt(value *time.Duration) *DurationOpt {
|
|
||||||
return &DurationOpt{
|
|
||||||
value: value,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set a new value on the option
|
|
||||||
func (d *DurationOpt) Set(s string) error {
|
|
||||||
v, err := time.ParseDuration(s)
|
|
||||||
d.value = &v
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns the type of this option, which will be displayed in `--help` output
|
|
||||||
func (d *DurationOpt) Type() string {
|
|
||||||
return "duration"
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a string repr of this option
|
|
||||||
func (d *DurationOpt) String() string {
|
|
||||||
if d.value != nil {
|
|
||||||
return d.value.String()
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the time.Duration
|
|
||||||
func (d *DurationOpt) Value() *time.Duration {
|
|
||||||
return d.value
|
|
||||||
}
|
|
46
vendor/github.com/docker/cli/opts/env.go
generated
vendored
46
vendor/github.com/docker/cli/opts/env.go
generated
vendored
|
@ -1,46 +0,0 @@
|
||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ValidateEnv validates an environment variable and returns it.
|
|
||||||
// If no value is specified, it returns the current value using os.Getenv.
|
|
||||||
//
|
|
||||||
// As on ParseEnvFile and related to #16585, environment variable names
|
|
||||||
// are not validate what so ever, it's up to application inside docker
|
|
||||||
// to validate them or not.
|
|
||||||
//
|
|
||||||
// The only validation here is to check if name is empty, per #25099
|
|
||||||
func ValidateEnv(val string) (string, error) {
|
|
||||||
arr := strings.Split(val, "=")
|
|
||||||
if arr[0] == "" {
|
|
||||||
return "", fmt.Errorf("invalid environment variable: %s", val)
|
|
||||||
}
|
|
||||||
if len(arr) > 1 {
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
if !doesEnvExist(val) {
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func doesEnvExist(name string) bool {
|
|
||||||
for _, entry := range os.Environ() {
|
|
||||||
parts := strings.SplitN(entry, "=", 2)
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
// Environment variable are case-insensitive on Windows. PaTh, path and PATH are equivalent.
|
|
||||||
if strings.EqualFold(parts[0], name) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if parts[0] == name {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
22
vendor/github.com/docker/cli/opts/envfile.go
generated
vendored
22
vendor/github.com/docker/cli/opts/envfile.go
generated
vendored
|
@ -1,22 +0,0 @@
|
||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ParseEnvFile reads a file with environment variables enumerated by lines
|
|
||||||
//
|
|
||||||
// ``Environment variable names used by the utilities in the Shell and
|
|
||||||
// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase
|
|
||||||
// letters, digits, and the '_' (underscore) from the characters defined in
|
|
||||||
// Portable Character Set and do not begin with a digit. *But*, other
|
|
||||||
// characters may be permitted by an implementation; applications shall
|
|
||||||
// tolerate the presence of such names.''
|
|
||||||
// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html
|
|
||||||
//
|
|
||||||
// As of #16585, it's up to application inside docker to validate or not
|
|
||||||
// environment variables, that's why we just strip leading whitespace and
|
|
||||||
// nothing more.
|
|
||||||
func ParseEnvFile(filename string) ([]string, error) {
|
|
||||||
return parseKeyValueFile(filename, os.LookupEnv)
|
|
||||||
}
|
|
77
vendor/github.com/docker/cli/opts/file.go
generated
vendored
77
vendor/github.com/docker/cli/opts/file.go
generated
vendored
|
@ -1,77 +0,0 @@
|
||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
var whiteSpaces = " \t"
|
|
||||||
|
|
||||||
// ErrBadKey typed error for bad environment variable
|
|
||||||
type ErrBadKey struct {
|
|
||||||
msg string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e ErrBadKey) Error() string {
|
|
||||||
return fmt.Sprintf("poorly formatted environment: %s", e.msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseKeyValueFile(filename string, emptyFn func(string) (string, bool)) ([]string, error) {
|
|
||||||
fh, err := os.Open(filename)
|
|
||||||
if err != nil {
|
|
||||||
return []string{}, err
|
|
||||||
}
|
|
||||||
defer fh.Close()
|
|
||||||
|
|
||||||
lines := []string{}
|
|
||||||
scanner := bufio.NewScanner(fh)
|
|
||||||
currentLine := 0
|
|
||||||
utf8bom := []byte{0xEF, 0xBB, 0xBF}
|
|
||||||
for scanner.Scan() {
|
|
||||||
scannedBytes := scanner.Bytes()
|
|
||||||
if !utf8.Valid(scannedBytes) {
|
|
||||||
return []string{}, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", filename, currentLine+1, scannedBytes)
|
|
||||||
}
|
|
||||||
// We trim UTF8 BOM
|
|
||||||
if currentLine == 0 {
|
|
||||||
scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom)
|
|
||||||
}
|
|
||||||
// trim the line from all leading whitespace first
|
|
||||||
line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace)
|
|
||||||
currentLine++
|
|
||||||
// line is not empty, and not starting with '#'
|
|
||||||
if len(line) > 0 && !strings.HasPrefix(line, "#") {
|
|
||||||
data := strings.SplitN(line, "=", 2)
|
|
||||||
|
|
||||||
// trim the front of a variable, but nothing else
|
|
||||||
variable := strings.TrimLeft(data[0], whiteSpaces)
|
|
||||||
if strings.ContainsAny(variable, whiteSpaces) {
|
|
||||||
return []string{}, ErrBadKey{fmt.Sprintf("variable '%s' has white spaces", variable)}
|
|
||||||
}
|
|
||||||
if len(variable) == 0 {
|
|
||||||
return []string{}, ErrBadKey{fmt.Sprintf("no variable name on line '%s'", line)}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(data) > 1 {
|
|
||||||
// pass the value through, no trimming
|
|
||||||
lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1]))
|
|
||||||
} else {
|
|
||||||
var value string
|
|
||||||
var present bool
|
|
||||||
if emptyFn != nil {
|
|
||||||
value, present = emptyFn(line)
|
|
||||||
}
|
|
||||||
if present {
|
|
||||||
// if only a pass-through variable is given, clean it up.
|
|
||||||
lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), value))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return lines, scanner.Err()
|
|
||||||
}
|
|
165
vendor/github.com/docker/cli/opts/hosts.go
generated
vendored
165
vendor/github.com/docker/cli/opts/hosts.go
generated
vendored
|
@ -1,165 +0,0 @@
|
||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. dockerd -H tcp://
|
|
||||||
// These are the IANA registered port numbers for use with Docker
|
|
||||||
// see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
|
|
||||||
DefaultHTTPPort = 2375 // Default HTTP Port
|
|
||||||
// DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
|
|
||||||
DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
|
|
||||||
// DefaultUnixSocket Path for the unix socket.
|
|
||||||
// Docker daemon by default always listens on the default unix socket
|
|
||||||
DefaultUnixSocket = "/var/run/docker.sock"
|
|
||||||
// DefaultTCPHost constant defines the default host string used by docker on Windows
|
|
||||||
DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
|
|
||||||
// DefaultTLSHost constant defines the default host string used by docker for TLS sockets
|
|
||||||
DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
|
|
||||||
// DefaultNamedPipe defines the default named pipe used by docker on Windows
|
|
||||||
DefaultNamedPipe = `//./pipe/docker_engine`
|
|
||||||
)
|
|
||||||
|
|
||||||
// ValidateHost validates that the specified string is a valid host and returns it.
|
|
||||||
func ValidateHost(val string) (string, error) {
|
|
||||||
host := strings.TrimSpace(val)
|
|
||||||
// The empty string means default and is not handled by parseDockerDaemonHost
|
|
||||||
if host != "" {
|
|
||||||
_, err := parseDockerDaemonHost(host)
|
|
||||||
if err != nil {
|
|
||||||
return val, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Note: unlike most flag validators, we don't return the mutated value here
|
|
||||||
// we need to know what the user entered later (using ParseHost) to adjust for TLS
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseHost and set defaults for a Daemon host string
|
|
||||||
func ParseHost(defaultToTLS bool, val string) (string, error) {
|
|
||||||
host := strings.TrimSpace(val)
|
|
||||||
if host == "" {
|
|
||||||
if defaultToTLS {
|
|
||||||
host = DefaultTLSHost
|
|
||||||
} else {
|
|
||||||
host = DefaultHost
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
var err error
|
|
||||||
host, err = parseDockerDaemonHost(host)
|
|
||||||
if err != nil {
|
|
||||||
return val, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return host, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
|
|
||||||
// Depending of the address specified, this may return one of the global Default* strings defined in hosts.go.
|
|
||||||
func parseDockerDaemonHost(addr string) (string, error) {
|
|
||||||
addrParts := strings.SplitN(addr, "://", 2)
|
|
||||||
if len(addrParts) == 1 && addrParts[0] != "" {
|
|
||||||
addrParts = []string{"tcp", addrParts[0]}
|
|
||||||
}
|
|
||||||
|
|
||||||
switch addrParts[0] {
|
|
||||||
case "tcp":
|
|
||||||
return ParseTCPAddr(addrParts[1], DefaultTCPHost)
|
|
||||||
case "unix":
|
|
||||||
return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket)
|
|
||||||
case "npipe":
|
|
||||||
return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
|
|
||||||
case "fd":
|
|
||||||
return addr, nil
|
|
||||||
default:
|
|
||||||
return "", fmt.Errorf("Invalid bind address format: %s", addr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseSimpleProtoAddr parses and validates that the specified address is a valid
|
|
||||||
// socket address for simple protocols like unix and npipe. It returns a formatted
|
|
||||||
// socket address, either using the address parsed from addr, or the contents of
|
|
||||||
// defaultAddr if addr is a blank string.
|
|
||||||
func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
|
|
||||||
addr = strings.TrimPrefix(addr, proto+"://")
|
|
||||||
if strings.Contains(addr, "://") {
|
|
||||||
return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
|
|
||||||
}
|
|
||||||
if addr == "" {
|
|
||||||
addr = defaultAddr
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s://%s", proto, addr), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseTCPAddr parses and validates that the specified address is a valid TCP
|
|
||||||
// address. It returns a formatted TCP address, either using the address parsed
|
|
||||||
// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
|
|
||||||
// tryAddr is expected to have already been Trim()'d
|
|
||||||
// defaultAddr must be in the full `tcp://host:port` form
|
|
||||||
func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
|
|
||||||
if tryAddr == "" || tryAddr == "tcp://" {
|
|
||||||
return defaultAddr, nil
|
|
||||||
}
|
|
||||||
addr := strings.TrimPrefix(tryAddr, "tcp://")
|
|
||||||
if strings.Contains(addr, "://") || addr == "" {
|
|
||||||
return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
|
|
||||||
}
|
|
||||||
|
|
||||||
defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
|
|
||||||
defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
// url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
|
|
||||||
// not 1.4. See https://github.com/golang/go/issues/12200 and
|
|
||||||
// https://github.com/golang/go/issues/6530.
|
|
||||||
if strings.HasSuffix(addr, "]:") {
|
|
||||||
addr += defaultPort
|
|
||||||
}
|
|
||||||
|
|
||||||
u, err := url.Parse("tcp://" + addr)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
host, port, err := net.SplitHostPort(u.Host)
|
|
||||||
if err != nil {
|
|
||||||
// try port addition once
|
|
||||||
host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort))
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
|
|
||||||
}
|
|
||||||
|
|
||||||
if host == "" {
|
|
||||||
host = defaultHost
|
|
||||||
}
|
|
||||||
if port == "" {
|
|
||||||
port = defaultPort
|
|
||||||
}
|
|
||||||
p, err := strconv.Atoi(port)
|
|
||||||
if err != nil && p == 0 {
|
|
||||||
return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateExtraHost validates that the specified string is a valid extrahost and returns it.
|
|
||||||
// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6).
|
|
||||||
func ValidateExtraHost(val string) (string, error) {
|
|
||||||
// allow for IPv6 addresses in extra hosts by only splitting on first ":"
|
|
||||||
arr := strings.SplitN(val, ":", 2)
|
|
||||||
if len(arr) != 2 || len(arr[0]) == 0 {
|
|
||||||
return "", fmt.Errorf("bad format for add-host: %q", val)
|
|
||||||
}
|
|
||||||
if _, err := ValidateIPAddress(arr[1]); err != nil {
|
|
||||||
return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
|
|
||||||
}
|
|
||||||
return val, nil
|
|
||||||
}
|
|
8
vendor/github.com/docker/cli/opts/hosts_unix.go
generated
vendored
8
vendor/github.com/docker/cli/opts/hosts_unix.go
generated
vendored
|
@ -1,8 +0,0 @@
|
||||||
// +build !windows
|
|
||||||
|
|
||||||
package opts
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
// DefaultHost constant defines the default host string used by docker on other hosts than Windows
|
|
||||||
var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
|
|
6
vendor/github.com/docker/cli/opts/hosts_windows.go
generated
vendored
6
vendor/github.com/docker/cli/opts/hosts_windows.go
generated
vendored
|
@ -1,6 +0,0 @@
|
||||||
// +build windows
|
|
||||||
|
|
||||||
package opts
|
|
||||||
|
|
||||||
// DefaultHost constant defines the default host string used by docker on Windows
|
|
||||||
var DefaultHost = "npipe://" + DefaultNamedPipe
|
|
47
vendor/github.com/docker/cli/opts/ip.go
generated
vendored
47
vendor/github.com/docker/cli/opts/ip.go
generated
vendored
|
@ -1,47 +0,0 @@
|
||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IPOpt holds an IP. It is used to store values from CLI flags.
|
|
||||||
type IPOpt struct {
|
|
||||||
*net.IP
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewIPOpt creates a new IPOpt from a reference net.IP and a
|
|
||||||
// string representation of an IP. If the string is not a valid
|
|
||||||
// IP it will fallback to the specified reference.
|
|
||||||
func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
|
|
||||||
o := &IPOpt{
|
|
||||||
IP: ref,
|
|
||||||
}
|
|
||||||
o.Set(defaultVal)
|
|
||||||
return o
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets an IPv4 or IPv6 address from a given string. If the given
|
|
||||||
// string is not parseable as an IP address it returns an error.
|
|
||||||
func (o *IPOpt) Set(val string) error {
|
|
||||||
ip := net.ParseIP(val)
|
|
||||||
if ip == nil {
|
|
||||||
return fmt.Errorf("%s is not an ip address", val)
|
|
||||||
}
|
|
||||||
*o.IP = ip
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the IP address stored in the IPOpt. If stored IP is a
|
|
||||||
// nil pointer, it returns an empty string.
|
|
||||||
func (o *IPOpt) String() string {
|
|
||||||
if *o.IP == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return o.IP.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns the type of the option
|
|
||||||
func (o *IPOpt) Type() string {
|
|
||||||
return "ip"
|
|
||||||
}
|
|
174
vendor/github.com/docker/cli/opts/mount.go
generated
vendored
174
vendor/github.com/docker/cli/opts/mount.go
generated
vendored
|
@ -1,174 +0,0 @@
|
||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/csv"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
mounttypes "github.com/docker/docker/api/types/mount"
|
|
||||||
"github.com/docker/go-units"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MountOpt is a Value type for parsing mounts
|
|
||||||
type MountOpt struct {
|
|
||||||
values []mounttypes.Mount
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set a new mount value
|
|
||||||
// nolint: gocyclo
|
|
||||||
func (m *MountOpt) Set(value string) error {
|
|
||||||
csvReader := csv.NewReader(strings.NewReader(value))
|
|
||||||
fields, err := csvReader.Read()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
mount := mounttypes.Mount{}
|
|
||||||
|
|
||||||
volumeOptions := func() *mounttypes.VolumeOptions {
|
|
||||||
if mount.VolumeOptions == nil {
|
|
||||||
mount.VolumeOptions = &mounttypes.VolumeOptions{
|
|
||||||
Labels: make(map[string]string),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if mount.VolumeOptions.DriverConfig == nil {
|
|
||||||
mount.VolumeOptions.DriverConfig = &mounttypes.Driver{}
|
|
||||||
}
|
|
||||||
return mount.VolumeOptions
|
|
||||||
}
|
|
||||||
|
|
||||||
bindOptions := func() *mounttypes.BindOptions {
|
|
||||||
if mount.BindOptions == nil {
|
|
||||||
mount.BindOptions = new(mounttypes.BindOptions)
|
|
||||||
}
|
|
||||||
return mount.BindOptions
|
|
||||||
}
|
|
||||||
|
|
||||||
tmpfsOptions := func() *mounttypes.TmpfsOptions {
|
|
||||||
if mount.TmpfsOptions == nil {
|
|
||||||
mount.TmpfsOptions = new(mounttypes.TmpfsOptions)
|
|
||||||
}
|
|
||||||
return mount.TmpfsOptions
|
|
||||||
}
|
|
||||||
|
|
||||||
setValueOnMap := func(target map[string]string, value string) {
|
|
||||||
parts := strings.SplitN(value, "=", 2)
|
|
||||||
if len(parts) == 1 {
|
|
||||||
target[value] = ""
|
|
||||||
} else {
|
|
||||||
target[parts[0]] = parts[1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
mount.Type = mounttypes.TypeVolume // default to volume mounts
|
|
||||||
// Set writable as the default
|
|
||||||
for _, field := range fields {
|
|
||||||
parts := strings.SplitN(field, "=", 2)
|
|
||||||
key := strings.ToLower(parts[0])
|
|
||||||
|
|
||||||
if len(parts) == 1 {
|
|
||||||
switch key {
|
|
||||||
case "readonly", "ro":
|
|
||||||
mount.ReadOnly = true
|
|
||||||
continue
|
|
||||||
case "volume-nocopy":
|
|
||||||
volumeOptions().NoCopy = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
|
|
||||||
}
|
|
||||||
|
|
||||||
value := parts[1]
|
|
||||||
switch key {
|
|
||||||
case "type":
|
|
||||||
mount.Type = mounttypes.Type(strings.ToLower(value))
|
|
||||||
case "source", "src":
|
|
||||||
mount.Source = value
|
|
||||||
case "target", "dst", "destination":
|
|
||||||
mount.Target = value
|
|
||||||
case "readonly", "ro":
|
|
||||||
mount.ReadOnly, err = strconv.ParseBool(value)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("invalid value for %s: %s", key, value)
|
|
||||||
}
|
|
||||||
case "consistency":
|
|
||||||
mount.Consistency = mounttypes.Consistency(strings.ToLower(value))
|
|
||||||
case "bind-propagation":
|
|
||||||
bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(value))
|
|
||||||
case "volume-nocopy":
|
|
||||||
volumeOptions().NoCopy, err = strconv.ParseBool(value)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("invalid value for volume-nocopy: %s", value)
|
|
||||||
}
|
|
||||||
case "volume-label":
|
|
||||||
setValueOnMap(volumeOptions().Labels, value)
|
|
||||||
case "volume-driver":
|
|
||||||
volumeOptions().DriverConfig.Name = value
|
|
||||||
case "volume-opt":
|
|
||||||
if volumeOptions().DriverConfig.Options == nil {
|
|
||||||
volumeOptions().DriverConfig.Options = make(map[string]string)
|
|
||||||
}
|
|
||||||
setValueOnMap(volumeOptions().DriverConfig.Options, value)
|
|
||||||
case "tmpfs-size":
|
|
||||||
sizeBytes, err := units.RAMInBytes(value)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("invalid value for %s: %s", key, value)
|
|
||||||
}
|
|
||||||
tmpfsOptions().SizeBytes = sizeBytes
|
|
||||||
case "tmpfs-mode":
|
|
||||||
ui64, err := strconv.ParseUint(value, 8, 32)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("invalid value for %s: %s", key, value)
|
|
||||||
}
|
|
||||||
tmpfsOptions().Mode = os.FileMode(ui64)
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("unexpected key '%s' in '%s'", key, field)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if mount.Type == "" {
|
|
||||||
return fmt.Errorf("type is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
if mount.Target == "" {
|
|
||||||
return fmt.Errorf("target is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume {
|
|
||||||
return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", mount.Type)
|
|
||||||
}
|
|
||||||
if mount.BindOptions != nil && mount.Type != mounttypes.TypeBind {
|
|
||||||
return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", mount.Type)
|
|
||||||
}
|
|
||||||
if mount.TmpfsOptions != nil && mount.Type != mounttypes.TypeTmpfs {
|
|
||||||
return fmt.Errorf("cannot mix 'tmpfs-*' options with mount type '%s'", mount.Type)
|
|
||||||
}
|
|
||||||
|
|
||||||
m.values = append(m.values, mount)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns the type of this option
|
|
||||||
func (m *MountOpt) Type() string {
|
|
||||||
return "mount"
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a string repr of this option
|
|
||||||
func (m *MountOpt) String() string {
|
|
||||||
mounts := []string{}
|
|
||||||
for _, mount := range m.values {
|
|
||||||
repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target)
|
|
||||||
mounts = append(mounts, repr)
|
|
||||||
}
|
|
||||||
return strings.Join(mounts, ", ")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the mounts
|
|
||||||
func (m *MountOpt) Value() []mounttypes.Mount {
|
|
||||||
return m.values
|
|
||||||
}
|
|
106
vendor/github.com/docker/cli/opts/network.go
generated
vendored
106
vendor/github.com/docker/cli/opts/network.go
generated
vendored
|
@ -1,106 +0,0 @@
|
||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/csv"
|
|
||||||
"fmt"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
networkOptName = "name"
|
|
||||||
networkOptAlias = "alias"
|
|
||||||
driverOpt = "driver-opt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NetworkAttachmentOpts represents the network options for endpoint creation
|
|
||||||
type NetworkAttachmentOpts struct {
|
|
||||||
Target string
|
|
||||||
Aliases []string
|
|
||||||
DriverOpts map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkOpt represents a network config in swarm mode.
|
|
||||||
type NetworkOpt struct {
|
|
||||||
options []NetworkAttachmentOpts
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set networkopts value
|
|
||||||
func (n *NetworkOpt) Set(value string) error {
|
|
||||||
longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var netOpt NetworkAttachmentOpts
|
|
||||||
if longSyntax {
|
|
||||||
csvReader := csv.NewReader(strings.NewReader(value))
|
|
||||||
fields, err := csvReader.Read()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
netOpt.Aliases = []string{}
|
|
||||||
for _, field := range fields {
|
|
||||||
parts := strings.SplitN(field, "=", 2)
|
|
||||||
|
|
||||||
if len(parts) < 2 {
|
|
||||||
return fmt.Errorf("invalid field %s", field)
|
|
||||||
}
|
|
||||||
|
|
||||||
key := strings.TrimSpace(strings.ToLower(parts[0]))
|
|
||||||
value := strings.TrimSpace(strings.ToLower(parts[1]))
|
|
||||||
|
|
||||||
switch key {
|
|
||||||
case networkOptName:
|
|
||||||
netOpt.Target = value
|
|
||||||
case networkOptAlias:
|
|
||||||
netOpt.Aliases = append(netOpt.Aliases, value)
|
|
||||||
case driverOpt:
|
|
||||||
key, value, err = parseDriverOpt(value)
|
|
||||||
if err == nil {
|
|
||||||
if netOpt.DriverOpts == nil {
|
|
||||||
netOpt.DriverOpts = make(map[string]string)
|
|
||||||
}
|
|
||||||
netOpt.DriverOpts[key] = value
|
|
||||||
} else {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("invalid field key %s", key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(netOpt.Target) == 0 {
|
|
||||||
return fmt.Errorf("network name/id is not specified")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
netOpt.Target = value
|
|
||||||
}
|
|
||||||
n.options = append(n.options, netOpt)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns the type of this option
|
|
||||||
func (n *NetworkOpt) Type() string {
|
|
||||||
return "network"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the networkopts
|
|
||||||
func (n *NetworkOpt) Value() []NetworkAttachmentOpts {
|
|
||||||
return n.options
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the network opts as a string
|
|
||||||
func (n *NetworkOpt) String() string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseDriverOpt(driverOpt string) (string, string, error) {
|
|
||||||
parts := strings.SplitN(driverOpt, "=", 2)
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return "", "", fmt.Errorf("invalid key value pair format in driver options")
|
|
||||||
}
|
|
||||||
key := strings.TrimSpace(strings.ToLower(parts[0]))
|
|
||||||
value := strings.TrimSpace(strings.ToLower(parts[1]))
|
|
||||||
return key, value, nil
|
|
||||||
}
|
|
509
vendor/github.com/docker/cli/opts/opts.go
generated
vendored
509
vendor/github.com/docker/cli/opts/opts.go
generated
vendored
|
@ -1,509 +0,0 @@
|
||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"math/big"
|
|
||||||
"net"
|
|
||||||
"path"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types/filters"
|
|
||||||
units "github.com/docker/go-units"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
|
|
||||||
domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
|
|
||||||
)
|
|
||||||
|
|
||||||
// ListOpts holds a list of values and a validation function.
|
|
||||||
type ListOpts struct {
|
|
||||||
values *[]string
|
|
||||||
validator ValidatorFctType
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewListOpts creates a new ListOpts with the specified validator.
|
|
||||||
func NewListOpts(validator ValidatorFctType) ListOpts {
|
|
||||||
var values []string
|
|
||||||
return *NewListOptsRef(&values, validator)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewListOptsRef creates a new ListOpts with the specified values and validator.
|
|
||||||
func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
|
|
||||||
return &ListOpts{
|
|
||||||
values: values,
|
|
||||||
validator: validator,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (opts *ListOpts) String() string {
|
|
||||||
if len(*opts.values) == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%v", *opts.values)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set validates if needed the input value and adds it to the
|
|
||||||
// internal slice.
|
|
||||||
func (opts *ListOpts) Set(value string) error {
|
|
||||||
if opts.validator != nil {
|
|
||||||
v, err := opts.validator(value)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
value = v
|
|
||||||
}
|
|
||||||
(*opts.values) = append((*opts.values), value)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete removes the specified element from the slice.
|
|
||||||
func (opts *ListOpts) Delete(key string) {
|
|
||||||
for i, k := range *opts.values {
|
|
||||||
if k == key {
|
|
||||||
(*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMap returns the content of values in a map in order to avoid
|
|
||||||
// duplicates.
|
|
||||||
func (opts *ListOpts) GetMap() map[string]struct{} {
|
|
||||||
ret := make(map[string]struct{})
|
|
||||||
for _, k := range *opts.values {
|
|
||||||
ret[k] = struct{}{}
|
|
||||||
}
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAll returns the values of slice.
|
|
||||||
func (opts *ListOpts) GetAll() []string {
|
|
||||||
return (*opts.values)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAllOrEmpty returns the values of the slice
|
|
||||||
// or an empty slice when there are no values.
|
|
||||||
func (opts *ListOpts) GetAllOrEmpty() []string {
|
|
||||||
v := *opts.values
|
|
||||||
if v == nil {
|
|
||||||
return make([]string, 0)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get checks the existence of the specified key.
|
|
||||||
func (opts *ListOpts) Get(key string) bool {
|
|
||||||
for _, k := range *opts.values {
|
|
||||||
if k == key {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the amount of element in the slice.
|
|
||||||
func (opts *ListOpts) Len() int {
|
|
||||||
return len((*opts.values))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns a string name for this Option type
|
|
||||||
func (opts *ListOpts) Type() string {
|
|
||||||
return "list"
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithValidator returns the ListOpts with validator set.
|
|
||||||
func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts {
|
|
||||||
opts.validator = validator
|
|
||||||
return opts
|
|
||||||
}
|
|
||||||
|
|
||||||
// NamedOption is an interface that list and map options
|
|
||||||
// with names implement.
|
|
||||||
type NamedOption interface {
|
|
||||||
Name() string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NamedListOpts is a ListOpts with a configuration name.
|
|
||||||
// This struct is useful to keep reference to the assigned
|
|
||||||
// field name in the internal configuration struct.
|
|
||||||
type NamedListOpts struct {
|
|
||||||
name string
|
|
||||||
ListOpts
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ NamedOption = &NamedListOpts{}
|
|
||||||
|
|
||||||
// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
|
|
||||||
func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
|
|
||||||
return &NamedListOpts{
|
|
||||||
name: name,
|
|
||||||
ListOpts: *NewListOptsRef(values, validator),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the name of the NamedListOpts in the configuration.
|
|
||||||
func (o *NamedListOpts) Name() string {
|
|
||||||
return o.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapOpts holds a map of values and a validation function.
|
|
||||||
type MapOpts struct {
|
|
||||||
values map[string]string
|
|
||||||
validator ValidatorFctType
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set validates if needed the input value and add it to the
|
|
||||||
// internal map, by splitting on '='.
|
|
||||||
func (opts *MapOpts) Set(value string) error {
|
|
||||||
if opts.validator != nil {
|
|
||||||
v, err := opts.validator(value)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
value = v
|
|
||||||
}
|
|
||||||
vals := strings.SplitN(value, "=", 2)
|
|
||||||
if len(vals) == 1 {
|
|
||||||
(opts.values)[vals[0]] = ""
|
|
||||||
} else {
|
|
||||||
(opts.values)[vals[0]] = vals[1]
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAll returns the values of MapOpts as a map.
|
|
||||||
func (opts *MapOpts) GetAll() map[string]string {
|
|
||||||
return opts.values
|
|
||||||
}
|
|
||||||
|
|
||||||
func (opts *MapOpts) String() string {
|
|
||||||
return fmt.Sprintf("%v", opts.values)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns a string name for this Option type
|
|
||||||
func (opts *MapOpts) Type() string {
|
|
||||||
return "map"
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
|
|
||||||
func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
|
|
||||||
if values == nil {
|
|
||||||
values = make(map[string]string)
|
|
||||||
}
|
|
||||||
return &MapOpts{
|
|
||||||
values: values,
|
|
||||||
validator: validator,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NamedMapOpts is a MapOpts struct with a configuration name.
|
|
||||||
// This struct is useful to keep reference to the assigned
|
|
||||||
// field name in the internal configuration struct.
|
|
||||||
type NamedMapOpts struct {
|
|
||||||
name string
|
|
||||||
MapOpts
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ NamedOption = &NamedMapOpts{}
|
|
||||||
|
|
||||||
// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
|
|
||||||
func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
|
|
||||||
return &NamedMapOpts{
|
|
||||||
name: name,
|
|
||||||
MapOpts: *NewMapOpts(values, validator),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the name of the NamedMapOpts in the configuration.
|
|
||||||
func (o *NamedMapOpts) Name() string {
|
|
||||||
return o.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidatorFctType defines a validator function that returns a validated string and/or an error.
|
|
||||||
type ValidatorFctType func(val string) (string, error)
|
|
||||||
|
|
||||||
// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
|
|
||||||
type ValidatorFctListType func(val string) ([]string, error)
|
|
||||||
|
|
||||||
// ValidateIPAddress validates an Ip address.
|
|
||||||
func ValidateIPAddress(val string) (string, error) {
|
|
||||||
var ip = net.ParseIP(strings.TrimSpace(val))
|
|
||||||
if ip != nil {
|
|
||||||
return ip.String(), nil
|
|
||||||
}
|
|
||||||
return "", fmt.Errorf("%s is not an ip address", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateMACAddress validates a MAC address.
|
|
||||||
func ValidateMACAddress(val string) (string, error) {
|
|
||||||
_, err := net.ParseMAC(strings.TrimSpace(val))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateDNSSearch validates domain for resolvconf search configuration.
|
|
||||||
// A zero length domain is represented by a dot (.).
|
|
||||||
func ValidateDNSSearch(val string) (string, error) {
|
|
||||||
if val = strings.Trim(val, " "); val == "." {
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
return validateDomain(val)
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateDomain(val string) (string, error) {
|
|
||||||
if alphaRegexp.FindString(val) == "" {
|
|
||||||
return "", fmt.Errorf("%s is not a valid domain", val)
|
|
||||||
}
|
|
||||||
ns := domainRegexp.FindSubmatch([]byte(val))
|
|
||||||
if len(ns) > 0 && len(ns[1]) < 255 {
|
|
||||||
return string(ns[1]), nil
|
|
||||||
}
|
|
||||||
return "", fmt.Errorf("%s is not a valid domain", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateLabel validates that the specified string is a valid label, and returns it.
|
|
||||||
// Labels are in the form on key=value.
|
|
||||||
func ValidateLabel(val string) (string, error) {
|
|
||||||
if strings.Count(val, "=") < 1 {
|
|
||||||
return "", fmt.Errorf("bad attribute format: %s", val)
|
|
||||||
}
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateSysctl validates a sysctl and returns it.
|
|
||||||
func ValidateSysctl(val string) (string, error) {
|
|
||||||
validSysctlMap := map[string]bool{
|
|
||||||
"kernel.msgmax": true,
|
|
||||||
"kernel.msgmnb": true,
|
|
||||||
"kernel.msgmni": true,
|
|
||||||
"kernel.sem": true,
|
|
||||||
"kernel.shmall": true,
|
|
||||||
"kernel.shmmax": true,
|
|
||||||
"kernel.shmmni": true,
|
|
||||||
"kernel.shm_rmid_forced": true,
|
|
||||||
}
|
|
||||||
validSysctlPrefixes := []string{
|
|
||||||
"net.",
|
|
||||||
"fs.mqueue.",
|
|
||||||
}
|
|
||||||
arr := strings.Split(val, "=")
|
|
||||||
if len(arr) < 2 {
|
|
||||||
return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
|
|
||||||
}
|
|
||||||
if validSysctlMap[arr[0]] {
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, vp := range validSysctlPrefixes {
|
|
||||||
if strings.HasPrefix(arr[0], vp) {
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateProgressOutput errors out if an invalid value is passed to --progress
|
|
||||||
func ValidateProgressOutput(val string) error {
|
|
||||||
valid := []string{"auto", "plain", "tty"}
|
|
||||||
for _, s := range valid {
|
|
||||||
if s == val {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return fmt.Errorf("invalid value %q passed to --progress, valid values are: %s", val, strings.Join(valid, ", "))
|
|
||||||
}
|
|
||||||
|
|
||||||
// FilterOpt is a flag type for validating filters
|
|
||||||
type FilterOpt struct {
|
|
||||||
filter filters.Args
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFilterOpt returns a new FilterOpt
|
|
||||||
func NewFilterOpt() FilterOpt {
|
|
||||||
return FilterOpt{filter: filters.NewArgs()}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *FilterOpt) String() string {
|
|
||||||
repr, err := filters.ToJSON(o.filter)
|
|
||||||
if err != nil {
|
|
||||||
return "invalid filters"
|
|
||||||
}
|
|
||||||
return repr
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets the value of the opt by parsing the command line value
|
|
||||||
func (o *FilterOpt) Set(value string) error {
|
|
||||||
if value == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if !strings.Contains(value, "=") {
|
|
||||||
return errors.New("bad format of filter (expected name=value)")
|
|
||||||
}
|
|
||||||
f := strings.SplitN(value, "=", 2)
|
|
||||||
name := strings.ToLower(strings.TrimSpace(f[0]))
|
|
||||||
value = strings.TrimSpace(f[1])
|
|
||||||
|
|
||||||
o.filter.Add(name, value)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns the option type
|
|
||||||
func (o *FilterOpt) Type() string {
|
|
||||||
return "filter"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the value of this option
|
|
||||||
func (o *FilterOpt) Value() filters.Args {
|
|
||||||
return o.filter
|
|
||||||
}
|
|
||||||
|
|
||||||
// NanoCPUs is a type for fixed point fractional number.
|
|
||||||
type NanoCPUs int64
|
|
||||||
|
|
||||||
// String returns the string format of the number
|
|
||||||
func (c *NanoCPUs) String() string {
|
|
||||||
if *c == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return big.NewRat(c.Value(), 1e9).FloatString(3)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets the value of the NanoCPU by passing a string
|
|
||||||
func (c *NanoCPUs) Set(value string) error {
|
|
||||||
cpus, err := ParseCPUs(value)
|
|
||||||
*c = NanoCPUs(cpus)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns the type
|
|
||||||
func (c *NanoCPUs) Type() string {
|
|
||||||
return "decimal"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the value in int64
|
|
||||||
func (c *NanoCPUs) Value() int64 {
|
|
||||||
return int64(*c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseCPUs takes a string ratio and returns an integer value of nano cpus
|
|
||||||
func ParseCPUs(value string) (int64, error) {
|
|
||||||
cpu, ok := new(big.Rat).SetString(value)
|
|
||||||
if !ok {
|
|
||||||
return 0, fmt.Errorf("failed to parse %v as a rational number", value)
|
|
||||||
}
|
|
||||||
nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
|
|
||||||
if !nano.IsInt() {
|
|
||||||
return 0, fmt.Errorf("value is too precise")
|
|
||||||
}
|
|
||||||
return nano.Num().Int64(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseLink parses and validates the specified string as a link format (name:alias)
|
|
||||||
func ParseLink(val string) (string, string, error) {
|
|
||||||
if val == "" {
|
|
||||||
return "", "", fmt.Errorf("empty string specified for links")
|
|
||||||
}
|
|
||||||
arr := strings.Split(val, ":")
|
|
||||||
if len(arr) > 2 {
|
|
||||||
return "", "", fmt.Errorf("bad format for links: %s", val)
|
|
||||||
}
|
|
||||||
if len(arr) == 1 {
|
|
||||||
return val, val, nil
|
|
||||||
}
|
|
||||||
// This is kept because we can actually get a HostConfig with links
|
|
||||||
// from an already created container and the format is not `foo:bar`
|
|
||||||
// but `/foo:/c1/bar`
|
|
||||||
if strings.HasPrefix(arr[0], "/") {
|
|
||||||
_, alias := path.Split(arr[1])
|
|
||||||
return arr[0][1:], alias, nil
|
|
||||||
}
|
|
||||||
return arr[0], arr[1], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateLink validates that the specified string has a valid link format (containerName:alias).
|
|
||||||
func ValidateLink(val string) (string, error) {
|
|
||||||
_, _, err := ParseLink(val)
|
|
||||||
return val, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc)
|
|
||||||
type MemBytes int64
|
|
||||||
|
|
||||||
// String returns the string format of the human readable memory bytes
|
|
||||||
func (m *MemBytes) String() string {
|
|
||||||
// NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not.
|
|
||||||
// We return "0" in case value is 0 here so that the default value is hidden.
|
|
||||||
// (Sometimes "default 0 B" is actually misleading)
|
|
||||||
if m.Value() != 0 {
|
|
||||||
return units.BytesSize(float64(m.Value()))
|
|
||||||
}
|
|
||||||
return "0"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets the value of the MemBytes by passing a string
|
|
||||||
func (m *MemBytes) Set(value string) error {
|
|
||||||
val, err := units.RAMInBytes(value)
|
|
||||||
*m = MemBytes(val)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns the type
|
|
||||||
func (m *MemBytes) Type() string {
|
|
||||||
return "bytes"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the value in int64
|
|
||||||
func (m *MemBytes) Value() int64 {
|
|
||||||
return int64(*m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON is the customized unmarshaler for MemBytes
|
|
||||||
func (m *MemBytes) UnmarshalJSON(s []byte) error {
|
|
||||||
if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' {
|
|
||||||
return fmt.Errorf("invalid size: %q", s)
|
|
||||||
}
|
|
||||||
val, err := units.RAMInBytes(string(s[1 : len(s)-1]))
|
|
||||||
*m = MemBytes(val)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// MemSwapBytes is a type for human readable memory bytes (like 128M, 2g, etc).
|
|
||||||
// It differs from MemBytes in that -1 is valid and the default.
|
|
||||||
type MemSwapBytes int64
|
|
||||||
|
|
||||||
// Set sets the value of the MemSwapBytes by passing a string
|
|
||||||
func (m *MemSwapBytes) Set(value string) error {
|
|
||||||
if value == "-1" {
|
|
||||||
*m = MemSwapBytes(-1)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
val, err := units.RAMInBytes(value)
|
|
||||||
*m = MemSwapBytes(val)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns the type
|
|
||||||
func (m *MemSwapBytes) Type() string {
|
|
||||||
return "bytes"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the value in int64
|
|
||||||
func (m *MemSwapBytes) Value() int64 {
|
|
||||||
return int64(*m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MemSwapBytes) String() string {
|
|
||||||
b := MemBytes(*m)
|
|
||||||
return b.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON is the customized unmarshaler for MemSwapBytes
|
|
||||||
func (m *MemSwapBytes) UnmarshalJSON(s []byte) error {
|
|
||||||
b := MemBytes(*m)
|
|
||||||
return b.UnmarshalJSON(s)
|
|
||||||
}
|
|
6
vendor/github.com/docker/cli/opts/opts_unix.go
generated
vendored
6
vendor/github.com/docker/cli/opts/opts_unix.go
generated
vendored
|
@ -1,6 +0,0 @@
|
||||||
// +build !windows
|
|
||||||
|
|
||||||
package opts
|
|
||||||
|
|
||||||
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
|
|
||||||
const DefaultHTTPHost = "localhost"
|
|
56
vendor/github.com/docker/cli/opts/opts_windows.go
generated
vendored
56
vendor/github.com/docker/cli/opts/opts_windows.go
generated
vendored
|
@ -1,56 +0,0 @@
|
||||||
package opts
|
|
||||||
|
|
||||||
// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5.
|
|
||||||
// @jhowardmsft, @swernli.
|
|
||||||
//
|
|
||||||
// On Windows, this mitigates a problem with the default options of running
|
|
||||||
// a docker client against a local docker daemon on TP5.
|
|
||||||
//
|
|
||||||
// What was found that if the default host is "localhost", even if the client
|
|
||||||
// (and daemon as this is local) is not physically on a network, and the DNS
|
|
||||||
// cache is flushed (ipconfig /flushdns), then the client will pause for
|
|
||||||
// exactly one second when connecting to the daemon for calls. For example
|
|
||||||
// using docker run windowsservercore cmd, the CLI will send a create followed
|
|
||||||
// by an attach. You see the delay between the attach finishing and the attach
|
|
||||||
// being seen by the daemon.
|
|
||||||
//
|
|
||||||
// Here's some daemon debug logs with additional debug spew put in. The
|
|
||||||
// AfterWriteJSON log is the very last thing the daemon does as part of the
|
|
||||||
// create call. The POST /attach is the second CLI call. Notice the second
|
|
||||||
// time gap.
|
|
||||||
//
|
|
||||||
// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
|
|
||||||
// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
|
|
||||||
// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
|
|
||||||
// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
|
|
||||||
// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
|
|
||||||
// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
|
|
||||||
// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
|
|
||||||
// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create"
|
|
||||||
// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2"
|
|
||||||
// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate"
|
|
||||||
// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON"
|
|
||||||
// ... 1 second gap here....
|
|
||||||
// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach"
|
|
||||||
// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
|
|
||||||
//
|
|
||||||
// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change
|
|
||||||
// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory,
|
|
||||||
// the Windows networking stack is supposed to resolve "localhost" internally,
|
|
||||||
// without hitting DNS, or even reading the hosts file (which is why localhost
|
|
||||||
// is commented out in the hosts file on Windows).
|
|
||||||
//
|
|
||||||
// We have validated that working around this using the actual IPv4 localhost
|
|
||||||
// address does not cause the delay.
|
|
||||||
//
|
|
||||||
// This does not occur with the docker client built with 1.4.3 on the same
|
|
||||||
// Windows build, regardless of whether the daemon is built using 1.5.1
|
|
||||||
// or 1.4.3. It does not occur on Linux. We also verified we see the same thing
|
|
||||||
// on a cross-compiled Windows binary (from Linux).
|
|
||||||
//
|
|
||||||
// Final note: This is a mitigation, not a 'real' fix. It is still susceptible
|
|
||||||
// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...'
|
|
||||||
// explicitly.
|
|
||||||
|
|
||||||
// DefaultHTTPHost is the default HTTP host used when only a port is
// supplied to the -H flag, e.g. dockerd -H tcp://:8080. It is the IPv4
// loopback address rather than "localhost" to avoid the DNS-related
// one-second delay described in the mitigation note above.
const DefaultHTTPHost = "127.0.0.1"
|
|
99
vendor/github.com/docker/cli/opts/parse.go
generated
vendored
99
vendor/github.com/docker/cli/opts/parse.go
generated
vendored
|
@ -1,99 +0,0 @@
|
||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types/container"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ReadKVStrings reads a file of line terminated key=value pairs, and overrides any keys
|
|
||||||
// present in the file with additional pairs specified in the override parameter
|
|
||||||
func ReadKVStrings(files []string, override []string) ([]string, error) {
|
|
||||||
return readKVStrings(files, override, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadKVEnvStrings reads a file of line terminated key=value pairs, and overrides any keys
|
|
||||||
// present in the file with additional pairs specified in the override parameter.
|
|
||||||
// If a key has no value, it will get the value from the environment.
|
|
||||||
func ReadKVEnvStrings(files []string, override []string) ([]string, error) {
|
|
||||||
return readKVStrings(files, override, os.LookupEnv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func readKVStrings(files []string, override []string, emptyFn func(string) (string, bool)) ([]string, error) {
|
|
||||||
variables := []string{}
|
|
||||||
for _, ef := range files {
|
|
||||||
parsedVars, err := parseKeyValueFile(ef, emptyFn)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
variables = append(variables, parsedVars...)
|
|
||||||
}
|
|
||||||
// parse the '-e' and '--env' after, to allow override
|
|
||||||
variables = append(variables, override...)
|
|
||||||
|
|
||||||
return variables, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"}
func ConvertKVStringsToMap(values []string) map[string]string {
	out := make(map[string]string, len(values))
	for _, pair := range values {
		parts := strings.SplitN(pair, "=", 2)
		// A bare key (no "=") maps to the empty string.
		val := ""
		if len(parts) == 2 {
			val = parts[1]
		}
		out[parts[0]] = val
	}
	return out
}
|
|
||||||
|
|
||||||
// ConvertKVStringsToMapWithNil converts ["key=value"] to {"key":"value"}
// but set unset keys to nil - meaning the ones with no "=" in them.
// We use this in cases where we need to distinguish between
// FOO= and FOO
// where the latter case just means FOO was mentioned but not given a value
func ConvertKVStringsToMapWithNil(values []string) map[string]*string {
	result := make(map[string]*string, len(values))
	for _, entry := range values {
		parts := strings.SplitN(entry, "=", 2)
		if len(parts) != 2 {
			// Mentioned without a value: distinguish with nil.
			result[parts[0]] = nil
			continue
		}
		v := parts[1]
		result[parts[0]] = &v
	}
	return result
}
|
|
||||||
|
|
||||||
// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect
|
|
||||||
func ParseRestartPolicy(policy string) (container.RestartPolicy, error) {
|
|
||||||
p := container.RestartPolicy{}
|
|
||||||
|
|
||||||
if policy == "" {
|
|
||||||
return p, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
parts := strings.Split(policy, ":")
|
|
||||||
|
|
||||||
if len(parts) > 2 {
|
|
||||||
return p, fmt.Errorf("invalid restart policy format")
|
|
||||||
}
|
|
||||||
if len(parts) == 2 {
|
|
||||||
count, err := strconv.Atoi(parts[1])
|
|
||||||
if err != nil {
|
|
||||||
return p, fmt.Errorf("maximum retry count must be an integer")
|
|
||||||
}
|
|
||||||
|
|
||||||
p.MaximumRetryCount = count
|
|
||||||
}
|
|
||||||
|
|
||||||
p.Name = parts[0]
|
|
||||||
|
|
||||||
return p, nil
|
|
||||||
}
|
|
172
vendor/github.com/docker/cli/opts/port.go
generated
vendored
172
vendor/github.com/docker/cli/opts/port.go
generated
vendored
|
@ -1,172 +0,0 @@
|
||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/csv"
|
|
||||||
"fmt"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types/swarm"
|
|
||||||
"github.com/docker/go-connections/nat"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
portOptTargetPort = "target"
|
|
||||||
portOptPublishedPort = "published"
|
|
||||||
portOptProtocol = "protocol"
|
|
||||||
portOptMode = "mode"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PortOpt represents a port config in swarm mode.
|
|
||||||
type PortOpt struct {
|
|
||||||
ports []swarm.PortConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set a new port value
|
|
||||||
// nolint: gocyclo
|
|
||||||
func (p *PortOpt) Set(value string) error {
|
|
||||||
longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if longSyntax {
|
|
||||||
csvReader := csv.NewReader(strings.NewReader(value))
|
|
||||||
fields, err := csvReader.Read()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
pConfig := swarm.PortConfig{}
|
|
||||||
for _, field := range fields {
|
|
||||||
parts := strings.SplitN(field, "=", 2)
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return fmt.Errorf("invalid field %s", field)
|
|
||||||
}
|
|
||||||
|
|
||||||
key := strings.ToLower(parts[0])
|
|
||||||
value := strings.ToLower(parts[1])
|
|
||||||
|
|
||||||
switch key {
|
|
||||||
case portOptProtocol:
|
|
||||||
if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) && value != string(swarm.PortConfigProtocolSCTP) {
|
|
||||||
return fmt.Errorf("invalid protocol value %s", value)
|
|
||||||
}
|
|
||||||
|
|
||||||
pConfig.Protocol = swarm.PortConfigProtocol(value)
|
|
||||||
case portOptMode:
|
|
||||||
if value != string(swarm.PortConfigPublishModeIngress) && value != string(swarm.PortConfigPublishModeHost) {
|
|
||||||
return fmt.Errorf("invalid publish mode value %s", value)
|
|
||||||
}
|
|
||||||
|
|
||||||
pConfig.PublishMode = swarm.PortConfigPublishMode(value)
|
|
||||||
case portOptTargetPort:
|
|
||||||
tPort, err := strconv.ParseUint(value, 10, 16)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
pConfig.TargetPort = uint32(tPort)
|
|
||||||
case portOptPublishedPort:
|
|
||||||
pPort, err := strconv.ParseUint(value, 10, 16)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
pConfig.PublishedPort = uint32(pPort)
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("invalid field key %s", key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if pConfig.TargetPort == 0 {
|
|
||||||
return fmt.Errorf("missing mandatory field %q", portOptTargetPort)
|
|
||||||
}
|
|
||||||
|
|
||||||
if pConfig.PublishMode == "" {
|
|
||||||
pConfig.PublishMode = swarm.PortConfigPublishModeIngress
|
|
||||||
}
|
|
||||||
|
|
||||||
if pConfig.Protocol == "" {
|
|
||||||
pConfig.Protocol = swarm.PortConfigProtocolTCP
|
|
||||||
}
|
|
||||||
|
|
||||||
p.ports = append(p.ports, pConfig)
|
|
||||||
} else {
|
|
||||||
// short syntax
|
|
||||||
portConfigs := []swarm.PortConfig{}
|
|
||||||
ports, portBindingMap, err := nat.ParsePortSpecs([]string{value})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, portBindings := range portBindingMap {
|
|
||||||
for _, portBinding := range portBindings {
|
|
||||||
if portBinding.HostIP != "" {
|
|
||||||
return fmt.Errorf("hostip is not supported")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for port := range ports {
|
|
||||||
portConfig, err := ConvertPortToPortConfig(port, portBindingMap)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
portConfigs = append(portConfigs, portConfig...)
|
|
||||||
}
|
|
||||||
p.ports = append(p.ports, portConfigs...)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns the type of this option
|
|
||||||
func (p *PortOpt) Type() string {
|
|
||||||
return "port"
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a string repr of this option
|
|
||||||
func (p *PortOpt) String() string {
|
|
||||||
ports := []string{}
|
|
||||||
for _, port := range p.ports {
|
|
||||||
repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode)
|
|
||||||
ports = append(ports, repr)
|
|
||||||
}
|
|
||||||
return strings.Join(ports, ", ")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the ports
|
|
||||||
func (p *PortOpt) Value() []swarm.PortConfig {
|
|
||||||
return p.ports
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConvertPortToPortConfig converts ports to the swarm type
|
|
||||||
func ConvertPortToPortConfig(
|
|
||||||
port nat.Port,
|
|
||||||
portBindings map[nat.Port][]nat.PortBinding,
|
|
||||||
) ([]swarm.PortConfig, error) {
|
|
||||||
ports := []swarm.PortConfig{}
|
|
||||||
|
|
||||||
for _, binding := range portBindings[port] {
|
|
||||||
if binding.HostIP != "" && binding.HostIP != "0.0.0.0" {
|
|
||||||
logrus.Warnf("ignoring IP-address (%s:%s:%s) service will listen on '0.0.0.0'", binding.HostIP, binding.HostPort, port)
|
|
||||||
}
|
|
||||||
|
|
||||||
startHostPort, endHostPort, err := nat.ParsePortRange(binding.HostPort)
|
|
||||||
|
|
||||||
if err != nil && binding.HostPort != "" {
|
|
||||||
return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port())
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := startHostPort; i <= endHostPort; i++ {
|
|
||||||
ports = append(ports, swarm.PortConfig{
|
|
||||||
//TODO Name: ?
|
|
||||||
Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())),
|
|
||||||
TargetPort: uint32(port.Int()),
|
|
||||||
PublishedPort: uint32(i),
|
|
||||||
PublishMode: swarm.PortConfigPublishModeIngress,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ports, nil
|
|
||||||
}
|
|
37
vendor/github.com/docker/cli/opts/quotedstring.go
generated
vendored
37
vendor/github.com/docker/cli/opts/quotedstring.go
generated
vendored
|
@ -1,37 +0,0 @@
|
||||||
package opts
|
|
||||||
|
|
||||||
// QuotedString is a string that may have extra quotes around the value. The
// quotes are stripped from the value.
type QuotedString struct {
	// value points at the caller-owned string updated by Set.
	value *string
}

// Set sets a new value, stripping one matching pair of surrounding single
// or double quotes if present.
func (s *QuotedString) Set(val string) error {
	*s.value = trimQuotes(val)
	return nil
}

// Type returns the type of the value
func (s *QuotedString) Type() string {
	return "string"
}

// String returns the current (unquoted) value.
func (s *QuotedString) String() string {
	return *s.value
}

// trimQuotes removes one matching pair of leading/trailing single or double
// quotes. Strings shorter than two characters are returned unchanged: the
// previous implementation panicked on empty input (out-of-range index) and
// on a lone quote character (invalid slice bounds).
func trimQuotes(value string) string {
	if len(value) < 2 {
		return value
	}
	lastIndex := len(value) - 1
	for _, char := range []byte{'\'', '"'} {
		if value[0] == char && value[lastIndex] == char {
			return value[1:lastIndex]
		}
	}
	return value
}

// NewQuotedString returns a new quoted string option
func NewQuotedString(value *string) *QuotedString {
	return &QuotedString{value: value}
}
|
|
79
vendor/github.com/docker/cli/opts/runtime.go
generated
vendored
79
vendor/github.com/docker/cli/opts/runtime.go
generated
vendored
|
@ -1,79 +0,0 @@
|
||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RuntimeOpt defines a map of Runtimes
|
|
||||||
type RuntimeOpt struct {
|
|
||||||
name string
|
|
||||||
stockRuntimeName string
|
|
||||||
values *map[string]types.Runtime
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewNamedRuntimeOpt creates a new RuntimeOpt
|
|
||||||
func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt {
|
|
||||||
if ref == nil {
|
|
||||||
ref = &map[string]types.Runtime{}
|
|
||||||
}
|
|
||||||
return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the name of the NamedListOpts in the configuration.
|
|
||||||
func (o *RuntimeOpt) Name() string {
|
|
||||||
return o.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set validates and updates the list of Runtimes
|
|
||||||
func (o *RuntimeOpt) Set(val string) error {
|
|
||||||
parts := strings.SplitN(val, "=", 2)
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return fmt.Errorf("invalid runtime argument: %s", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
parts[0] = strings.TrimSpace(parts[0])
|
|
||||||
parts[1] = strings.TrimSpace(parts[1])
|
|
||||||
if parts[0] == "" || parts[1] == "" {
|
|
||||||
return fmt.Errorf("invalid runtime argument: %s", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
parts[0] = strings.ToLower(parts[0])
|
|
||||||
if parts[0] == o.stockRuntimeName {
|
|
||||||
return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := (*o.values)[parts[0]]; ok {
|
|
||||||
return fmt.Errorf("runtime '%s' was already defined", parts[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
(*o.values)[parts[0]] = types.Runtime{Path: parts[1]}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns Runtime values as a string.
|
|
||||||
func (o *RuntimeOpt) String() string {
|
|
||||||
var out []string
|
|
||||||
for k := range *o.values {
|
|
||||||
out = append(out, k)
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf("%v", out)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMap returns a map of Runtimes (name: path)
|
|
||||||
func (o *RuntimeOpt) GetMap() map[string]types.Runtime {
|
|
||||||
if o.values != nil {
|
|
||||||
return *o.values
|
|
||||||
}
|
|
||||||
|
|
||||||
return map[string]types.Runtime{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns the type of the option
|
|
||||||
func (o *RuntimeOpt) Type() string {
|
|
||||||
return "runtime"
|
|
||||||
}
|
|
98
vendor/github.com/docker/cli/opts/secret.go
generated
vendored
98
vendor/github.com/docker/cli/opts/secret.go
generated
vendored
|
@ -1,98 +0,0 @@
|
||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/csv"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
swarmtypes "github.com/docker/docker/api/types/swarm"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SecretOpt is a Value type for parsing secrets
|
|
||||||
type SecretOpt struct {
|
|
||||||
values []*swarmtypes.SecretReference
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set a new secret value
|
|
||||||
func (o *SecretOpt) Set(value string) error {
|
|
||||||
csvReader := csv.NewReader(strings.NewReader(value))
|
|
||||||
fields, err := csvReader.Read()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
options := &swarmtypes.SecretReference{
|
|
||||||
File: &swarmtypes.SecretReferenceFileTarget{
|
|
||||||
UID: "0",
|
|
||||||
GID: "0",
|
|
||||||
Mode: 0444,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// support a simple syntax of --secret foo
|
|
||||||
if len(fields) == 1 {
|
|
||||||
options.File.Name = fields[0]
|
|
||||||
options.SecretName = fields[0]
|
|
||||||
o.values = append(o.values, options)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, field := range fields {
|
|
||||||
parts := strings.SplitN(field, "=", 2)
|
|
||||||
key := strings.ToLower(parts[0])
|
|
||||||
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
|
|
||||||
}
|
|
||||||
|
|
||||||
value := parts[1]
|
|
||||||
switch key {
|
|
||||||
case "source", "src":
|
|
||||||
options.SecretName = value
|
|
||||||
case "target":
|
|
||||||
options.File.Name = value
|
|
||||||
case "uid":
|
|
||||||
options.File.UID = value
|
|
||||||
case "gid":
|
|
||||||
options.File.GID = value
|
|
||||||
case "mode":
|
|
||||||
m, err := strconv.ParseUint(value, 0, 32)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("invalid mode specified: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
options.File.Mode = os.FileMode(m)
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("invalid field in secret request: %s", key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if options.SecretName == "" {
|
|
||||||
return fmt.Errorf("source is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
o.values = append(o.values, options)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns the type of this option
|
|
||||||
func (o *SecretOpt) Type() string {
|
|
||||||
return "secret"
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a string repr of this option
|
|
||||||
func (o *SecretOpt) String() string {
|
|
||||||
secrets := []string{}
|
|
||||||
for _, secret := range o.values {
|
|
||||||
repr := fmt.Sprintf("%s -> %s", secret.SecretName, secret.File.Name)
|
|
||||||
secrets = append(secrets, repr)
|
|
||||||
}
|
|
||||||
return strings.Join(secrets, ", ")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the secret requests
|
|
||||||
func (o *SecretOpt) Value() []*swarmtypes.SecretReference {
|
|
||||||
return o.values
|
|
||||||
}
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue